xref: /linux/drivers/target/iscsi/iscsi_target.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3  * This file contains main functions related to the iSCSI Target Core Driver.
4  *
5  * (c) Copyright 2007-2013 Datera, Inc.
6  *
7  * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
8  *
9  ******************************************************************************/
10 
11 #include <crypto/hash.h>
12 #include <linux/string.h>
13 #include <linux/kthread.h>
14 #include <linux/completion.h>
15 #include <linux/module.h>
16 #include <linux/vmalloc.h>
17 #include <linux/idr.h>
18 #include <linux/delay.h>
19 #include <linux/sched/signal.h>
20 #include <linux/unaligned.h>
21 #include <linux/inet.h>
22 #include <net/ipv6.h>
23 #include <scsi/scsi_proto.h>
24 #include <scsi/iscsi_proto.h>
25 #include <scsi/scsi_tcq.h>
26 #include <target/target_core_base.h>
27 #include <target/target_core_fabric.h>
28 
29 #include <target/target_core_backend.h>
30 #include <target/iscsi/iscsi_target_core.h>
31 #include "iscsi_target_parameters.h"
32 #include "iscsi_target_seq_pdu_list.h"
33 #include "iscsi_target_datain_values.h"
34 #include "iscsi_target_erl0.h"
35 #include "iscsi_target_erl1.h"
36 #include "iscsi_target_erl2.h"
37 #include "iscsi_target_login.h"
38 #include "iscsi_target_tmr.h"
39 #include "iscsi_target_tpg.h"
40 #include "iscsi_target_util.h"
41 #include "iscsi_target.h"
42 #include "iscsi_target_device.h"
43 #include <target/iscsi/iscsi_target_stat.h>
44 
45 #include <target/iscsi/iscsi_transport.h>
46 
/* All registered target IQNs (struct iscsi_tiqn). */
static LIST_HEAD(g_tiqn_list);
/* All network portals (struct iscsi_np). */
static LIST_HEAD(g_np_list);
/* Protects g_tiqn_list and tiqn_idr. */
static DEFINE_SPINLOCK(tiqn_lock);
/* Protects g_np_list and serializes portal add/remove. */
static DEFINE_MUTEX(np_lock);

/* Allocates tiqn->tiqn_index values; initialized at module init. */
static struct idr tiqn_idr;
/* Per-session ID allocator; non-static, so used elsewhere in the driver. */
DEFINE_IDA(sess_ida);
/* Initialized in iscsi_target_init_module(); non-static for other files. */
struct mutex auth_id_lock;

/* Global driver state, allocated in iscsi_target_init_module(). */
struct iscsit_global *iscsit_global;

/* Slab caches for per-I/O bookkeeping objects, created at module init. */
struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;

static int iscsit_handle_immediate_data(struct iscsit_cmd *,
			struct iscsi_scsi_req *, u32);
65 
66 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
67 {
68 	struct iscsi_tiqn *tiqn = NULL;
69 
70 	spin_lock(&tiqn_lock);
71 	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
72 		if (!strcmp(tiqn->tiqn, buf)) {
73 
74 			spin_lock(&tiqn->tiqn_state_lock);
75 			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
76 				tiqn->tiqn_access_count++;
77 				spin_unlock(&tiqn->tiqn_state_lock);
78 				spin_unlock(&tiqn_lock);
79 				return tiqn;
80 			}
81 			spin_unlock(&tiqn->tiqn_state_lock);
82 		}
83 	}
84 	spin_unlock(&tiqn_lock);
85 
86 	return NULL;
87 }
88 
89 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
90 {
91 	spin_lock(&tiqn->tiqn_state_lock);
92 	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
93 		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
94 		spin_unlock(&tiqn->tiqn_state_lock);
95 		return 0;
96 	}
97 	spin_unlock(&tiqn->tiqn_state_lock);
98 
99 	return -1;
100 }
101 
/*
 * Drop the access reference taken by iscsit_get_tiqn_for_login(), letting
 * iscsit_wait_for_tiqn() make progress during tiqn shutdown.
 */
void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	tiqn->tiqn_access_count--;
	spin_unlock(&tiqn->tiqn_state_lock);
}
108 
109 /*
110  * Note that IQN formatting is expected to be done in userspace, and
111  * no explict IQN format checks are done here.
112  */
113 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
114 {
115 	struct iscsi_tiqn *tiqn = NULL;
116 	int ret;
117 
118 	if (strlen(buf) >= ISCSI_IQN_LEN) {
119 		pr_err("Target IQN exceeds %d bytes\n",
120 				ISCSI_IQN_LEN);
121 		return ERR_PTR(-EINVAL);
122 	}
123 
124 	tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
125 	if (!tiqn)
126 		return ERR_PTR(-ENOMEM);
127 
128 	sprintf(tiqn->tiqn, "%s", buf);
129 	INIT_LIST_HEAD(&tiqn->tiqn_list);
130 	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
131 	spin_lock_init(&tiqn->tiqn_state_lock);
132 	spin_lock_init(&tiqn->tiqn_tpg_lock);
133 	spin_lock_init(&tiqn->sess_err_stats.lock);
134 	spin_lock_init(&tiqn->login_stats.lock);
135 	spin_lock_init(&tiqn->logout_stats.lock);
136 
137 	tiqn->tiqn_state = TIQN_STATE_ACTIVE;
138 
139 	idr_preload(GFP_KERNEL);
140 	spin_lock(&tiqn_lock);
141 
142 	ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
143 	if (ret < 0) {
144 		pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
145 		spin_unlock(&tiqn_lock);
146 		idr_preload_end();
147 		kfree(tiqn);
148 		return ERR_PTR(ret);
149 	}
150 	tiqn->tiqn_index = ret;
151 	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
152 
153 	spin_unlock(&tiqn_lock);
154 	idr_preload_end();
155 
156 	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
157 
158 	return tiqn;
159 
160 }
161 
162 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
163 {
164 	/*
165 	 * Wait for accesses to said struct iscsi_tiqn to end.
166 	 */
167 	spin_lock(&tiqn->tiqn_state_lock);
168 	while (tiqn->tiqn_access_count != 0) {
169 		spin_unlock(&tiqn->tiqn_state_lock);
170 		msleep(10);
171 		spin_lock(&tiqn->tiqn_state_lock);
172 	}
173 	spin_unlock(&tiqn->tiqn_state_lock);
174 }
175 
/*
 * Shut down and free a target IQN: mark it SHUTDOWN, drain outstanding
 * login references, unpublish it and free it.
 */
void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
	 * while holding tiqn->tiqn_state_lock.  This means that all subsequent
	 * attempts to access this struct iscsi_tiqn will fail from both transport
	 * fabric and control code paths.
	 */
	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
		pr_err("iscsit_set_tiqn_shutdown() failed\n");
		return;
	}

	/* Block until every iscsit_get_tiqn_for_login() reference is dropped. */
	iscsit_wait_for_tiqn(tiqn);

	/* Unpublish from the global list and index before freeing. */
	spin_lock(&tiqn_lock);
	list_del(&tiqn->tiqn_list);
	idr_remove(&tiqn_idr, tiqn->tiqn_index);
	spin_unlock(&tiqn_lock);

	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
			tiqn->tiqn);
	kfree(tiqn);
}
200 
/*
 * Gate a login attempt on the portal + portal group pair: both must be
 * ACTIVE, and logins across the TIQN+TPG tuple are serialized through
 * tpg->np_login_sem.
 *
 * Returns 0 with np_login_sem held on success, -1 otherwise (portal or TPG
 * inactive, or the semaphore wait was interrupted).
 */
int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
{
	int ret;
	/*
	 * Determine if the network portal is accepting storage traffic.
	 */
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
		spin_unlock_bh(&np->np_thread_lock);
		return -1;
	}
	spin_unlock_bh(&np->np_thread_lock);
	/*
	 * Determine if the portal group is accepting storage traffic.
	 */
	spin_lock_bh(&tpg->tpg_state_lock);
	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
		spin_unlock_bh(&tpg->tpg_state_lock);
		return -1;
	}
	spin_unlock_bh(&tpg->tpg_state_lock);

	/*
	 * Here we serialize access across the TIQN+TPG Tuple.
	 */
	ret = down_interruptible(&tpg->np_login_sem);
	if (ret != 0)
		return -1;

	/*
	 * The TPG may have gone inactive while we slept on the semaphore,
	 * so re-check its state before letting the login proceed.
	 */
	spin_lock_bh(&tpg->tpg_state_lock);
	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
		spin_unlock_bh(&tpg->tpg_state_lock);
		up(&tpg->np_login_sem);
		return -1;
	}
	spin_unlock_bh(&tpg->tpg_state_lock);

	return 0;
}
240 
241 void iscsit_login_kref_put(struct kref *kref)
242 {
243 	struct iscsi_tpg_np *tpg_np = container_of(kref,
244 				struct iscsi_tpg_np, tpg_np_kref);
245 
246 	complete(&tpg_np->tpg_np_comp);
247 }
248 
249 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
250 		       struct iscsi_tpg_np *tpg_np)
251 {
252 	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
253 
254 	up(&tpg->np_login_sem);
255 
256 	if (tpg_np)
257 		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
258 
259 	if (tiqn)
260 		iscsit_put_tiqn_for_login(tiqn);
261 
262 	return 0;
263 }
264 
265 bool iscsit_check_np_match(
266 	struct sockaddr_storage *sockaddr,
267 	struct iscsi_np *np,
268 	int network_transport)
269 {
270 	struct sockaddr_in *sock_in, *sock_in_e;
271 	struct sockaddr_in6 *sock_in6, *sock_in6_e;
272 	bool ip_match = false;
273 	u16 port, port_e;
274 
275 	if (sockaddr->ss_family == AF_INET6) {
276 		sock_in6 = (struct sockaddr_in6 *)sockaddr;
277 		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
278 
279 		if (!memcmp(&sock_in6->sin6_addr.in6_u,
280 			    &sock_in6_e->sin6_addr.in6_u,
281 			    sizeof(struct in6_addr)))
282 			ip_match = true;
283 
284 		port = ntohs(sock_in6->sin6_port);
285 		port_e = ntohs(sock_in6_e->sin6_port);
286 	} else {
287 		sock_in = (struct sockaddr_in *)sockaddr;
288 		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
289 
290 		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
291 			ip_match = true;
292 
293 		port = ntohs(sock_in->sin_port);
294 		port_e = ntohs(sock_in_e->sin_port);
295 	}
296 
297 	if (ip_match && (port_e == port) &&
298 	    (np->np_network_transport == network_transport))
299 		return true;
300 
301 	return false;
302 }
303 
/*
 * Search g_np_list for an ACTIVE portal matching @sockaddr and
 * @network_transport.  Caller must hold np_lock (asserted below).
 *
 * On a match, np_exports is incremented under np_thread_lock before the
 * portal is returned, pinning it against concurrent iscsit_del_np().
 * Returns NULL when no active match exists.
 */
static struct iscsi_np *iscsit_get_np(
	struct sockaddr_storage *sockaddr,
	int network_transport)
{
	struct iscsi_np *np;
	bool match;

	lockdep_assert_held(&np_lock);

	list_for_each_entry(np, &g_np_list, np_list) {
		spin_lock_bh(&np->np_thread_lock);
		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
			spin_unlock_bh(&np->np_thread_lock);
			continue;
		}

		match = iscsit_check_np_match(sockaddr, np, network_transport);
		if (match) {
			/*
			 * Increment the np_exports reference count now to
			 * prevent iscsit_del_np() below from being called
			 * while iscsi_tpg_add_network_portal() is called.
			 */
			np->np_exports++;
			spin_unlock_bh(&np->np_thread_lock);
			return np;
		}
		spin_unlock_bh(&np->np_thread_lock);
	}

	return NULL;
}
336 
337 struct iscsi_np *iscsit_add_np(
338 	struct sockaddr_storage *sockaddr,
339 	int network_transport)
340 {
341 	struct iscsi_np *np;
342 	int ret;
343 
344 	mutex_lock(&np_lock);
345 
346 	/*
347 	 * Locate the existing struct iscsi_np if already active..
348 	 */
349 	np = iscsit_get_np(sockaddr, network_transport);
350 	if (np) {
351 		mutex_unlock(&np_lock);
352 		return np;
353 	}
354 
355 	np = kzalloc(sizeof(*np), GFP_KERNEL);
356 	if (!np) {
357 		mutex_unlock(&np_lock);
358 		return ERR_PTR(-ENOMEM);
359 	}
360 
361 	np->np_flags |= NPF_IP_NETWORK;
362 	np->np_network_transport = network_transport;
363 	spin_lock_init(&np->np_thread_lock);
364 	init_completion(&np->np_restart_comp);
365 	INIT_LIST_HEAD(&np->np_list);
366 
367 	ret = iscsi_target_setup_login_socket(np, sockaddr);
368 	if (ret != 0) {
369 		kfree(np);
370 		mutex_unlock(&np_lock);
371 		return ERR_PTR(ret);
372 	}
373 
374 	np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
375 	if (IS_ERR(np->np_thread)) {
376 		pr_err("Unable to create kthread: iscsi_np\n");
377 		ret = PTR_ERR(np->np_thread);
378 		kfree(np);
379 		mutex_unlock(&np_lock);
380 		return ERR_PTR(ret);
381 	}
382 	/*
383 	 * Increment the np_exports reference count now to prevent
384 	 * iscsit_del_np() below from being run while a new call to
385 	 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
386 	 * active.  We don't need to hold np->np_thread_lock at this
387 	 * point because iscsi_np has not been added to g_np_list yet.
388 	 */
389 	np->np_exports = 1;
390 	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
391 
392 	list_add_tail(&np->np_list, &g_np_list);
393 	mutex_unlock(&np_lock);
394 
395 	pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
396 		&np->np_sockaddr, np->np_transport->name);
397 
398 	return np;
399 }
400 
/*
 * Force the login thread for @np back to its accept loop (or wait for it
 * to quiesce when @shutdown is set): mark the thread state RESET, bump
 * np_reset_count so the login path can observe the reset, kick the thread
 * out of blocking calls with SIGINT and wait for np_restart_comp.
 *
 * Always returns 0.
 */
int iscsit_reset_np_thread(
	struct iscsi_np *np,
	struct iscsi_tpg_np *tpg_np,
	struct iscsi_portal_group *tpg,
	bool shutdown)
{
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
		/* Nothing to reset. */
		spin_unlock_bh(&np->np_thread_lock);
		return 0;
	}
	np->np_thread_state = ISCSI_NP_THREAD_RESET;
	atomic_inc(&np->np_reset_count);

	if (np->np_thread) {
		spin_unlock_bh(&np->np_thread_lock);
		/* Interrupt a login thread blocked in the accept path. */
		send_sig(SIGINT, np->np_thread, 1);
		wait_for_completion(&np->np_restart_comp);
		spin_lock_bh(&np->np_thread_lock);
	}
	spin_unlock_bh(&np->np_thread_lock);

	if (tpg_np && shutdown) {
		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);

		/* Wait for the final kref to fire iscsit_login_kref_put(). */
		wait_for_completion(&tpg_np->tpg_np_comp);
	}

	return 0;
}
431 
432 static void iscsit_free_np(struct iscsi_np *np)
433 {
434 	if (np->np_socket)
435 		sock_release(np->np_socket);
436 }
437 
/*
 * Drop one export reference on @np.  When the last export goes away, stop
 * the login thread, release the transport resources, unlink the portal
 * from g_np_list and free it.  Always returns 0.
 */
int iscsit_del_np(struct iscsi_np *np)
{
	spin_lock_bh(&np->np_thread_lock);
	np->np_exports--;
	if (np->np_exports) {
		/* Still exported by other TPGs; keep the portal alive. */
		np->enabled = true;
		spin_unlock_bh(&np->np_thread_lock);
		return 0;
	}
	np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
	spin_unlock_bh(&np->np_thread_lock);

	if (np->np_thread) {
		/*
		 * We need to send the signal to wakeup Linux/Net
		 * which may be sleeping in sock_accept()..
		 */
		send_sig(SIGINT, np->np_thread, 1);
		kthread_stop(np->np_thread);
		np->np_thread = NULL;
	}

	/* Let the transport release its socket/resources for this portal. */
	np->np_transport->iscsit_free_np(np);

	mutex_lock(&np_lock);
	list_del(&np->np_list);
	mutex_unlock(&np_lock);

	pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
		&np->np_sockaddr, np->np_transport->name);

	iscsit_put_transport(np->np_transport);
	kfree(np);
	return 0;
}
473 
static void iscsit_get_rx_pdu(struct iscsit_conn *);

/*
 * Default ->iscsit_queue_data_in()/->iscsit_queue_status() callback: hand
 * the command to its connection's response queue in its current i_state.
 */
int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
EXPORT_SYMBOL(iscsit_queue_rsp);
481 
/*
 * ->iscsit_aborted_task() callback: unlink the command from its
 * connection's command list (if still linked) and release its iscsit
 * resources.
 */
void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	__iscsit_free_cmd(cmd, true);
}
EXPORT_SYMBOL(iscsit_aborted_task);
492 
static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
				      u32, u32, const void *, void *);
static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);

/*
 * Transmit a non-DataIN PDU from flat buffers: the header in cmd->pdu,
 * optional HeaderDigest, an optional data segment with pad bytes, and an
 * optional DataDigest, gathered into cmd->iov_misc.
 *
 * Returns 0 on success or the negative result of iscsit_send_tx_data()
 * after waiting for TCP state to settle.
 */
static int
iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			  const void *data_buf, u32 data_buf_len)
{
	struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
	struct kvec *iov;
	u32 niov = 0, tx_size = ISCSI_HDR_LEN;
	int ret;

	/* iovec 0: the PDU header (HeaderDigest CRC appended if negotiated). */
	iov = &cmd->iov_misc[0];
	iov[niov].iov_base	= cmd->pdu;
	iov[niov++].iov_len	= ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		/* The CRC lives directly after the header inside cmd->pdu. */
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
					  ISCSI_HDR_LEN, 0, NULL,
					  header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest"
			 " to opcode 0x%x 0x%08x\n",
			 hdr->opcode, *header_digest);
	}

	if (data_buf_len) {
		/* Data segments are padded out to a 4-byte boundary. */
		u32 padding = ((-data_buf_len) & 3);

		iov[niov].iov_base	= (void *)data_buf;
		iov[niov++].iov_len	= data_buf_len;
		tx_size += data_buf_len;

		if (padding != 0) {
			iov[niov].iov_base = &cmd->pad_bytes;
			iov[niov++].iov_len = padding;
			tx_size += padding;
			pr_debug("Attaching %u additional"
				 " padding bytes.\n", padding);
		}

		if (conn->conn_ops->DataDigest) {
			/* DataDigest covers the data segment plus padding. */
			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
						  data_buf, data_buf_len,
						  padding, &cmd->pad_bytes,
						  &cmd->data_crc);

			iov[niov].iov_base = &cmd->data_crc;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			tx_size += ISCSI_CRC_LEN;
			pr_debug("Attached DataDigest for %u"
				 " bytes opcode 0x%x, CRC 0x%08x\n",
				 data_buf_len, hdr->opcode, cmd->data_crc);
		}
	}

	cmd->iov_misc_count = niov;
	cmd->tx_size = tx_size;

	ret = iscsit_send_tx_data(cmd, conn, 1);
	if (ret < 0) {
		iscsit_tx_thread_wait_for_tcp(conn);
		return ret;
	}

	return 0;
}
565 
static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
			    u32 data_offset, u32 data_length);
static void iscsit_unmap_iovec(struct iscsit_cmd *);
static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
				    u32, u32, u32, u8 *);
/*
 * Transmit one DataIN PDU: the header (plus optional HeaderDigest) from
 * cmd->pdu, the payload mapped straight from the command's scatterlist via
 * iscsit_map_iovec(), then optional padding and DataDigest, sent with
 * iscsit_fe_sendpage_sg().
 *
 * Returns 0 on success, -1 if mapping the payload fails, or the negative
 * send result after waiting for TCP state to settle.
 */
static int
iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
		       const struct iscsi_datain *datain)
{
	struct kvec *iov;
	u32 iov_count = 0, tx_size = 0;
	int ret, iov_ret;

	iov = &cmd->iov_data[0];
	iov[iov_count].iov_base	= cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		/* The CRC lives directly after the header inside cmd->pdu. */
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
					  ISCSI_HDR_LEN, 0, NULL,
					  header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
			 *header_digest);
	}

	/*
	 * Budget leaves two iovec slots past the payload for the padding
	 * and DataDigest entries added below, hence (iov_count + 2).
	 */
	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
				   cmd->orig_iov_data_count - (iov_count + 2),
				   datain->offset, datain->length);
	if (iov_ret < 0)
		return -1;

	iov_count += iov_ret;
	tx_size += datain->length;

	/* Pad the data segment to a 4-byte boundary. */
	cmd->padding = ((-datain->length) & 3);
	if (cmd->padding) {
		iov[iov_count].iov_base		= cmd->pad_bytes;
		iov[iov_count++].iov_len	= cmd->padding;
		tx_size += cmd->padding;

		pr_debug("Attaching %u padding bytes\n", cmd->padding);
	}

	if (conn->conn_ops->DataDigest) {
		/* Digest is computed over the scatterlist, padding included. */
		cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
							 cmd, datain->offset,
							 datain->length,
							 cmd->padding,
							 cmd->pad_bytes);

		iov[iov_count].iov_base	= &cmd->data_crc;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
			 datain->length + cmd->padding, cmd->data_crc);
	}

	cmd->iov_data_count = iov_count;
	cmd->tx_size = tx_size;

	ret = iscsit_fe_sendpage_sg(cmd, conn);

	/* Drop the kmap()s taken by iscsit_map_iovec() regardless of result. */
	iscsit_unmap_iovec(cmd);

	if (ret < 0) {
		iscsit_tx_thread_wait_for_tcp(conn);
		return ret;
	}

	return 0;
}
645 
646 static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
647 			   struct iscsi_datain_req *dr, const void *buf,
648 			   u32 buf_len)
649 {
650 	if (dr)
651 		return iscsit_xmit_datain_pdu(conn, cmd, buf);
652 	else
653 		return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
654 }
655 
/*
 * Default ->iscsit_get_sup_prot_ops() for iSCSI/TCP: always reports
 * TARGET_PROT_NORMAL (no extended protection operations).
 */
static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn)
{
	return TARGET_PROT_NORMAL;
}
660 
/*
 * Built-in transport definition for traditional iSCSI over TCP; registered
 * via iscsit_register_transport() in iscsi_target_init_module().
 */
static struct iscsit_transport iscsi_target_transport = {
	.name			= "iSCSI/TCP",
	.transport_type		= ISCSI_TCP,
	.rdma_shutdown		= false,
	.owner			= NULL,
	.iscsit_setup_np	= iscsit_setup_np,
	.iscsit_accept_np	= iscsit_accept_np,
	.iscsit_free_np		= iscsit_free_np,
	.iscsit_get_login_rx	= iscsit_get_login_rx,
	.iscsit_put_login_tx	= iscsit_put_login_tx,
	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
	.iscsit_immediate_queue	= iscsit_immediate_queue,
	.iscsit_response_queue	= iscsit_response_queue,
	.iscsit_queue_data_in	= iscsit_queue_rsp,
	.iscsit_queue_status	= iscsit_queue_rsp,
	.iscsit_aborted_task	= iscsit_aborted_task,
	.iscsit_xmit_pdu	= iscsit_xmit_pdu,
	.iscsit_get_rx_pdu	= iscsit_get_rx_pdu,
	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};
681 
682 static int __init iscsi_target_init_module(void)
683 {
684 	int ret = 0, size;
685 
686 	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
687 	iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
688 	if (!iscsit_global)
689 		return -1;
690 
691 	spin_lock_init(&iscsit_global->ts_bitmap_lock);
692 	mutex_init(&auth_id_lock);
693 	idr_init(&tiqn_idr);
694 
695 	ret = target_register_template(&iscsi_ops);
696 	if (ret)
697 		goto out;
698 
699 	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
700 	iscsit_global->ts_bitmap = vzalloc(size);
701 	if (!iscsit_global->ts_bitmap)
702 		goto configfs_out;
703 
704 	if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
705 		pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
706 		goto bitmap_out;
707 	}
708 	cpumask_setall(iscsit_global->allowed_cpumask);
709 
710 	lio_qr_cache = kmem_cache_create("lio_qr_cache",
711 			sizeof(struct iscsi_queue_req),
712 			__alignof__(struct iscsi_queue_req), 0, NULL);
713 	if (!lio_qr_cache) {
714 		pr_err("Unable to kmem_cache_create() for"
715 				" lio_qr_cache\n");
716 		goto cpumask_out;
717 	}
718 
719 	lio_dr_cache = kmem_cache_create("lio_dr_cache",
720 			sizeof(struct iscsi_datain_req),
721 			__alignof__(struct iscsi_datain_req), 0, NULL);
722 	if (!lio_dr_cache) {
723 		pr_err("Unable to kmem_cache_create() for"
724 				" lio_dr_cache\n");
725 		goto qr_out;
726 	}
727 
728 	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
729 			sizeof(struct iscsi_ooo_cmdsn),
730 			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
731 	if (!lio_ooo_cache) {
732 		pr_err("Unable to kmem_cache_create() for"
733 				" lio_ooo_cache\n");
734 		goto dr_out;
735 	}
736 
737 	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
738 			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
739 			0, NULL);
740 	if (!lio_r2t_cache) {
741 		pr_err("Unable to kmem_cache_create() for"
742 				" lio_r2t_cache\n");
743 		goto ooo_out;
744 	}
745 
746 	iscsit_register_transport(&iscsi_target_transport);
747 
748 	if (iscsit_load_discovery_tpg() < 0)
749 		goto r2t_out;
750 
751 	return ret;
752 r2t_out:
753 	iscsit_unregister_transport(&iscsi_target_transport);
754 	kmem_cache_destroy(lio_r2t_cache);
755 ooo_out:
756 	kmem_cache_destroy(lio_ooo_cache);
757 dr_out:
758 	kmem_cache_destroy(lio_dr_cache);
759 qr_out:
760 	kmem_cache_destroy(lio_qr_cache);
761 cpumask_out:
762 	free_cpumask_var(iscsit_global->allowed_cpumask);
763 bitmap_out:
764 	vfree(iscsit_global->ts_bitmap);
765 configfs_out:
766 	/* XXX: this probably wants it to be it's own unwind step.. */
767 	if (iscsit_global->discovery_tpg)
768 		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
769 	target_unregister_template(&iscsi_ops);
770 out:
771 	kfree(iscsit_global);
772 	return -ENOMEM;
773 }
774 
/*
 * Module exit: tear down in roughly the reverse order of
 * iscsi_target_init_module().
 */
static void __exit iscsi_target_cleanup_module(void)
{
	iscsit_release_discovery_tpg();
	iscsit_unregister_transport(&iscsi_target_transport);
	kmem_cache_destroy(lio_qr_cache);
	kmem_cache_destroy(lio_dr_cache);
	kmem_cache_destroy(lio_ooo_cache);
	kmem_cache_destroy(lio_r2t_cache);

	/*
	 * Shutdown discovery sessions and disable discovery TPG
	 */
	if (iscsit_global->discovery_tpg)
		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);

	target_unregister_template(&iscsi_ops);

	free_cpumask_var(iscsit_global->allowed_cpumask);
	vfree(iscsit_global->ts_bitmap);
	kfree(iscsit_global);
}
796 
/*
 * Allocate a new command carrying an ISCSI_OP_REJECT response for the PDU
 * header in @buf, link it to the connection and queue it for transmission.
 *
 * Always returns -1 so callers can propagate a connection-level failure.
 */
int iscsit_add_reject(
	struct iscsit_conn *conn,
	u8 reason,
	unsigned char *buf)
{
	struct iscsit_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	/* The reject PDU echoes back the offending header. */
	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);

	return -1;
}
EXPORT_SYMBOL(iscsit_add_reject);
828 
/*
 * Convert an already-allocated command into an ISCSI_OP_REJECT response
 * for the PDU header in @buf and queue it; when @add_to_conn is set, the
 * command is also linked onto the connection's command list first.
 *
 * If the command's se_cmd was already set up (se_tfo != NULL), the
 * target-core reference taken during setup is dropped here as well.
 *
 * Always returns -1 so callers can propagate the failure.
 */
static int iscsit_add_reject_from_cmd(
	struct iscsit_cmd *cmd,
	u8 reason,
	bool add_to_conn,
	unsigned char *buf)
{
	struct iscsit_conn *conn;
	/* Snapshot before queueing; the queued cmd may be processed/freed. */
	const bool do_put = cmd->se_cmd.se_tfo != NULL;

	if (!cmd->conn) {
		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
				cmd->init_task_tag);
		return -1;
	}
	conn = cmd->conn;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	/* The reject PDU echoes back the offending header. */
	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	if (add_to_conn) {
		spin_lock_bh(&conn->cmd_lock);
		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
		spin_unlock_bh(&conn->cmd_lock);
	}

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	/*
	 * Perform the kref_put now if se_cmd has already been setup by
	 * iscsit_setup_scsi_cmd()
	 */
	if (do_put) {
		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
		target_put_sess_cmd(&cmd->se_cmd);
	}
	return -1;
}
873 
/* Reject @cmd and keep it linked on the connection's command list. */
static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason,
				 unsigned char *buf)
{
	return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
}
879 
/* Reject @cmd without linking it onto the connection's command list. */
int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf)
{
	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
EXPORT_SYMBOL(iscsit_reject_cmd);
885 
/*
 * Map some portion of the allocated scatterlist to an iovec, suitable for
 * kernel sockets to copy data in/out.
 *
 * Starting at byte @data_offset into the command's t_data_sg list, kmap()s
 * pages and fills @iov (at most @nvec entries) until @data_length bytes
 * are covered.  Records the first sg/offset and the kmapped entry count in
 * @cmd so iscsit_unmap_iovec() can undo the mappings.
 *
 * Returns the number of iovec entries filled, 0 for a zero-length request,
 * or -1 when the request overruns the scatterlist or @nvec.
 */
static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
			    u32 data_offset, u32 data_length)
{
	u32 i = 0, orig_data_length = data_length;
	struct scatterlist *sg;
	unsigned int page_off;

	/*
	 * We know each entry in t_data_sg contains a page.
	 */
	u32 ent = data_offset / PAGE_SIZE;

	if (!data_length)
		return 0;

	if (ent >= cmd->se_cmd.t_data_nents) {
		pr_err("Initial page entry out-of-bounds\n");
		goto overflow;
	}

	sg = &cmd->se_cmd.t_data_sg[ent];
	page_off = (data_offset % PAGE_SIZE);

	/* Remembered so iscsit_unmap_iovec() knows where to kunmap() from. */
	cmd->first_data_sg = sg;
	cmd->first_data_sg_off = page_off;

	while (data_length) {
		u32 cur_len;

		if (WARN_ON_ONCE(!sg || i >= nvec))
			goto overflow;

		cur_len = min_t(u32, data_length, sg->length - page_off);

		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
		iov[i].iov_len = cur_len;

		data_length -= cur_len;
		/* Only the first entry carries an intra-page offset. */
		page_off = 0;
		sg = sg_next(sg);
		i++;
	}

	cmd->kmapped_nents = i;

	return i;

overflow:
	/* Dump the sg-list to aid debugging the bogus offset/length. */
	pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
	       data_offset, orig_data_length, i, nvec);
	for_each_sg(cmd->se_cmd.t_data_sg, sg,
		    cmd->se_cmd.t_data_nents, i) {
		pr_err("[%d] off %d len %d\n",
		       i, sg->offset, sg->length);
	}
	return -1;
}
947 
948 static void iscsit_unmap_iovec(struct iscsit_cmd *cmd)
949 {
950 	u32 i;
951 	struct scatterlist *sg;
952 
953 	sg = cmd->first_data_sg;
954 
955 	for (i = 0; i < cmd->kmapped_nents; i++)
956 		kunmap(sg_page(&sg[i]));
957 }
958 
/*
 * Handle an ExpStatSN update from the initiator: commands that were sent
 * status with a StatSN older than @exp_statsn are implicitly acknowledged
 * and can be freed.
 *
 * Matching commands are first moved to a private list under conn->cmd_lock
 * (with the nested per-command istate_lock), then freed after the lock is
 * dropped.
 */
static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn)
{
	LIST_HEAD(ack_list);
	struct iscsit_cmd *cmd, *cmd_p;

	conn->exp_statsn = exp_statsn;

	/* Sessions with RDMAExtensions skip SN-based acknowledgement here. */
	if (conn->sess->sess_ops->RDMAExtensions)
		return;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
		spin_lock(&cmd->istate_lock);
		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
			cmd->i_state = ISTATE_REMOVE;
			spin_unlock(&cmd->istate_lock);
			list_move_tail(&cmd->i_conn_node, &ack_list);
			continue;
		}
		spin_unlock(&cmd->istate_lock);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		iscsit_free_cmd(cmd, false);
	}
}
988 
989 static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd)
990 {
991 	u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
992 
993 	iov_count += ISCSI_IOV_DATA_BUFFER;
994 	cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
995 	if (!cmd->iov_data)
996 		return -ENOMEM;
997 
998 	cmd->orig_iov_data_count = iov_count;
999 	return 0;
1000 }
1001 
1002 int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1003 			  unsigned char *buf)
1004 {
1005 	int data_direction, payload_length;
1006 	struct iscsi_ecdb_ahdr *ecdb_ahdr;
1007 	struct iscsi_scsi_req *hdr;
1008 	int iscsi_task_attr;
1009 	unsigned char *cdb;
1010 	int sam_task_attr;
1011 
1012 	atomic_long_inc(&conn->sess->cmd_pdus);
1013 
1014 	hdr			= (struct iscsi_scsi_req *) buf;
1015 	payload_length		= ntoh24(hdr->dlength);
1016 
1017 	/* FIXME; Add checks for AdditionalHeaderSegment */
1018 
1019 	if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
1020 	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1021 		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
1022 				" not set. Bad iSCSI Initiator.\n");
1023 		return iscsit_add_reject_cmd(cmd,
1024 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1025 	}
1026 
1027 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
1028 	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
1029 		/*
1030 		 * From RFC-3720 Section 10.3.1:
1031 		 *
1032 		 * "Either or both of R and W MAY be 1 when either the
1033 		 *  Expected Data Transfer Length and/or Bidirectional Read
1034 		 *  Expected Data Transfer Length are 0"
1035 		 *
1036 		 * For this case, go ahead and clear the unnecssary bits
1037 		 * to avoid any confusion with ->data_direction.
1038 		 */
1039 		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
1040 		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
1041 
1042 		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
1043 			" set when Expected Data Transfer Length is 0 for"
1044 			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
1045 	}
1046 
1047 	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
1048 	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
1049 		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
1050 			" MUST be set if Expected Data Transfer Length is not 0."
1051 			" Bad iSCSI Initiator\n");
1052 		return iscsit_add_reject_cmd(cmd,
1053 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1054 	}
1055 
1056 	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
1057 	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
1058 		pr_err("Bidirectional operations not supported!\n");
1059 		return iscsit_add_reject_cmd(cmd,
1060 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1061 	}
1062 
1063 	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1064 		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
1065 				" Scsi Command PDU.\n");
1066 		return iscsit_add_reject_cmd(cmd,
1067 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1068 	}
1069 
1070 	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
1071 		pr_err("ImmediateData=No but DataSegmentLength=%u,"
1072 			" protocol error.\n", payload_length);
1073 		return iscsit_add_reject_cmd(cmd,
1074 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1075 	}
1076 
1077 	if ((be32_to_cpu(hdr->data_length) == payload_length) &&
1078 	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
1079 		pr_err("Expected Data Transfer Length and Length of"
1080 			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
1081 			" bit is not set protocol error\n");
1082 		return iscsit_add_reject_cmd(cmd,
1083 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1084 	}
1085 
1086 	if (payload_length > be32_to_cpu(hdr->data_length)) {
1087 		pr_err("DataSegmentLength: %u is greater than"
1088 			" EDTL: %u, protocol error.\n", payload_length,
1089 				hdr->data_length);
1090 		return iscsit_add_reject_cmd(cmd,
1091 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1092 	}
1093 
1094 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1095 		pr_err("DataSegmentLength: %u is greater than"
1096 			" MaxXmitDataSegmentLength: %u, protocol error.\n",
1097 			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1098 		return iscsit_add_reject_cmd(cmd,
1099 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1100 	}
1101 
1102 	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
1103 		pr_err("DataSegmentLength: %u is greater than"
1104 			" FirstBurstLength: %u, protocol error.\n",
1105 			payload_length, conn->sess->sess_ops->FirstBurstLength);
1106 		return iscsit_add_reject_cmd(cmd,
1107 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1108 	}
1109 
1110 	cdb = hdr->cdb;
1111 
1112 	if (hdr->hlength) {
1113 		ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
1114 		if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
1115 			pr_err("Additional Header Segment type %d not supported!\n",
1116 			       ecdb_ahdr->ahstype);
1117 			return iscsit_add_reject_cmd(cmd,
1118 				ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
1119 		}
1120 
1121 		cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
1122 			      GFP_KERNEL);
1123 		if (cdb == NULL)
1124 			return iscsit_add_reject_cmd(cmd,
1125 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1126 		memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
1127 		memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
1128 		       be16_to_cpu(ecdb_ahdr->ahslength) - 1);
1129 	}
1130 
1131 	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
1132 			 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
1133 			  DMA_NONE;
1134 
1135 	cmd->data_direction = data_direction;
1136 	iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
1137 	/*
1138 	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
1139 	 */
1140 	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
1141 	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
1142 		sam_task_attr = TCM_SIMPLE_TAG;
1143 	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
1144 		sam_task_attr = TCM_ORDERED_TAG;
1145 	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
1146 		sam_task_attr = TCM_HEAD_TAG;
1147 	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
1148 		sam_task_attr = TCM_ACA_TAG;
1149 	else {
1150 		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
1151 			" TCM_SIMPLE_TAG\n", iscsi_task_attr);
1152 		sam_task_attr = TCM_SIMPLE_TAG;
1153 	}
1154 
1155 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_CMD;
1156 	cmd->i_state		= ISTATE_NEW_CMD;
1157 	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1158 	cmd->immediate_data	= (payload_length) ? 1 : 0;
1159 	cmd->unsolicited_data	= ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
1160 				     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
1161 	if (cmd->unsolicited_data)
1162 		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
1163 
1164 	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1165 	if (hdr->flags & ISCSI_FLAG_CMD_READ)
1166 		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
1167 	else
1168 		cmd->targ_xfer_tag = 0xFFFFFFFF;
1169 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
1170 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
1171 	cmd->first_burst_len	= payload_length;
1172 
1173 	if (!conn->sess->sess_ops->RDMAExtensions &&
1174 	     cmd->data_direction == DMA_FROM_DEVICE) {
1175 		struct iscsi_datain_req *dr;
1176 
1177 		dr = iscsit_allocate_datain_req();
1178 		if (!dr) {
1179 			if (cdb != hdr->cdb)
1180 				kfree(cdb);
1181 			return iscsit_add_reject_cmd(cmd,
1182 					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1183 		}
1184 
1185 		iscsit_attach_datain_req(cmd, dr);
1186 	}
1187 
1188 	/*
1189 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
1190 	 */
1191 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
1192 			  conn->sess->se_sess, be32_to_cpu(hdr->data_length),
1193 			  cmd->data_direction, sam_task_attr,
1194 			  cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
1195 			  conn->cmd_cnt);
1196 
1197 	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
1198 		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
1199 		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
1200 		conn->cid);
1201 
1202 	target_get_sess_cmd(&cmd->se_cmd, true);
1203 
1204 	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
1205 	cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
1206 						GFP_KERNEL);
1207 
1208 	if (cdb != hdr->cdb)
1209 		kfree(cdb);
1210 
1211 	if (cmd->sense_reason) {
1212 		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
1213 			return iscsit_add_reject_cmd(cmd,
1214 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1215 		}
1216 
1217 		goto attach_cmd;
1218 	}
1219 
1220 	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
1221 	if (cmd->sense_reason)
1222 		goto attach_cmd;
1223 
1224 	cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
1225 	if (cmd->sense_reason)
1226 		goto attach_cmd;
1227 
1228 	if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
1229 		return iscsit_add_reject_cmd(cmd,
1230 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1231 	}
1232 
1233 attach_cmd:
1234 	spin_lock_bh(&conn->cmd_lock);
1235 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1236 	spin_unlock_bh(&conn->cmd_lock);
1237 	return 0;
1238 }
1239 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
1240 
/*
 * Arm a WRITE command for unsolicited DataOut: set up the expected
 * DataOut sequence values, then start the dataout timer.  The timer
 * start is serialized against other timer users by
 * cmd->dataout_timeout_lock.
 */
void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd)
{
	iscsit_set_dataout_sequence_values(cmd);

	spin_lock_bh(&cmd->dataout_timeout_lock);
	iscsit_start_dataout_timer(cmd, cmd->conn);
	spin_unlock_bh(&cmd->dataout_timeout_lock);
}
EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
1250 
/*
 * Second stage of SCSI Command PDU processing: perform CmdSN sequencing
 * (unless Immediate Data is attached), acknowledge ExpStatSN, and either
 * finish now or prepare for Immediate Data reception.
 *
 * Returns 0 when processing is complete, -1 on an unrecoverable CmdSN
 * error, or 1 when the caller must dump/receive the Immediate Data
 * payload (early CHECK_CONDITION or backend allocation failure).
 */
int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			    struct iscsi_scsi_req *hdr)
{
	int cmdsn_ret = 0;
	/*
	 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
	 * the Immediate Bit is not set, and no Immediate
	 * Data is attached.
	 *
	 * A PDU/CmdSN carrying Immediate Data can only
	 * be processed after the DataCRC has passed.
	 * If the DataCRC fails, the CmdSN MUST NOT
	 * be acknowledged. (See below)
	 */
	if (!cmd->immediate_data) {
		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
					(unsigned char *)hdr, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;
		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			/*
			 * Stale CmdSN: drop the reference taken via
			 * target_get_sess_cmd() during command setup.
			 */
			target_put_sess_cmd(&cmd->se_cmd);
			return 0;
		}
	}

	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

	/*
	 * If no Immediate Data is attached, it's OK to return now.
	 */
	if (!cmd->immediate_data) {
		if (!cmd->sense_reason && cmd->unsolicited_data)
			iscsit_set_unsolicited_dataout(cmd);
		if (!cmd->sense_reason)
			return 0;

		target_put_sess_cmd(&cmd->se_cmd);
		return 0;
	}

	/*
	 * Early CHECK_CONDITIONs with ImmediateData never make it to command
	 * execution.  These exceptions are processed in CmdSN order using
	 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
	 */
	if (cmd->sense_reason)
		return 1;
	/*
	 * Call directly into transport_generic_new_cmd() to perform
	 * the backend memory allocation.
	 */
	cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
	if (cmd->sense_reason)
		return 1;

	return 0;
}
EXPORT_SYMBOL(iscsit_process_scsi_cmd);
1309 
/*
 * Receive (or dump) the Immediate Data attached to a SCSI Command PDU,
 * then perform the CmdSN check that was deferred by
 * iscsit_process_scsi_cmd() -- a CmdSN carrying Immediate Data may only
 * be acknowledged once the DataCRC has passed.
 *
 * @dump_payload: true when the payload must be discarded (unsupported
 *                SAM WRITE opcode or earlier setup failure).
 *
 * Returns 0 on success (including a silently dropped ERL>=1 DataCRC
 * failure) or -1 when the connection cannot recover.
 */
static int
iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
			  bool dump_payload)
{
	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
	int rc;

	/*
	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
	 */
	if (dump_payload) {
		u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
				 cmd->first_burst_len);

		pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
			 cmd->se_cmd.data_length, cmd->write_data_done,
			 cmd->first_burst_len, length);
		rc = iscsit_dump_data_payload(cmd->conn, length, 1);
		pr_debug("Finished dumping immediate data\n");
		if (rc < 0)
			immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
	} else {
		immed_ret = iscsit_handle_immediate_data(cmd, hdr,
							 cmd->first_burst_len);
	}

	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
		/*
		 * A PDU/CmdSN carrying Immediate Data passed
		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
		 * Immediate Bit is not set.
		 */
		cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
					(unsigned char *)hdr, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			/* Drop the reference taken during command setup. */
			target_put_sess_cmd(&cmd->se_cmd);

			return 0;
		} else if (cmd->unsolicited_data)
			iscsit_set_unsolicited_dataout(cmd);

	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
		/*
		 * Immediate Data failed DataCRC and ERL>=1,
		 * silently drop this PDU and let the initiator
		 * plug the CmdSN gap.
		 *
		 * FIXME: Send Unsolicited NOPIN with reserved
		 * TTT here to help the initiator figure out
		 * the missing CmdSN, although they should be
		 * intelligent enough to determine the missing
		 * CmdSN and issue a retry to plug the sequence.
		 */
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
		return -1;

	return 0;
}
1373 
1374 static int
1375 iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1376 			   unsigned char *buf)
1377 {
1378 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1379 	int rc, immed_data;
1380 	bool dump_payload = false;
1381 
1382 	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1383 	if (rc < 0)
1384 		return 0;
1385 	/*
1386 	 * Allocation iovecs needed for struct socket operations for
1387 	 * traditional iSCSI block I/O.
1388 	 */
1389 	if (iscsit_allocate_iovecs(cmd) < 0) {
1390 		return iscsit_reject_cmd(cmd,
1391 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1392 	}
1393 	immed_data = cmd->immediate_data;
1394 
1395 	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1396 	if (rc < 0)
1397 		return rc;
1398 	else if (rc > 0)
1399 		dump_payload = true;
1400 
1401 	if (!immed_data)
1402 		return 0;
1403 
1404 	return iscsit_get_immediate_data(cmd, hdr, dump_payload);
1405 }
1406 
/*
 * Compute the DataDigest over a command's mapped data scatterlist.
 *
 * Hashing starts at cmd->first_data_sg / cmd->first_data_sg_off; a
 * partial first entry is fed through a temporary one-entry scatterlist,
 * then whole entries are consumed until data_length bytes have been
 * hashed.  Pad bytes, when present, are folded in during finalization.
 * Returns the computed digest value.
 *
 * NOTE(review): data_offset is unused here -- the start position comes
 * entirely from cmd->first_data_sg*, which the caller is assumed to
 * have set for the same offset.
 */
static u32 iscsit_do_crypto_hash_sg(
	struct ahash_request *hash,
	struct iscsit_cmd *cmd,
	u32 data_offset,
	u32 data_length,
	u32 padding,
	u8 *pad_bytes)
{
	u32 data_crc;
	struct scatterlist *sg;
	unsigned int page_off;

	crypto_ahash_init(hash);

	sg = cmd->first_data_sg;
	page_off = cmd->first_data_sg_off;

	if (data_length && page_off) {
		struct scatterlist first_sg;
		u32 len = min_t(u32, data_length, sg->length - page_off);

		sg_init_table(&first_sg, 1);
		sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);

		ahash_request_set_crypt(hash, &first_sg, NULL, len);
		crypto_ahash_update(hash);

		data_length -= len;
		sg = sg_next(sg);
	}

	while (data_length) {
		u32 cur_len = min_t(u32, data_length, sg->length);

		ahash_request_set_crypt(hash, sg, NULL, cur_len);
		crypto_ahash_update(hash);

		data_length -= cur_len;
		/* iscsit_map_iovec has already checked for invalid sg pointers */
		sg = sg_next(sg);
	}

	if (padding) {
		struct scatterlist pad_sg;

		sg_init_one(&pad_sg, pad_bytes, padding);
		ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
					padding);
		crypto_ahash_finup(hash);
	} else {
		ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
		crypto_ahash_final(hash);
	}

	return data_crc;
}
1463 
/*
 * Compute a digest over a contiguous buffer plus optional pad bytes in
 * a single crypto_ahash_digest() call, writing the result to *data_crc.
 * The second scatterlist entry is only populated when padding != 0; in
 * that case the total hashed length includes the pad bytes.
 */
static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
	const void *buf, u32 payload_length, u32 padding,
	const void *pad_bytes, void *data_crc)
{
	struct scatterlist sg[2];

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(sg, buf, payload_length);
	if (padding)
		sg_set_buf(sg + 1, pad_bytes, padding);

	ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);

	crypto_ahash_digest(hash);
}
1479 
/*
 * Validate a DataOut PDU header against the state of the command it
 * belongs to: duplicate/late DataOut detection, data direction, offset
 * bounds, unsolicited vs. solicited handling, delayed TASK_ABORTED
 * processing, and pre-payload sequence checks.
 *
 * *success is set to true only when the payload should actually be
 * received into the command's data buffer; in all other 0-return cases
 * the payload has been dumped or within-command recovery is active.
 * Returns 0, or -1 on a fatal error.
 */
int
__iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
			   struct iscsit_cmd *cmd, u32 payload_length,
			   bool *success)
{
	struct iscsi_data *hdr = buf;
	struct se_cmd *se_cmd;
	int rc;

	/* iSCSI write */
	atomic_long_add(payload_length, &conn->sess->rx_data_octets);

	pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
		hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
		payload_length, conn->cid);

	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
		pr_err("Command ITT: 0x%08x received DataOUT after"
			" last DataOUT received, dumping payload\n",
			cmd->init_task_tag);
		return iscsit_dump_data_payload(conn, payload_length, 1);
	}

	if (cmd->data_direction != DMA_TO_DEVICE) {
		pr_err("Command ITT: 0x%08x received DataOUT for a"
			" NON-WRITE command.\n", cmd->init_task_tag);
		return iscsit_dump_data_payload(conn, payload_length, 1);
	}
	se_cmd = &cmd->se_cmd;
	iscsit_mod_dataout_timer(cmd);

	/* Offset + length must stay within the command's EDTL. */
	if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
		pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
		       be32_to_cpu(hdr->offset), payload_length,
		       cmd->se_cmd.data_length);
		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (cmd->unsolicited_data) {
		int dump_unsolicited_data = 0;

		if (conn->sess->sess_ops->InitialR2T) {
			pr_err("Received unexpected unsolicited data"
				" while InitialR2T=Yes, protocol error.\n");
			transport_send_check_condition_and_sense(&cmd->se_cmd,
					TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
			return -1;
		}
		/*
		 * Special case for dealing with Unsolicited DataOUT
		 * and Unsupported SAM WRITE Opcodes and SE resource allocation
		 * failures;
		 */

		/* Something's amiss if we're not in WRITE_PENDING state... */
		WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
		if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
			dump_unsolicited_data = 1;

		if (dump_unsolicited_data) {
			/*
			 * Check if a delayed TASK_ABORTED status needs to
			 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
			 * received with the unsolicited data out.
			 */
			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
				iscsit_stop_dataout_timer(cmd);

			return iscsit_dump_data_payload(conn, payload_length, 1);
		}
	} else {
		/*
		 * For the normal solicited data path:
		 *
		 * Check for a delayed TASK_ABORTED status and dump any
		 * incoming data out payload if one exists.  Also, when the
		 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
		 * data out sequence, we decrement outstanding_r2ts.  Once
		 * outstanding_r2ts reaches zero, go ahead and send the delayed
		 * TASK_ABORTED status.
		 */
		if (se_cmd->transport_state & CMD_T_ABORTED) {
			if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
			    --cmd->outstanding_r2ts < 1)
				iscsit_stop_dataout_timer(cmd);

			return iscsit_dump_data_payload(conn, payload_length, 1);
		}
	}
	/*
	 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
	 * within-command recovery checks before receiving the payload.
	 */
	rc = iscsit_check_pre_dataout(cmd, buf);
	if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
		return 0;
	else if (rc == DATAOUT_CANNOT_RECOVER)
		return -1;
	*success = true;
	return 0;
}
EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
1583 
1584 int
1585 iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
1586 			 struct iscsit_cmd **out_cmd)
1587 {
1588 	struct iscsi_data *hdr = buf;
1589 	struct iscsit_cmd *cmd;
1590 	u32 payload_length = ntoh24(hdr->dlength);
1591 	int rc;
1592 	bool success = false;
1593 
1594 	if (!payload_length) {
1595 		pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
1596 		return 0;
1597 	}
1598 
1599 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1600 		pr_err_ratelimited("DataSegmentLength: %u is greater than"
1601 			" MaxXmitDataSegmentLength: %u\n", payload_length,
1602 			conn->conn_ops->MaxXmitDataSegmentLength);
1603 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
1604 	}
1605 
1606 	cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
1607 	if (!cmd)
1608 		return 0;
1609 
1610 	rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
1611 
1612 	if (success)
1613 		*out_cmd = cmd;
1614 
1615 	return rc;
1616 }
1617 EXPORT_SYMBOL(iscsit_check_dataout_hdr);
1618 
1619 static int
1620 iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1621 		   struct iscsi_data *hdr)
1622 {
1623 	struct kvec *iov;
1624 	u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
1625 	u32 payload_length;
1626 	int iov_ret, data_crc_failed = 0;
1627 
1628 	payload_length = min_t(u32, cmd->se_cmd.data_length,
1629 			       ntoh24(hdr->dlength));
1630 	rx_size += payload_length;
1631 	iov = &cmd->iov_data[0];
1632 
1633 	iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
1634 				   be32_to_cpu(hdr->offset), payload_length);
1635 	if (iov_ret < 0)
1636 		return -1;
1637 
1638 	iov_count += iov_ret;
1639 
1640 	padding = ((-payload_length) & 3);
1641 	if (padding != 0) {
1642 		iov[iov_count].iov_base	= cmd->pad_bytes;
1643 		iov[iov_count++].iov_len = padding;
1644 		rx_size += padding;
1645 		pr_debug("Receiving %u padding bytes.\n", padding);
1646 	}
1647 
1648 	if (conn->conn_ops->DataDigest) {
1649 		iov[iov_count].iov_base = &checksum;
1650 		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1651 		rx_size += ISCSI_CRC_LEN;
1652 	}
1653 
1654 	WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
1655 	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1656 
1657 	iscsit_unmap_iovec(cmd);
1658 
1659 	if (rx_got != rx_size)
1660 		return -1;
1661 
1662 	if (conn->conn_ops->DataDigest) {
1663 		u32 data_crc;
1664 
1665 		data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
1666 						    be32_to_cpu(hdr->offset),
1667 						    payload_length, padding,
1668 						    cmd->pad_bytes);
1669 
1670 		if (checksum != data_crc) {
1671 			pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1672 				" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1673 				" does not match computed 0x%08x\n",
1674 				hdr->itt, hdr->offset, payload_length,
1675 				hdr->datasn, checksum, data_crc);
1676 			data_crc_failed = 1;
1677 		} else {
1678 			pr_debug("Got CRC32C DataDigest 0x%08x for"
1679 				" %u bytes of Data Out\n", checksum,
1680 				payload_length);
1681 		}
1682 	}
1683 
1684 	return data_crc_failed;
1685 }
1686 
/*
 * Post-payload processing for a received DataOut PDU: update receive
 * state or perform within-command recovery, kick off the next R2T, or,
 * once the final DataOut of an unsolicited sequence has arrived, hand
 * the command to the backend via target_execute_cmd().
 *
 * Returns 0 on success or -1 when recovery is impossible.
 */
int
iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr,
			     bool data_crc_failed)
{
	struct iscsit_conn *conn = cmd->conn;
	int rc, ooo_cmdsn;
	/*
	 * Increment post receive data and CRC values or perform
	 * within-command recovery.
	 */
	rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
	if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
		return 0;
	else if (rc == DATAOUT_SEND_R2T) {
		iscsit_set_dataout_sequence_values(cmd);
		conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
	} else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
		/*
		 * Handle extra special case for out of order
		 * Unsolicited Data Out.
		 */
		spin_lock_bh(&cmd->istate_lock);
		ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);

		iscsit_stop_dataout_timer(cmd);
		/* Out-of-order CmdSN: execution is deferred until in order. */
		if (ooo_cmdsn)
			return 0;
		target_execute_cmd(&cmd->se_cmd);
		return 0;
	} else /* DATAOUT_CANNOT_RECOVER */
		return -1;

	return 0;
}
EXPORT_SYMBOL(iscsit_check_dataout_payload);
1725 
1726 static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf)
1727 {
1728 	struct iscsit_cmd *cmd = NULL;
1729 	struct iscsi_data *hdr = (struct iscsi_data *)buf;
1730 	int rc;
1731 	bool data_crc_failed = false;
1732 
1733 	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1734 	if (rc < 0)
1735 		return 0;
1736 	else if (!cmd)
1737 		return 0;
1738 
1739 	rc = iscsit_get_dataout(conn, cmd, hdr);
1740 	if (rc < 0)
1741 		return rc;
1742 	else if (rc > 0)
1743 		data_crc_failed = true;
1744 
1745 	return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
1746 }
1747 
/*
 * Validate an incoming NOPOUT PDU header, and for PDUs that are NOT a
 * response to an unsolicited NOPIN (i.e. TTT == 0xFFFFFFFF) initialize
 * the iscsit_cmd state used to send the NOPIN reply.
 *
 * NOTE(review): cmd may legitimately be NULL on the reject paths (each
 * has an explicit !cmd branch), but the final TTT == 0xFFFFFFFF block
 * dereferences cmd unconditionally -- callers are assumed to supply a
 * valid cmd in that case; confirm against the call sites.
 */
int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			 struct iscsi_nopout *hdr)
{
	u32 payload_length = ntoh24(hdr->dlength);

	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
		pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
		if (!cmd)
			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
						 (unsigned char *)hdr);

		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
					 (unsigned char *)hdr);
	}

	if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
			" not set, protocol error.\n");
		if (!cmd)
			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
						 (unsigned char *)hdr);

		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
					 (unsigned char *)hdr);
	}

	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
		pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
			" greater than MaxXmitDataSegmentLength: %u, protocol"
			" error.\n", payload_length,
			conn->conn_ops->MaxXmitDataSegmentLength);
		if (!cmd)
			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
						 (unsigned char *)hdr);

		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
					 (unsigned char *)hdr);
	}

	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
		hdr->itt == RESERVED_ITT ? "Response" : "Request",
		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
		payload_length);
	/*
	 * This is not a response to a Unsolicited NopIN, which means
	 * it can either be a NOPOUT ping request (with a valid ITT),
	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
	 * Either way, make sure we allocate an struct iscsit_cmd, as both
	 * can contain ping data.
	 */
	if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
		cmd->iscsi_opcode	= ISCSI_OP_NOOP_OUT;
		cmd->i_state		= ISTATE_SEND_NOPIN;
		cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
						1 : 0);
		conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
		cmd->targ_xfer_tag	= 0xFFFFFFFF;
		cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
		cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
		cmd->data_direction	= DMA_NONE;
	}

	return 0;
}
EXPORT_SYMBOL(iscsit_setup_nop_out);
1814 
/*
 * Process a validated NOPOUT PDU: either queue a NOPIN reply (valid
 * ITT), complete an outstanding unsolicited NOPIN (valid TTT), or
 * silently discard a NOPOUT that requests nothing.
 *
 * Returns 0 on success, -1 on unrecoverable CmdSN error, or -EINVAL
 * when the TTT does not match any outstanding NOPIN.
 */
int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			   struct iscsi_nopout *hdr)
{
	struct iscsit_cmd *cmd_p = NULL;
	int cmdsn_ret = 0;
	/*
	 * Initiator is expecting a NopIN ping reply..
	 */
	if (hdr->itt != RESERVED_ITT) {
		if (!cmd)
			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
						(unsigned char *)hdr);

		/* Track the command on the connection under cmd_lock. */
		spin_lock_bh(&conn->cmd_lock);
		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
		spin_unlock_bh(&conn->cmd_lock);

		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

		/* Immediate NOPOUTs bypass CmdSN ordering entirely. */
		if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
			iscsit_add_cmd_to_response_queue(cmd, conn,
							 cmd->i_state);
			return 0;
		}

		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
				(unsigned char *)hdr, hdr->cmdsn);
                if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
			return 0;
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		return 0;
	}
	/*
	 * This was a response to an unsolicited NOPIN ping.
	 */
	if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
		cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
		if (!cmd_p)
			return -EINVAL;

		iscsit_stop_nopin_response_timer(conn);

		/* Retire the original NOPIN command and re-arm the timer. */
		cmd_p->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);

		iscsit_start_nopin_timer(conn);
		return 0;
	}
	/*
	 * Otherwise, initiator is not expecting a NOPIN in response.
	 * Just ignore for now.
	 */

	if (cmd)
		iscsit_free_cmd(cmd, false);

        return 0;
}
EXPORT_SYMBOL(iscsit_process_nop_out);
1876 
/*
 * Top-level NOPOUT handler for traditional iSCSI sockets: validate the
 * header via iscsit_setup_nop_out(), receive any ping data (including
 * pad bytes and an optional DataDigest) into an allocated buffer that
 * is attached to cmd->buf_ptr, then continue processing through
 * iscsit_process_nop_out().  On error paths the command and ping data
 * are freed here.
 */
static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
				 unsigned char *buf)
{
	unsigned char *ping_data = NULL;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	struct kvec *iov = NULL;
	u32 payload_length = ntoh24(hdr->dlength);
	int ret;

	ret = iscsit_setup_nop_out(conn, cmd, hdr);
	if (ret < 0)
		return 0;
	/*
	 * Handle NOP-OUT payload for traditional iSCSI sockets
	 */
	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
		u32 checksum, data_crc, padding = 0;
		int niov = 0, rx_got, rx_size = payload_length;

		/* +1 byte for the NUL terminator added after reception. */
		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
		if (!ping_data) {
			ret = -1;
			goto out;
		}

		iov = &cmd->iov_misc[0];
		iov[niov].iov_base	= ping_data;
		iov[niov++].iov_len	= payload_length;

		padding = ((-payload_length) & 3);
		if (padding != 0) {
			pr_debug("Receiving %u additional bytes"
				" for padding.\n", padding);
			iov[niov].iov_base	= &cmd->pad_bytes;
			iov[niov++].iov_len	= padding;
			rx_size += padding;
		}
		if (conn->conn_ops->DataDigest) {
			iov[niov].iov_base	= &checksum;
			iov[niov++].iov_len	= ISCSI_CRC_LEN;
			rx_size += ISCSI_CRC_LEN;
		}

		WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
		rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
		if (rx_got != rx_size) {
			ret = -1;
			goto out;
		}

		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
						  payload_length, padding,
						  cmd->pad_bytes, &data_crc);

			if (checksum != data_crc) {
				pr_err("Ping data CRC32C DataDigest"
				" 0x%08x does not match computed 0x%08x\n",
					checksum, data_crc);
				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
					pr_err("Unable to recover from"
					" NOPOUT Ping DataCRC failure while in"
						" ERL=0.\n");
					ret = -1;
					goto out;
				} else {
					/*
					 * Silently drop this PDU and let the
					 * initiator plug the CmdSN gap.
					 */
					pr_debug("Dropping NOPOUT"
					" Command CmdSN: 0x%08x due to"
					" DataCRC error.\n", hdr->cmdsn);
					ret = 0;
					goto out;
				}
			} else {
				pr_debug("Got CRC32C DataDigest"
				" 0x%08x for %u bytes of ping data.\n",
					checksum, payload_length);
			}
		}

		ping_data[payload_length] = '\0';
		/*
		 * Attach ping data to struct iscsit_cmd->buf_ptr.
		 */
		cmd->buf_ptr = ping_data;
		cmd->buf_ptr_size = payload_length;

		pr_debug("Got %u bytes of NOPOUT ping"
			" data.\n", payload_length);
		pr_debug("Ping Data: \"%s\"\n", ping_data);
	}

	return iscsit_process_nop_out(conn, cmd, hdr);
out:
	if (cmd)
		iscsit_free_cmd(cmd, false);

	kfree(ping_data);
	return ret;
}
1980 
1981 static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
1982 {
1983 	switch (iscsi_tmf) {
1984 	case ISCSI_TM_FUNC_ABORT_TASK:
1985 		return TMR_ABORT_TASK;
1986 	case ISCSI_TM_FUNC_ABORT_TASK_SET:
1987 		return TMR_ABORT_TASK_SET;
1988 	case ISCSI_TM_FUNC_CLEAR_ACA:
1989 		return TMR_CLEAR_ACA;
1990 	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1991 		return TMR_CLEAR_TASK_SET;
1992 	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1993 		return TMR_LUN_RESET;
1994 	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
1995 		return TMR_TARGET_WARM_RESET;
1996 	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
1997 		return TMR_TARGET_COLD_RESET;
1998 	default:
1999 		return TMR_UNKNOWN;
2000 	}
2001 }
2002 
2003 int
2004 iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2005 			   unsigned char *buf)
2006 {
2007 	struct se_tmr_req *se_tmr;
2008 	struct iscsi_tmr_req *tmr_req;
2009 	struct iscsi_tm *hdr;
2010 	int out_of_order_cmdsn = 0, ret;
2011 	u8 function, tcm_function = TMR_UNKNOWN;
2012 
2013 	hdr			= (struct iscsi_tm *) buf;
2014 	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2015 	function = hdr->flags;
2016 
2017 	pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
2018 		" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
2019 		" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
2020 		hdr->rtt, hdr->refcmdsn, conn->cid);
2021 
2022 	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2023 	    ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2024 	     hdr->rtt != RESERVED_ITT)) {
2025 		pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
2026 		hdr->rtt = RESERVED_ITT;
2027 	}
2028 
2029 	if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
2030 			!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2031 		pr_err("Task Management Request TASK_REASSIGN not"
2032 			" issued as immediate command, bad iSCSI Initiator"
2033 				"implementation\n");
2034 		return iscsit_add_reject_cmd(cmd,
2035 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
2036 	}
2037 	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2038 	    be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
2039 		hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
2040 
2041 	cmd->data_direction = DMA_NONE;
2042 	cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
2043 	if (!cmd->tmr_req) {
2044 		return iscsit_add_reject_cmd(cmd,
2045 					     ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2046 					     buf);
2047 	}
2048 
2049 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
2050 			  conn->sess->se_sess, 0, DMA_NONE,
2051 			  TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
2052 			  scsilun_to_int(&hdr->lun),
2053 			  conn->cmd_cnt);
2054 
2055 	target_get_sess_cmd(&cmd->se_cmd, true);
2056 
2057 	/*
2058 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
2059 	 * LIO-Target $FABRIC_MOD
2060 	 */
2061 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2062 		tcm_function = iscsit_convert_tmf(function);
2063 		if (tcm_function == TMR_UNKNOWN) {
2064 			pr_err("Unknown iSCSI TMR Function:"
2065 			       " 0x%02x\n", function);
2066 			return iscsit_add_reject_cmd(cmd,
2067 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2068 		}
2069 	}
2070 	ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
2071 				 GFP_KERNEL);
2072 	if (ret < 0)
2073 		return iscsit_add_reject_cmd(cmd,
2074 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2075 
2076 	cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
2077 
2078 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_TMFUNC;
2079 	cmd->i_state		= ISTATE_SEND_TASKMGTRSP;
2080 	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2081 	cmd->init_task_tag	= hdr->itt;
2082 	cmd->targ_xfer_tag	= 0xFFFFFFFF;
2083 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
2084 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
2085 	se_tmr			= cmd->se_cmd.se_tmr_req;
2086 	tmr_req			= cmd->tmr_req;
2087 	/*
2088 	 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
2089 	 */
2090 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2091 		ret = transport_lookup_tmr_lun(&cmd->se_cmd);
2092 		if (ret < 0) {
2093 			se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
2094 			goto attach;
2095 		}
2096 	}
2097 
2098 	switch (function) {
2099 	case ISCSI_TM_FUNC_ABORT_TASK:
2100 		se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
2101 		if (se_tmr->response)
2102 			goto attach;
2103 		break;
2104 	case ISCSI_TM_FUNC_ABORT_TASK_SET:
2105 	case ISCSI_TM_FUNC_CLEAR_ACA:
2106 	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
2107 	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
2108 		break;
2109 	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
2110 		if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
2111 			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2112 			goto attach;
2113 		}
2114 		break;
2115 	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
2116 		if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
2117 			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2118 			goto attach;
2119 		}
2120 		break;
2121 	case ISCSI_TM_FUNC_TASK_REASSIGN:
2122 		se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
2123 		/*
2124 		 * Perform sanity checks on the ExpDataSN only if the
2125 		 * TASK_REASSIGN was successful.
2126 		 */
2127 		if (se_tmr->response)
2128 			break;
2129 
2130 		if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
2131 			return iscsit_add_reject_cmd(cmd,
2132 					ISCSI_REASON_BOOKMARK_INVALID, buf);
2133 		break;
2134 	default:
2135 		pr_err("Unknown TMR function: 0x%02x, protocol"
2136 			" error.\n", function);
2137 		se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
2138 		goto attach;
2139 	}
2140 
2141 	if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2142 	    (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
2143 		se_tmr->call_transport = 1;
2144 attach:
2145 	spin_lock_bh(&conn->cmd_lock);
2146 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2147 	spin_unlock_bh(&conn->cmd_lock);
2148 
2149 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2150 		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2151 		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
2152 			out_of_order_cmdsn = 1;
2153 		} else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2154 			target_put_sess_cmd(&cmd->se_cmd);
2155 			return 0;
2156 		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2157 			return -1;
2158 		}
2159 	}
2160 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2161 
2162 	if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2163 		return 0;
2164 	/*
2165 	 * Found the referenced task, send to transport for processing.
2166 	 */
2167 	if (se_tmr->call_transport)
2168 		return transport_generic_handle_tmr(&cmd->se_cmd);
2169 
2170 	/*
2171 	 * Could not find the referenced LUN, task, or Task Management
2172 	 * command not authorized or supported.  Change state and
2173 	 * let the tx_thread send the response.
2174 	 *
2175 	 * For connection recovery, this is also the default action for
2176 	 * TMR TASK_REASSIGN.
2177 	 */
2178 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2179 	target_put_sess_cmd(&cmd->se_cmd);
2180 	return 0;
2181 }
2182 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
2183 
2184 /* #warning FIXME: Support Text Command parameters besides SendTargets */
2185 int
2186 iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2187 		      struct iscsi_text *hdr)
2188 {
2189 	u32 payload_length = ntoh24(hdr->dlength);
2190 
2191 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
2192 		pr_err("Unable to accept text parameter length: %u"
2193 			"greater than MaxXmitDataSegmentLength %u.\n",
2194 		       payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
2195 		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2196 					 (unsigned char *)hdr);
2197 	}
2198 
2199 	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
2200 	     (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
2201 		pr_err("Multi sequence text commands currently not supported\n");
2202 		return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
2203 					(unsigned char *)hdr);
2204 	}
2205 
2206 	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
2207 		" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
2208 		hdr->exp_statsn, payload_length);
2209 
2210 	cmd->iscsi_opcode	= ISCSI_OP_TEXT;
2211 	cmd->i_state		= ISTATE_SEND_TEXTRSP;
2212 	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2213 	conn->sess->init_task_tag = cmd->init_task_tag  = hdr->itt;
2214 	cmd->targ_xfer_tag	= 0xFFFFFFFF;
2215 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
2216 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
2217 	cmd->data_direction	= DMA_NONE;
2218 	kfree(cmd->text_in_ptr);
2219 	cmd->text_in_ptr	= NULL;
2220 
2221 	return 0;
2222 }
2223 EXPORT_SYMBOL(iscsit_setup_text_cmd);
2224 
/*
 * Process a fully received Text Request payload.
 *
 * Only the "SendTargets=" key is supported: "=All" for full discovery,
 * or a single "=iqn."/"=eui." target name.  A NULL cmd->text_in_ptr is
 * the empty follow-up PDU of an in-progress sendtargets exchange,
 * keyed by its TTT.  Returns 0 (or the result of iscsit_execute_cmd()
 * for immediate PDUs), -1 on an unrecoverable CmdSN error, or the
 * reject result on a protocol error.
 */
int
iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			struct iscsi_text *hdr)
{
	unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
	int cmdsn_ret;

	if (!text_in) {
		/* No payload: only valid as a follow-up carrying a real TTT. */
		cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
		if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
			pr_err("Unable to locate text_in buffer for sendtargets"
			       " discovery\n");
			goto reject;
		}
		goto empty_sendtargets;
	}
	if (strncmp("SendTargets=", text_in, 12) != 0) {
		pr_err("Received Text Data that is not"
			" SendTargets, cannot continue.\n");
		goto reject;
	}
	/* '=' confirmed in strncmp */
	text_ptr = strchr(text_in, '=');
	BUG_ON(!text_ptr);
	/* Note: length 5 includes the NUL, so "=All" must be the whole value. */
	if (!strncmp("=All", text_ptr, 5)) {
		cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
	} else if (!strncmp("=iqn.", text_ptr, 5) ||
		   !strncmp("=eui.", text_ptr, 5)) {
		cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
	} else {
		pr_err("Unable to locate valid SendTargets%s value\n",
		       text_ptr);
		goto reject;
	}

	/* Track the command on the connection before sequencing/executing. */
	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

empty_sendtargets:
	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		/* Non-immediate text commands execute in CmdSN order. */
		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
				(unsigned char *)hdr, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		return 0;
	}

	return iscsit_execute_cmd(cmd, 0);

reject:
	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
				 (unsigned char *)hdr);
}
EXPORT_SYMBOL(iscsit_process_text_cmd);
2283 
/*
 * Receive and validate a Text Request PDU on the traditional iSCSI/TCP
 * path: read the payload plus pad bytes (and the optional DataDigest),
 * verify the digest, then hand off to iscsit_process_text_cmd().
 */
static int
iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
		       unsigned char *buf)
{
	struct iscsi_text *hdr = (struct iscsi_text *)buf;
	char *text_in = NULL;
	u32 payload_length = ntoh24(hdr->dlength);
	int rx_size, rc;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return 0;

	rx_size = payload_length;
	if (payload_length) {
		u32 checksum = 0, data_crc = 0;
		u32 padding = 0;
		int niov = 0, rx_got;
		struct kvec iov[2];

		/* Receive buffer is padded out to a 4-byte boundary. */
		rx_size = ALIGN(payload_length, 4);
		text_in = kzalloc(rx_size, GFP_KERNEL);
		if (!text_in)
			goto reject;

		cmd->text_in_ptr = text_in;

		memset(iov, 0, sizeof(iov));
		iov[niov].iov_base	= text_in;
		iov[niov++].iov_len	= rx_size;

		padding = rx_size - payload_length;
		if (padding)
			pr_debug("Receiving %u additional bytes"
					" for padding.\n", padding);
		if (conn->conn_ops->DataDigest) {
			/* The 4-byte CRC trailer lands in its own iovec. */
			iov[niov].iov_base	= &checksum;
			iov[niov++].iov_len	= ISCSI_CRC_LEN;
			rx_size += ISCSI_CRC_LEN;
		}

		WARN_ON_ONCE(niov > ARRAY_SIZE(iov));
		rx_got = rx_data(conn, &iov[0], niov, rx_size);
		if (rx_got != rx_size)
			goto reject;

		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
						  text_in, rx_size, 0, NULL,
						  &data_crc);

			if (checksum != data_crc) {
				pr_err("Text data CRC32C DataDigest"
					" 0x%08x does not match computed"
					" 0x%08x\n", checksum, data_crc);
				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
					pr_err("Unable to recover from"
					" Text Data digest failure while in"
						" ERL=0.\n");
					goto reject;
				} else {
					/*
					 * Silently drop this PDU and let the
					 * initiator plug the CmdSN gap.
					 */
					pr_debug("Dropping Text"
					" Command CmdSN: 0x%08x due to"
					" DataCRC error.\n", hdr->cmdsn);
					/*
					 * NOTE(review): cmd->text_in_ptr still
					 * points at the freed buffer here —
					 * confirm the cmd is not reused with a
					 * stale pointer on this path.
					 */
					kfree(text_in);
					return 0;
				}
			} else {
				pr_debug("Got CRC32C DataDigest"
					" 0x%08x for %u bytes of text data.\n",
						checksum, payload_length);
			}
		}
		/* Force NUL termination so string parsing cannot overrun. */
		text_in[payload_length - 1] = '\0';
		pr_debug("Successfully read %d bytes of text"
				" data.\n", payload_length);
	}

	return iscsit_process_text_cmd(conn, cmd, hdr);

reject:
	kfree(cmd->text_in_ptr);
	cmd->text_in_ptr = NULL;
	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
2373 
2374 int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2375 {
2376 	struct iscsit_conn *conn_p;
2377 	struct iscsit_session *sess = conn->sess;
2378 
2379 	pr_debug("Received logout request CLOSESESSION on CID: %hu"
2380 		" for SID: %u.\n", conn->cid, conn->sess->sid);
2381 
2382 	atomic_set(&sess->session_logout, 1);
2383 	atomic_set(&conn->conn_logout_remove, 1);
2384 	conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2385 
2386 	iscsit_inc_conn_usage_count(conn);
2387 	iscsit_inc_session_usage_count(sess);
2388 
2389 	spin_lock_bh(&sess->conn_lock);
2390 	list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2391 		if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2392 			continue;
2393 
2394 		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2395 		conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2396 	}
2397 	spin_unlock_bh(&sess->conn_lock);
2398 
2399 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2400 
2401 	return 0;
2402 }
2403 
2404 int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2405 {
2406 	struct iscsit_conn *l_conn;
2407 	struct iscsit_session *sess = conn->sess;
2408 
2409 	pr_debug("Received logout request CLOSECONNECTION for CID:"
2410 		" %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2411 
2412 	/*
2413 	 * A Logout Request with a CLOSECONNECTION reason code for a CID
2414 	 * can arrive on a connection with a differing CID.
2415 	 */
2416 	if (conn->cid == cmd->logout_cid) {
2417 		spin_lock_bh(&conn->state_lock);
2418 		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2419 		conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2420 
2421 		atomic_set(&conn->conn_logout_remove, 1);
2422 		conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2423 		iscsit_inc_conn_usage_count(conn);
2424 
2425 		spin_unlock_bh(&conn->state_lock);
2426 	} else {
2427 		/*
2428 		 * Handle all different cid CLOSECONNECTION requests in
2429 		 * iscsit_logout_post_handler_diffcid() as to give enough
2430 		 * time for any non immediate command's CmdSN to be
2431 		 * acknowledged on the connection in question.
2432 		 *
2433 		 * Here we simply make sure the CID is still around.
2434 		 */
2435 		l_conn = iscsit_get_conn_from_cid(sess,
2436 				cmd->logout_cid);
2437 		if (!l_conn) {
2438 			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2439 			iscsit_add_cmd_to_response_queue(cmd, conn,
2440 					cmd->i_state);
2441 			return 0;
2442 		}
2443 
2444 		iscsit_dec_conn_usage_count(l_conn);
2445 	}
2446 
2447 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2448 
2449 	return 0;
2450 }
2451 
2452 int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2453 {
2454 	struct iscsit_session *sess = conn->sess;
2455 
2456 	pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2457 		" CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2458 
2459 	if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2460 		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2461 			" while ERL!=2.\n");
2462 		cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2463 		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2464 		return 0;
2465 	}
2466 
2467 	if (conn->cid == cmd->logout_cid) {
2468 		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2469 			" with CID: %hu on CID: %hu, implementation error.\n",
2470 				cmd->logout_cid, conn->cid);
2471 		cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2472 		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2473 		return 0;
2474 	}
2475 
2476 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2477 
2478 	return 0;
2479 }
2480 
/*
 * Process an incoming Logout Request PDU.
 *
 * Returns 1 ("logout_remove") when the rx thread must wait until the
 * Logout Response has been sent by the tx thread, 0 otherwise, and -1
 * on an unrecoverable CmdSN error.
 */
int
iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			unsigned char *buf)
{
	int cmdsn_ret, logout_remove = 0;
	u8 reason_code = 0;
	struct iscsi_logout *hdr;
	struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);

	hdr			= (struct iscsi_logout *) buf;
	reason_code		= (hdr->flags & 0x7f);

	/* Account the logout in the target's SNMP statistics counters. */
	if (tiqn) {
		spin_lock(&tiqn->logout_stats.lock);
		if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
			tiqn->logout_stats.normal_logouts++;
		else
			tiqn->logout_stats.abnormal_logouts++;
		spin_unlock(&tiqn->logout_stats.lock);
	}

	pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
		" ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
		hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
		hdr->cid, conn->cid);

	if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
		pr_err("Received logout request on connection that"
			" is not in logged in state, ignoring request.\n");
		iscsit_free_cmd(cmd, false);
		return 0;
	}

	cmd->iscsi_opcode       = ISCSI_OP_LOGOUT;
	cmd->i_state            = ISTATE_SEND_LOGOUTRSP;
	cmd->immediate_cmd      = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
	conn->sess->init_task_tag = cmd->init_task_tag  = hdr->itt;
	cmd->targ_xfer_tag      = 0xFFFFFFFF;
	cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
	cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
	cmd->logout_cid         = be16_to_cpu(hdr->cid);
	cmd->logout_reason      = reason_code;
	cmd->data_direction     = DMA_NONE;

	/*
	 * We need to sleep in these cases (by returning 1) until the Logout
	 * Response gets sent in the tx thread.
	 */
	if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
	   ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
	    be16_to_cpu(hdr->cid) == conn->cid))
		logout_remove = 1;

	/* Track the command on the connection before sequencing/acking. */
	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

	/*
	 * Immediate commands are executed, well, immediately.
	 * Non-Immediate Logout Commands are executed in CmdSN order.
	 */
	if (cmd->immediate_cmd) {
		int ret = iscsit_execute_cmd(cmd, 0);

		if (ret < 0)
			return ret;
	} else {
		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
			logout_remove = 0;
		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;
	}

	return logout_remove;
}
EXPORT_SYMBOL(iscsit_handle_logout_cmd);
2561 
2562 int iscsit_handle_snack(
2563 	struct iscsit_conn *conn,
2564 	unsigned char *buf)
2565 {
2566 	struct iscsi_snack *hdr;
2567 
2568 	hdr			= (struct iscsi_snack *) buf;
2569 	hdr->flags		&= ~ISCSI_FLAG_CMD_FINAL;
2570 
2571 	pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2572 		" 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2573 		" CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2574 			hdr->begrun, hdr->runlength, conn->cid);
2575 
2576 	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2577 		pr_err("Initiator sent SNACK request while in"
2578 			" ErrorRecoveryLevel=0.\n");
2579 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2580 					 buf);
2581 	}
2582 	/*
2583 	 * SNACK_DATA and SNACK_R2T are both 0,  so check which function to
2584 	 * call from inside iscsi_send_recovery_datain_or_r2t().
2585 	 */
2586 	switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2587 	case 0:
2588 		return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2589 			hdr->itt,
2590 			be32_to_cpu(hdr->ttt),
2591 			be32_to_cpu(hdr->begrun),
2592 			be32_to_cpu(hdr->runlength));
2593 	case ISCSI_FLAG_SNACK_TYPE_STATUS:
2594 		return iscsit_handle_status_snack(conn, hdr->itt,
2595 			be32_to_cpu(hdr->ttt),
2596 			be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
2597 	case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2598 		return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
2599 			be32_to_cpu(hdr->begrun),
2600 			be32_to_cpu(hdr->runlength));
2601 	case ISCSI_FLAG_SNACK_TYPE_RDATA:
2602 		/* FIXME: Support R-Data SNACK */
2603 		pr_err("R-Data SNACK Not Supported.\n");
2604 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2605 					 buf);
2606 	default:
2607 		pr_err("Unknown SNACK type 0x%02x, protocol"
2608 			" error.\n", hdr->flags & 0x0f);
2609 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2610 					 buf);
2611 	}
2612 
2613 	return 0;
2614 }
2615 EXPORT_SYMBOL(iscsit_handle_snack);
2616 
2617 static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn)
2618 {
2619 	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2620 	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2621 		wait_for_completion_interruptible_timeout(
2622 					&conn->rx_half_close_comp,
2623 					ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
2624 	}
2625 }
2626 
/*
 * Receive the ImmediateData segment attached to a SCSI Command PDU:
 * map the command's data buffer into iovecs, receive payload plus pad
 * bytes (and the optional DataDigest trailer), then verify the digest.
 *
 * Returns IMMEDIATE_DATA_NORMAL_OPERATION on success,
 * IMMEDIATE_DATA_ERL1_CRC_FAILURE for a digest failure that is
 * recoverable under ERL>0, or IMMEDIATE_DATA_CANNOT_RECOVER on fatal
 * errors.
 */
static int iscsit_handle_immediate_data(
	struct iscsit_cmd *cmd,
	struct iscsi_scsi_req *hdr,
	u32 length)
{
	int iov_ret, rx_got = 0, rx_size = 0;
	u32 checksum, iov_count = 0, padding = 0;
	struct iscsit_conn *conn = cmd->conn;
	struct kvec *iov;
	void *overflow_buf = NULL;

	BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
	/* Never map more than the command's remaining data buffer. */
	rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
	/* Reserve the last two iovec slots for padding and digest below. */
	iov_ret = iscsit_map_iovec(cmd, cmd->iov_data,
				   cmd->orig_iov_data_count - 2,
				   cmd->write_data_done, rx_size);
	if (iov_ret < 0)
		return IMMEDIATE_DATA_CANNOT_RECOVER;

	iov_count = iov_ret;
	iov = &cmd->iov_data[0];
	if (rx_size < length) {
		/*
		 * Special case: length of immediate data exceeds the data
		 * buffer size derived from the CDB.
		 */
		overflow_buf = kmalloc(length - rx_size, GFP_KERNEL);
		if (!overflow_buf) {
			iscsit_unmap_iovec(cmd);
			return IMMEDIATE_DATA_CANNOT_RECOVER;
		}
		cmd->overflow_buf = overflow_buf;
		iov[iov_count].iov_base = overflow_buf;
		iov[iov_count].iov_len = length - rx_size;
		iov_count++;
		rx_size = length;
	}

	/* Payloads are padded on the wire to a 4-byte boundary. */
	padding = ((-length) & 3);
	if (padding != 0) {
		iov[iov_count].iov_base	= cmd->pad_bytes;
		iov[iov_count++].iov_len = padding;
		rx_size += padding;
	}

	if (conn->conn_ops->DataDigest) {
		iov[iov_count].iov_base		= &checksum;
		iov[iov_count++].iov_len	= ISCSI_CRC_LEN;
		rx_size += ISCSI_CRC_LEN;
	}

	WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);

	iscsit_unmap_iovec(cmd);

	if (rx_got != rx_size) {
		iscsit_rx_thread_wait_for_tcp(conn);
		return IMMEDIATE_DATA_CANNOT_RECOVER;
	}

	if (conn->conn_ops->DataDigest) {
		u32 data_crc;

		data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
						    cmd->write_data_done, length, padding,
						    cmd->pad_bytes);

		if (checksum != data_crc) {
			pr_err("ImmediateData CRC32C DataDigest 0x%08x"
				" does not match computed 0x%08x\n", checksum,
				data_crc);

			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
				/* ERL=0: digest failure is fatal for the connection. */
				pr_err("Unable to recover from"
					" Immediate Data digest failure while"
					" in ERL=0.\n");
				iscsit_reject_cmd(cmd,
						ISCSI_REASON_DATA_DIGEST_ERROR,
						(unsigned char *)hdr);
				return IMMEDIATE_DATA_CANNOT_RECOVER;
			} else {
				/* ERL>0: reject the PDU but allow recovery. */
				iscsit_reject_cmd(cmd,
						ISCSI_REASON_DATA_DIGEST_ERROR,
						(unsigned char *)hdr);
				return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
			}
		} else {
			pr_debug("Got CRC32C DataDigest 0x%08x for"
				" %u bytes of Immediate Data\n", checksum,
				length);
		}
	}

	cmd->write_data_done += length;

	/* All expected write data received: mark the final DataOUT state. */
	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

	return IMMEDIATE_DATA_NORMAL_OPERATION;
}
2732 
2733 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections
2734 	with active network interface */
2735 static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn)
2736 {
2737 	struct iscsit_cmd *cmd;
2738 	struct iscsit_conn *conn_p;
2739 	bool found = false;
2740 
2741 	lockdep_assert_held(&conn->sess->conn_lock);
2742 
2743 	/*
2744 	 * Only send a Asynchronous Message on connections whos network
2745 	 * interface is still functional.
2746 	 */
2747 	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2748 		if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2749 			iscsit_inc_conn_usage_count(conn_p);
2750 			found = true;
2751 			break;
2752 		}
2753 	}
2754 
2755 	if (!found)
2756 		return;
2757 
2758 	cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
2759 	if (!cmd) {
2760 		iscsit_dec_conn_usage_count(conn_p);
2761 		return;
2762 	}
2763 
2764 	cmd->logout_cid = conn->cid;
2765 	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2766 	cmd->i_state = ISTATE_SEND_ASYNCMSG;
2767 
2768 	spin_lock_bh(&conn_p->cmd_lock);
2769 	list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
2770 	spin_unlock_bh(&conn_p->cmd_lock);
2771 
2772 	iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2773 	iscsit_dec_conn_usage_count(conn_p);
2774 }
2775 
2776 static int iscsit_send_conn_drop_async_message(
2777 	struct iscsit_cmd *cmd,
2778 	struct iscsit_conn *conn)
2779 {
2780 	struct iscsi_async *hdr;
2781 
2782 	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2783 
2784 	hdr			= (struct iscsi_async *) cmd->pdu;
2785 	hdr->opcode		= ISCSI_OP_ASYNC_EVENT;
2786 	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
2787 	cmd->init_task_tag	= RESERVED_ITT;
2788 	cmd->targ_xfer_tag	= 0xFFFFFFFF;
2789 	put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2790 	cmd->stat_sn		= conn->stat_sn++;
2791 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
2792 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
2793 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2794 	hdr->async_event	= ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
2795 	hdr->param1		= cpu_to_be16(cmd->logout_cid);
2796 	hdr->param2		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2797 	hdr->param3		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2798 
2799 	pr_debug("Sending Connection Dropped Async Message StatSN:"
2800 		" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2801 			cmd->logout_cid, conn->cid);
2802 
2803 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2804 }
2805 
2806 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn)
2807 {
2808 	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2809 	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2810 		wait_for_completion_interruptible_timeout(
2811 					&conn->tx_half_close_comp,
2812 					ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
2813 	}
2814 }
2815 
2816 void
2817 iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
2818 			struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
2819 			bool set_statsn)
2820 {
2821 	hdr->opcode		= ISCSI_OP_SCSI_DATA_IN;
2822 	hdr->flags		= datain->flags;
2823 	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2824 		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2825 			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2826 			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2827 		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2828 			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2829 			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2830 		}
2831 	}
2832 	hton24(hdr->dlength, datain->length);
2833 	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2834 		int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2835 				(struct scsi_lun *)&hdr->lun);
2836 	else
2837 		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2838 
2839 	hdr->itt		= cmd->init_task_tag;
2840 
2841 	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2842 		hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
2843 	else
2844 		hdr->ttt		= cpu_to_be32(0xFFFFFFFF);
2845 	if (set_statsn)
2846 		hdr->statsn		= cpu_to_be32(cmd->stat_sn);
2847 	else
2848 		hdr->statsn		= cpu_to_be32(0xFFFFFFFF);
2849 
2850 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
2851 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2852 	hdr->datasn		= cpu_to_be32(datain->data_sn);
2853 	hdr->offset		= cpu_to_be32(datain->offset);
2854 
2855 	pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2856 		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2857 		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2858 		ntohl(hdr->offset), datain->length, conn->cid);
2859 }
2860 EXPORT_SYMBOL(iscsit_build_datain_pdu);
2861 
/*
 * Build and transmit one DataIN PDU for cmd.
 *
 * Returns -1 on error, 0 when more DataIN remains, 1 when this datain
 * request is complete, or 2 when it is complete but sense data is still
 * pending (SCF_TRANSPORT_TASK_SENSE).
 */
static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
	struct iscsi_datain datain;
	struct iscsi_datain_req *dr;
	int eodr = 0, ret;
	bool set_statsn = false;

	memset(&datain, 0, sizeof(struct iscsi_datain));
	dr = iscsit_get_datain_values(cmd, &datain);
	if (!dr) {
		pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
				cmd->init_task_tag);
		return -1;
	}
	/*
	 * Be paranoid and double check the logic for now.
	 */
	if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
			" datain.length: %u exceeds cmd->data_length: %u\n",
			cmd->init_task_tag, datain.offset, datain.length,
			cmd->se_cmd.data_length);
		return -1;
	}

	atomic_long_add(datain.length, &conn->sess->tx_data_octets);
	/*
	 * Special case for successfully execution w/ both DATAIN
	 * and Sense Data.
	 */
	if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
	    (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
		datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
	else {
		/* Only advance StatSN/MaxCmdSN when status rides this PDU. */
		if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
		    (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
			iscsit_increment_maxcmdsn(cmd, conn->sess);
			cmd->stat_sn = conn->stat_sn++;
			set_statsn = true;
		} else if (dr->dr_complete ==
			   DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
			set_statsn = true;
	}

	iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);

	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
	if (ret < 0)
		return ret;

	if (dr->dr_complete) {
		/* 2 = complete with sense still to send, 1 = fully complete. */
		eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
				2 : 1;
		iscsit_free_datain_req(cmd, dr);
	}

	return eodr;
}
2921 
/*
 * Build the Logout Response header for cmd, mapping cmd->logout_reason
 * (and any recovery state) to cmd->logout_response.
 *
 * Returns 0 on success or -1 for an unknown logout reason.
 */
int
iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
			struct iscsi_logout_rsp *hdr)
{
	struct iscsit_conn *logout_conn = NULL;
	struct iscsi_conn_recovery *cr = NULL;
	struct iscsit_session *sess = conn->sess;
	/*
	 * The actual shutting down of Sessions and/or Connections
	 * for CLOSESESSION and CLOSECONNECTION Logout Requests
	 * is done in scsi_logout_post_handler().
	 */
	switch (cmd->logout_reason) {
	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
		pr_debug("iSCSI session logout successful, setting"
			" logout response to ISCSI_LOGOUT_SUCCESS.\n");
		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
		break;
	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
		/* Keep an earlier CID_NOT_FOUND verdict from the rx path. */
		if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
			break;
		/*
		 * For CLOSECONNECTION logout requests carrying
		 * a matching logout CID -> local CID, the reference
		 * for the local CID will have been incremented in
		 * iscsi_logout_closeconnection().
		 *
		 * For CLOSECONNECTION logout requests carrying
		 * a different CID than the connection it arrived
		 * on, the connection responding to cmd->logout_cid
		 * is stopped in iscsit_logout_post_handler_diffcid().
		 */

		pr_debug("iSCSI CID: %hu logout on CID: %hu"
			" successful.\n", cmd->logout_cid, conn->cid);
		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
		break;
	case ISCSI_LOGOUT_REASON_RECOVERY:
		/* Keep error verdicts set by iscsit_logout_removeconnforrecovery(). */
		if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
		    (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
			break;
		/*
		 * If the connection is still active from our point of view
		 * force connection recovery to occur.
		 */
		logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
				cmd->logout_cid);
		if (logout_conn) {
			iscsit_connection_reinstatement_rcfr(logout_conn);
			iscsit_dec_conn_usage_count(logout_conn);
		}

		cr = iscsit_get_inactive_connection_recovery_entry(
				conn->sess, cmd->logout_cid);
		if (!cr) {
			pr_err("Unable to locate CID: %hu for"
			" REMOVECONNFORRECOVERY Logout Request.\n",
				cmd->logout_cid);
			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
			break;
		}

		iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);

		pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
			" for recovery for CID: %hu on CID: %hu successful.\n",
				cmd->logout_cid, conn->cid);
		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
		break;
	default:
		pr_err("Unknown cmd->logout_reason: 0x%02x\n",
				cmd->logout_reason);
		return -1;
	}

	hdr->opcode		= ISCSI_OP_LOGOUT_RSP;
	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
	hdr->response		= cmd->logout_response;
	hdr->itt		= cmd->init_task_tag;
	cmd->stat_sn		= conn->stat_sn++;
	hdr->statsn		= cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));

	pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
		cmd->init_task_tag, cmd->stat_sn, hdr->response,
		cmd->logout_cid, conn->cid);

	return 0;
}
EXPORT_SYMBOL(iscsit_build_logout_rsp);
3016 
3017 static int
3018 iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3019 {
3020 	int rc;
3021 
3022 	rc = iscsit_build_logout_rsp(cmd, conn,
3023 			(struct iscsi_logout_rsp *)&cmd->pdu[0]);
3024 	if (rc < 0)
3025 		return rc;
3026 
3027 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3028 }
3029 
3030 void
3031 iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3032 		       struct iscsi_nopin *hdr, bool nopout_response)
3033 {
3034 	hdr->opcode		= ISCSI_OP_NOOP_IN;
3035 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3036         hton24(hdr->dlength, cmd->buf_ptr_size);
3037 	if (nopout_response)
3038 		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
3039 	hdr->itt		= cmd->init_task_tag;
3040 	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
3041 	cmd->stat_sn		= (nopout_response) ? conn->stat_sn++ :
3042 				  conn->stat_sn;
3043 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3044 
3045 	if (nopout_response)
3046 		iscsit_increment_maxcmdsn(cmd, conn->sess);
3047 
3048 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3049 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3050 
3051 	pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
3052 		" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
3053 		"Solicited" : "Unsolicited", cmd->init_task_tag,
3054 		cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
3055 }
3056 EXPORT_SYMBOL(iscsit_build_nopin_rsp);
3057 
3058 /*
3059  *	Unsolicited NOPIN, either requesting a response or not.
3060  */
3061 static int iscsit_send_unsolicited_nopin(
3062 	struct iscsit_cmd *cmd,
3063 	struct iscsit_conn *conn,
3064 	int want_response)
3065 {
3066 	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3067 	int ret;
3068 
3069 	iscsit_build_nopin_rsp(cmd, conn, hdr, false);
3070 
3071 	pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
3072 		" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
3073 
3074 	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3075 	if (ret < 0)
3076 		return ret;
3077 
3078 	spin_lock_bh(&cmd->istate_lock);
3079 	cmd->i_state = want_response ?
3080 		ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
3081 	spin_unlock_bh(&cmd->istate_lock);
3082 
3083 	return 0;
3084 }
3085 
3086 static int
3087 iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3088 {
3089 	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3090 
3091 	iscsit_build_nopin_rsp(cmd, conn, hdr, true);
3092 
3093 	/*
3094 	 * NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr.
3095 	 * NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size.
3096 	 */
3097 	pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
3098 
3099 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3100 						     cmd->buf_ptr,
3101 						     cmd->buf_ptr_size);
3102 }
3103 
3104 static int iscsit_send_r2t(
3105 	struct iscsit_cmd *cmd,
3106 	struct iscsit_conn *conn)
3107 {
3108 	struct iscsi_r2t *r2t;
3109 	struct iscsi_r2t_rsp *hdr;
3110 	int ret;
3111 
3112 	r2t = iscsit_get_r2t_from_list(cmd);
3113 	if (!r2t)
3114 		return -1;
3115 
3116 	hdr			= (struct iscsi_r2t_rsp *) cmd->pdu;
3117 	memset(hdr, 0, ISCSI_HDR_LEN);
3118 	hdr->opcode		= ISCSI_OP_R2T;
3119 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3120 	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
3121 			(struct scsi_lun *)&hdr->lun);
3122 	hdr->itt		= cmd->init_task_tag;
3123 	if (conn->conn_transport->iscsit_get_r2t_ttt)
3124 		conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
3125 	else
3126 		r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
3127 	hdr->ttt		= cpu_to_be32(r2t->targ_xfer_tag);
3128 	hdr->statsn		= cpu_to_be32(conn->stat_sn);
3129 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3130 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3131 	hdr->r2tsn		= cpu_to_be32(r2t->r2t_sn);
3132 	hdr->data_offset	= cpu_to_be32(r2t->offset);
3133 	hdr->data_length	= cpu_to_be32(r2t->xfer_len);
3134 
3135 	pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
3136 		" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
3137 		(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
3138 		r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
3139 			r2t->offset, r2t->xfer_len, conn->cid);
3140 
3141 	spin_lock_bh(&cmd->r2t_lock);
3142 	r2t->sent_r2t = 1;
3143 	spin_unlock_bh(&cmd->r2t_lock);
3144 
3145 	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3146 	if (ret < 0) {
3147 		return ret;
3148 	}
3149 
3150 	spin_lock_bh(&cmd->dataout_timeout_lock);
3151 	iscsit_start_dataout_timer(cmd, conn);
3152 	spin_unlock_bh(&cmd->dataout_timeout_lock);
3153 
3154 	return 0;
3155 }
3156 
3157 /*
3158  *	@recovery: If called from iscsi_task_reassign_complete_write() for
3159  *		connection recovery.
3160  */
3161 int iscsit_build_r2ts_for_cmd(
3162 	struct iscsit_conn *conn,
3163 	struct iscsit_cmd *cmd,
3164 	bool recovery)
3165 {
3166 	int first_r2t = 1;
3167 	u32 offset = 0, xfer_len = 0;
3168 
3169 	spin_lock_bh(&cmd->r2t_lock);
3170 	if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
3171 		spin_unlock_bh(&cmd->r2t_lock);
3172 		return 0;
3173 	}
3174 
3175 	if (conn->sess->sess_ops->DataSequenceInOrder &&
3176 	    !recovery)
3177 		cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
3178 
3179 	while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
3180 		if (conn->sess->sess_ops->DataSequenceInOrder) {
3181 			offset = cmd->r2t_offset;
3182 
3183 			if (first_r2t && recovery) {
3184 				int new_data_end = offset +
3185 					conn->sess->sess_ops->MaxBurstLength -
3186 					cmd->next_burst_len;
3187 
3188 				if (new_data_end > cmd->se_cmd.data_length)
3189 					xfer_len = cmd->se_cmd.data_length - offset;
3190 				else
3191 					xfer_len =
3192 						conn->sess->sess_ops->MaxBurstLength -
3193 						cmd->next_burst_len;
3194 			} else {
3195 				int new_data_end = offset +
3196 					conn->sess->sess_ops->MaxBurstLength;
3197 
3198 				if (new_data_end > cmd->se_cmd.data_length)
3199 					xfer_len = cmd->se_cmd.data_length - offset;
3200 				else
3201 					xfer_len = conn->sess->sess_ops->MaxBurstLength;
3202 			}
3203 
3204 			if ((s32)xfer_len < 0) {
3205 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3206 				break;
3207 			}
3208 
3209 			cmd->r2t_offset += xfer_len;
3210 
3211 			if (cmd->r2t_offset == cmd->se_cmd.data_length)
3212 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3213 		} else {
3214 			struct iscsi_seq *seq;
3215 
3216 			seq = iscsit_get_seq_holder_for_r2t(cmd);
3217 			if (!seq) {
3218 				spin_unlock_bh(&cmd->r2t_lock);
3219 				return -1;
3220 			}
3221 
3222 			offset = seq->offset;
3223 			xfer_len = seq->xfer_len;
3224 
3225 			if (cmd->seq_send_order == cmd->seq_count)
3226 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3227 		}
3228 		cmd->outstanding_r2ts++;
3229 		first_r2t = 0;
3230 
3231 		if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
3232 			spin_unlock_bh(&cmd->r2t_lock);
3233 			return -1;
3234 		}
3235 
3236 		if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
3237 			break;
3238 	}
3239 	spin_unlock_bh(&cmd->r2t_lock);
3240 
3241 	return 0;
3242 }
3243 EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
3244 
3245 void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3246 			bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
3247 {
3248 	if (inc_stat_sn)
3249 		cmd->stat_sn = conn->stat_sn++;
3250 
3251 	atomic_long_inc(&conn->sess->rsp_pdus);
3252 
3253 	memset(hdr, 0, ISCSI_HDR_LEN);
3254 	hdr->opcode		= ISCSI_OP_SCSI_CMD_RSP;
3255 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3256 	if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3257 		hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3258 		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3259 	} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3260 		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3261 		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3262 	}
3263 	hdr->response		= cmd->iscsi_response;
3264 	hdr->cmd_status		= cmd->se_cmd.scsi_status;
3265 	hdr->itt		= cmd->init_task_tag;
3266 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3267 
3268 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3269 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3270 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3271 
3272 	pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3273 		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3274 		cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status,
3275 		cmd->se_cmd.scsi_status, conn->cid);
3276 }
3277 EXPORT_SYMBOL(iscsit_build_rsp_pdu);
3278 
/*
 * Build and transmit a SCSI Response PDU, attaching any SENSE data held
 * in cmd->sense_buffer as the data segment.  StatSN is only incremented
 * for a plain ISTATE_SEND_STATUS (not for status recovery).
 */
static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
	bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
	void *data_buf = NULL;
	u32 padding = 0, data_buf_len = 0;

	iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);

	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	   ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		/* Prepend the 2-byte SenseLength and account for it. */
		put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof (__be16);

		/* Pad the data segment to a 4-byte boundary. */
		padding		= -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		data_buf = cmd->sense_buffer;
		data_buf_len = cmd->se_cmd.scsi_sense_length + padding;

		if (padding) {
			memset(cmd->sense_buffer +
				cmd->se_cmd.scsi_sense_length, 0, padding);
			pr_debug("Adding %u bytes of padding to"
				" SENSE.\n", padding);
		}

		pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
				" Response PDU\n",
				cmd->se_cmd.scsi_sense_length);
	}

	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
						     data_buf_len);
}
3317 
3318 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3319 {
3320 	switch (se_tmr->response) {
3321 	case TMR_FUNCTION_COMPLETE:
3322 		return ISCSI_TMF_RSP_COMPLETE;
3323 	case TMR_TASK_DOES_NOT_EXIST:
3324 		return ISCSI_TMF_RSP_NO_TASK;
3325 	case TMR_LUN_DOES_NOT_EXIST:
3326 		return ISCSI_TMF_RSP_NO_LUN;
3327 	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3328 		return ISCSI_TMF_RSP_NOT_SUPPORTED;
3329 	case TMR_FUNCTION_REJECTED:
3330 	default:
3331 		return ISCSI_TMF_RSP_REJECTED;
3332 	}
3333 }
3334 
3335 void
3336 iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3337 			  struct iscsi_tm_rsp *hdr)
3338 {
3339 	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3340 
3341 	hdr->opcode		= ISCSI_OP_SCSI_TMFUNC_RSP;
3342 	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
3343 	hdr->response		= iscsit_convert_tcm_tmr_rsp(se_tmr);
3344 	hdr->itt		= cmd->init_task_tag;
3345 	cmd->stat_sn		= conn->stat_sn++;
3346 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3347 
3348 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3349 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3350 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3351 
3352 	pr_debug("Built Task Management Response ITT: 0x%08x,"
3353 		" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3354 		cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3355 }
3356 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
3357 
3358 static int
3359 iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3360 {
3361 	struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3362 
3363 	iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3364 
3365 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3366 }
3367 
#define SENDTARGETS_BUF_LIMIT 32768U

/*
 * Build the text payload answering a SendTargets request into a freshly
 * allocated buffer, stored at cmd->buf_ptr on return.  @skip_bytes resumes
 * a multi-PDU response by skipping already-transmitted key=value pairs;
 * *completed is cleared when the buffer fills before all targets are
 * listed.  Returns the payload length, or a negative errno on failure.
 */
static int
iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
				  enum iscsit_transport_type network_transport,
				  int skip_bytes, bool *completed)
{
	char *payload = NULL;
	struct iscsit_conn *conn = cmd->conn;
	struct iscsi_portal_group *tpg;
	struct iscsi_tiqn *tiqn;
	struct iscsi_tpg_np *tpg_np;
	int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
	int target_name_printed;
	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
	bool active;

	/* Response is bounded by MRDSL and a hard 32K cap. */
	buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
			 SENDTARGETS_BUF_LIMIT);

	payload = kzalloc(buffer_len, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	/*
	 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
	 * explicit case..
	 */
	if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
		text_ptr = strchr(text_in, '=');
		if (!text_ptr) {
			pr_err("Unable to locate '=' string in text_in:"
			       " %s\n", text_in);
			kfree(payload);
			return -EINVAL;
		}
		/*
		 * Skip over '=' character..
		 */
		text_ptr += 1;
	}

	spin_lock(&tiqn_lock);
	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
		/* Single-target request: only emit the matching IQN. */
		if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
		     strcmp(tiqn->tiqn, text_ptr)) {
			continue;
		}

		target_name_printed = 0;

		spin_lock(&tiqn->tiqn_tpg_lock);
		list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {

			/* If demo_mode_discovery=0 and generate_node_acls=0
			 * (demo mode disabled) do not return
			 * TargetName+TargetAddress unless a NodeACL exists.
			 */

			if ((tpg->tpg_attrib.generate_node_acls == 0) &&
			    (tpg->tpg_attrib.demo_mode_discovery == 0) &&
			    (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
				cmd->conn->sess->sess_ops->InitiatorName))) {
				continue;
			}

			spin_lock(&tpg->tpg_state_lock);
			active = (tpg->tpg_state == TPG_STATE_ACTIVE);
			spin_unlock(&tpg->tpg_state_lock);

			if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
				continue;

			spin_lock(&tpg->tpg_np_lock);
			list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
						tpg_np_list) {
				struct iscsi_np *np = tpg_np->tpg_np;
				struct sockaddr_storage *sockaddr;

				if (np->np_network_transport != network_transport)
					continue;

				/* Emit TargetName= once per tiqn, honoring
				 * any resume offset in skip_bytes. */
				if (!target_name_printed) {
					len = sprintf(buf, "TargetName=%s",
						      tiqn->tiqn);
					len += 1;

					if ((len + payload_len) > buffer_len) {
						spin_unlock(&tpg->tpg_np_lock);
						spin_unlock(&tiqn->tiqn_tpg_lock);
						end_of_buf = 1;
						goto eob;
					}

					if (skip_bytes && len <= skip_bytes) {
						skip_bytes -= len;
					} else {
						memcpy(payload + payload_len, buf, len);
						payload_len += len;
						target_name_printed = 1;
						if (len > skip_bytes)
							skip_bytes = 0;
					}
				}

				/* Wildcard portals report the address the
				 * initiator actually connected to. */
				if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr))
					sockaddr = &conn->local_sockaddr;
				else
					sockaddr = &np->np_sockaddr;

				len = sprintf(buf, "TargetAddress="
					      "%pISpc,%hu",
					      sockaddr,
					      tpg->tpgt);
				len += 1;

				if ((len + payload_len) > buffer_len) {
					spin_unlock(&tpg->tpg_np_lock);
					spin_unlock(&tiqn->tiqn_tpg_lock);
					end_of_buf = 1;
					goto eob;
				}

				if (skip_bytes && len <= skip_bytes) {
					skip_bytes -= len;
				} else {
					memcpy(payload + payload_len, buf, len);
					payload_len += len;
					if (len > skip_bytes)
						skip_bytes = 0;
				}
			}
			spin_unlock(&tpg->tpg_np_lock);
		}
		spin_unlock(&tiqn->tiqn_tpg_lock);
eob:
		if (end_of_buf) {
			*completed = false;
			break;
		}

		if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
			break;
	}
	spin_unlock(&tiqn_lock);

	cmd->buf_ptr = payload;

	return payload_len;
}
3519 
/*
 * Build a Text Response PDU answering a SendTargets request, resuming at
 * cmd->read_data_done for multi-PDU exchanges.  Returns the padded payload
 * length, or the negative error from building the SendTargets payload.
 */
int
iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
		      struct iscsi_text_rsp *hdr,
		      enum iscsit_transport_type network_transport)
{
	int text_length, padding;
	bool completed = true;

	text_length = iscsit_build_sendtargets_response(cmd, network_transport,
							cmd->read_data_done,
							&completed);
	if (text_length < 0)
		return text_length;

	if (completed) {
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	} else {
		/* More payload to come: track progress and assign a TTT. */
		hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
		cmd->read_data_done += text_length;
		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
			cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
	}
	hdr->opcode = ISCSI_OP_TEXT_RSP;
	padding = ((-text_length) & 3);
	hton24(hdr->dlength, text_length);
	hdr->itt = cmd->init_task_tag;
	hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	/*
	 * Reset maxcmdsn_inc in multi-part text payload exchanges to
	 * correctly increment MaxCmdSN for each response answering a
	 * non immediate text request with a valid CmdSN.
	 */
	cmd->maxcmdsn_inc = 0;
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));

	pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
		" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
		cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
		!!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
		!!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));

	return text_length + padding;
}
EXPORT_SYMBOL(iscsit_build_text_rsp);
3569 
3570 static int iscsit_send_text_rsp(
3571 	struct iscsit_cmd *cmd,
3572 	struct iscsit_conn *conn)
3573 {
3574 	struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
3575 	int text_length;
3576 
3577 	text_length = iscsit_build_text_rsp(cmd, conn, hdr,
3578 				conn->conn_transport->transport_type);
3579 	if (text_length < 0)
3580 		return text_length;
3581 
3582 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3583 						     cmd->buf_ptr,
3584 						     text_length);
3585 }
3586 
3587 void
3588 iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3589 		    struct iscsi_reject *hdr)
3590 {
3591 	hdr->opcode		= ISCSI_OP_REJECT;
3592 	hdr->reason		= cmd->reject_reason;
3593 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3594 	hton24(hdr->dlength, ISCSI_HDR_LEN);
3595 	hdr->ffffffff		= cpu_to_be32(0xffffffff);
3596 	cmd->stat_sn		= conn->stat_sn++;
3597 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3598 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3599 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3600 
3601 }
3602 EXPORT_SYMBOL(iscsit_build_reject);
3603 
3604 static int iscsit_send_reject(
3605 	struct iscsit_cmd *cmd,
3606 	struct iscsit_conn *conn)
3607 {
3608 	struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3609 
3610 	iscsit_build_reject(cmd, conn, hdr);
3611 
3612 	pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3613 		" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3614 
3615 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3616 						     cmd->buf_ptr,
3617 						     ISCSI_HDR_LEN);
3618 }
3619 
/*
 * Pick the CPU this connection's RX/TX kthreads should run on, based on
 * conn->bitmap_id modulo the number of allowed online CPUs, and record it
 * in conn->conn_cpumask.  Falls back to all CPUs if no candidate is found.
 */
void iscsit_thread_get_cpumask(struct iscsit_conn *conn)
{
	int ord, cpu;
	cpumask_var_t conn_allowed_cpumask;

	/*
	 * bitmap_id is assigned from iscsit_global->ts_bitmap from
	 * within iscsit_start_kthreads()
	 *
	 * Here we use bitmap_id to determine which CPU that this
	 * iSCSI connection's RX/TX threads will be scheduled to
	 * execute upon.
	 */
	if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
		/* No memory for a scratch mask: fall back to all online CPUs. */
		ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
		for_each_online_cpu(cpu) {
			if (ord-- == 0) {
				cpumask_set_cpu(cpu, conn->conn_cpumask);
				return;
			}
		}
	} else {
		/* Restrict to the administrator-configured allowed mask. */
		cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
			cpu_online_mask);

		cpumask_clear(conn->conn_cpumask);
		ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
		for_each_cpu(cpu, conn_allowed_cpumask) {
			if (ord-- == 0) {
				cpumask_set_cpu(cpu, conn->conn_cpumask);
				free_cpumask_var(conn_allowed_cpumask);
				return;
			}
		}
		free_cpumask_var(conn_allowed_cpumask);
	}
	/*
	 * This should never be reached..
	 */
	dump_stack();
	cpumask_setall(conn->conn_cpumask);
}
3662 
3663 static void iscsit_thread_reschedule(struct iscsit_conn *conn)
3664 {
3665 	/*
3666 	 * If iscsit_global->allowed_cpumask modified, reschedule iSCSI
3667 	 * connection's RX/TX threads update conn->allowed_cpumask.
3668 	 */
3669 	if (!cpumask_equal(iscsit_global->allowed_cpumask,
3670 			   conn->allowed_cpumask)) {
3671 		iscsit_thread_get_cpumask(conn);
3672 		conn->conn_tx_reset_cpumask = 1;
3673 		conn->conn_rx_reset_cpumask = 1;
3674 		cpumask_copy(conn->allowed_cpumask,
3675 			     iscsit_global->allowed_cpumask);
3676 	}
3677 }
3678 
3679 void iscsit_thread_check_cpumask(
3680 	struct iscsit_conn *conn,
3681 	struct task_struct *p,
3682 	int mode)
3683 {
3684 	/*
3685 	 * The TX and RX threads maybe call iscsit_thread_check_cpumask()
3686 	 * at the same time. The RX thread might be faster and return from
3687 	 * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
3688 	 * Then the TX thread sets it back to 1.
3689 	 * The next time the RX thread loops, it sees conn_rx_reset_cpumask
3690 	 * set to 1 and calls set_cpus_allowed_ptr() again and set it to 0.
3691 	 */
3692 	iscsit_thread_reschedule(conn);
3693 
3694 	/*
3695 	 * mode == 1 signals iscsi_target_tx_thread() usage.
3696 	 * mode == 0 signals iscsi_target_rx_thread() usage.
3697 	 */
3698 	if (mode == 1) {
3699 		if (!conn->conn_tx_reset_cpumask)
3700 			return;
3701 	} else {
3702 		if (!conn->conn_rx_reset_cpumask)
3703 			return;
3704 	}
3705 
3706 	/*
3707 	 * Update the CPU mask for this single kthread so that
3708 	 * both TX and RX kthreads are scheduled to run on the
3709 	 * same CPU.
3710 	 */
3711 	set_cpus_allowed_ptr(p, conn->conn_cpumask);
3712 	if (mode == 1)
3713 		conn->conn_tx_reset_cpumask = 0;
3714 	else
3715 		conn->conn_rx_reset_cpumask = 0;
3716 }
3717 EXPORT_SYMBOL(iscsit_thread_check_cpumask);
3718 
3719 int
3720 iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
3721 {
3722 	int ret;
3723 
3724 	switch (state) {
3725 	case ISTATE_SEND_R2T:
3726 		ret = iscsit_send_r2t(cmd, conn);
3727 		if (ret < 0)
3728 			goto err;
3729 		break;
3730 	case ISTATE_REMOVE:
3731 		spin_lock_bh(&conn->cmd_lock);
3732 		list_del_init(&cmd->i_conn_node);
3733 		spin_unlock_bh(&conn->cmd_lock);
3734 
3735 		iscsit_free_cmd(cmd, false);
3736 		break;
3737 	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3738 		iscsit_mod_nopin_response_timer(conn);
3739 		ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
3740 		if (ret < 0)
3741 			goto err;
3742 		break;
3743 	case ISTATE_SEND_NOPIN_NO_RESPONSE:
3744 		ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
3745 		if (ret < 0)
3746 			goto err;
3747 		break;
3748 	default:
3749 		pr_err("Unknown Opcode: 0x%02x ITT:"
3750 		       " 0x%08x, i_state: %d on CID: %hu\n",
3751 		       cmd->iscsi_opcode, cmd->init_task_tag, state,
3752 		       conn->cid);
3753 		goto err;
3754 	}
3755 
3756 	return 0;
3757 
3758 err:
3759 	return -1;
3760 }
3761 EXPORT_SYMBOL(iscsit_immediate_queue);
3762 
3763 static int
3764 iscsit_handle_immediate_queue(struct iscsit_conn *conn)
3765 {
3766 	struct iscsit_transport *t = conn->conn_transport;
3767 	struct iscsi_queue_req *qr;
3768 	struct iscsit_cmd *cmd;
3769 	u8 state;
3770 	int ret;
3771 
3772 	while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
3773 		atomic_set(&conn->check_immediate_queue, 0);
3774 		cmd = qr->cmd;
3775 		state = qr->state;
3776 		kmem_cache_free(lio_qr_cache, qr);
3777 
3778 		ret = t->iscsit_immediate_queue(conn, cmd, state);
3779 		if (ret < 0)
3780 			return ret;
3781 	}
3782 
3783 	return 0;
3784 }
3785 
/*
 * Dispatch one response-queue state for @cmd, looping internally while
 * DataIN sequences remain.  Returns 1 when the immediate queue needs
 * servicing, 0 on success, -ECONNRESET when the logout post handler tore
 * the connection down, and -1 on any other failure.
 */
int
iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
	int ret;

check_rsp_state:
	switch (state) {
	case ISTATE_SEND_DATAIN:
		ret = iscsit_send_datain(cmd, conn);
		if (ret < 0)
			goto err;
		else if (!ret)
			/* more drs */
			goto check_rsp_state;
		else if (ret == 1) {
			/* all done */
			spin_lock_bh(&cmd->istate_lock);
			cmd->i_state = ISTATE_SENT_STATUS;
			spin_unlock_bh(&cmd->istate_lock);

			if (atomic_read(&conn->check_immediate_queue))
				return 1;

			return 0;
		} else if (ret == 2) {
			/* Still must send status,
			   SCF_TRANSPORT_TASK_SENSE was set */
			spin_lock_bh(&cmd->istate_lock);
			cmd->i_state = ISTATE_SEND_STATUS;
			spin_unlock_bh(&cmd->istate_lock);
			state = ISTATE_SEND_STATUS;
			goto check_rsp_state;
		}

		break;
	case ISTATE_SEND_STATUS:
	case ISTATE_SEND_STATUS_RECOVERY:
		ret = iscsit_send_response(cmd, conn);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		ret = iscsit_send_logout(cmd, conn);
		break;
	case ISTATE_SEND_ASYNCMSG:
		ret = iscsit_send_conn_drop_async_message(
			cmd, conn);
		break;
	case ISTATE_SEND_NOPIN:
		ret = iscsit_send_nopin(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = iscsit_send_reject(cmd, conn);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = iscsit_send_task_mgt_rsp(cmd, conn);
		if (ret != 0)
			break;
		/* A failed TMR post handler forces fallback to ERL=0. */
		ret = iscsit_tmr_post_handler(cmd, conn);
		if (ret != 0)
			iscsit_fall_back_to_erl0(conn->sess);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = iscsit_send_text_rsp(cmd, conn);
		break;
	default:
		pr_err("Unknown Opcode: 0x%02x ITT:"
		       " 0x%08x, i_state: %d on CID: %hu\n",
		       cmd->iscsi_opcode, cmd->init_task_tag,
		       state, conn->cid);
		goto err;
	}
	if (ret < 0)
		goto err;

	/* Post-transmit bookkeeping: mark the command's status as sent. */
	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		if (!iscsit_logout_post_handler(cmd, conn))
			return -ECONNRESET;
		fallthrough;
	case ISTATE_SEND_STATUS:
	case ISTATE_SEND_ASYNCMSG:
	case ISTATE_SEND_NOPIN:
	case ISTATE_SEND_STATUS_RECOVERY:
	case ISTATE_SEND_TEXTRSP:
	case ISTATE_SEND_TASKMGTRSP:
	case ISTATE_SEND_REJECT:
		spin_lock_bh(&cmd->istate_lock);
		cmd->i_state = ISTATE_SENT_STATUS;
		spin_unlock_bh(&cmd->istate_lock);
		break;
	default:
		pr_err("Unknown Opcode: 0x%02x ITT:"
		       " 0x%08x, i_state: %d on CID: %hu\n",
		       cmd->iscsi_opcode, cmd->init_task_tag,
		       cmd->i_state, conn->cid);
		goto err;
	}

	if (atomic_read(&conn->check_immediate_queue))
		return 1;

	return 0;

err:
	return -1;
}
EXPORT_SYMBOL(iscsit_response_queue);
3892 
3893 static int iscsit_handle_response_queue(struct iscsit_conn *conn)
3894 {
3895 	struct iscsit_transport *t = conn->conn_transport;
3896 	struct iscsi_queue_req *qr;
3897 	struct iscsit_cmd *cmd;
3898 	u8 state;
3899 	int ret;
3900 
3901 	while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
3902 		cmd = qr->cmd;
3903 		state = qr->state;
3904 		kmem_cache_free(lio_qr_cache, qr);
3905 
3906 		ret = t->iscsit_response_queue(conn, cmd, state);
3907 		if (ret == 1 || ret < 0)
3908 			return ret;
3909 	}
3910 
3911 	return 0;
3912 }
3913 
/*
 * Per-connection TX kthread: sleeps until either the immediate or the
 * response queue has work, then drains both.  Exits via the connection
 * failure path on signal or queue error, or parks until kthread_stop()
 * once the connection has been freed elsewhere.
 */
int iscsi_target_tx_thread(void *arg)
{
	int ret = 0;
	struct iscsit_conn *conn = arg;
	bool conn_freed = false;

	/*
	 * Allow ourselves to be interrupted by SIGINT so that a
	 * connection recovery / failure event can be triggered externally.
	 */
	allow_signal(SIGINT);

	while (!kthread_should_stop()) {
		/*
		 * Ensure that both TX and RX per connection kthreads
		 * are scheduled to run on the same CPU.
		 */
		iscsit_thread_check_cpumask(conn, current, 1);

		wait_event_interruptible(conn->queues_wq,
					 !iscsit_conn_all_queues_empty(conn));

		if (signal_pending(current))
			goto transport_err;

get_immediate:
		ret = iscsit_handle_immediate_queue(conn);
		if (ret < 0)
			goto transport_err;

		/* ret == 1 means new immediate work arrived mid-drain. */
		ret = iscsit_handle_response_queue(conn);
		if (ret == 1) {
			goto get_immediate;
		} else if (ret == -ECONNRESET) {
			conn_freed = true;
			goto out;
		} else if (ret < 0) {
			goto transport_err;
		}
	}

transport_err:
	/*
	 * Avoid the normal connection failure code-path if this connection
	 * is still within LOGIN mode, and iscsi_np process context is
	 * responsible for cleaning up the early connection failure.
	 */
	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
		iscsit_take_action_for_connection_exit(conn, &conn_freed);
out:
	/* Wait here until whoever freed the connection stops the kthread. */
	if (!conn_freed) {
		while (!kthread_should_stop()) {
			msleep(100);
		}
	}
	return 0;
}
3971 
3972 static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf)
3973 {
3974 	struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
3975 	struct iscsit_cmd *cmd;
3976 	int ret = 0;
3977 
3978 	switch (hdr->opcode & ISCSI_OPCODE_MASK) {
3979 	case ISCSI_OP_SCSI_CMD:
3980 		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3981 		if (!cmd)
3982 			goto reject;
3983 
3984 		ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
3985 		break;
3986 	case ISCSI_OP_SCSI_DATA_OUT:
3987 		ret = iscsit_handle_data_out(conn, buf);
3988 		break;
3989 	case ISCSI_OP_NOOP_OUT:
3990 		cmd = NULL;
3991 		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
3992 			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3993 			if (!cmd)
3994 				goto reject;
3995 		}
3996 		ret = iscsit_handle_nop_out(conn, cmd, buf);
3997 		break;
3998 	case ISCSI_OP_SCSI_TMFUNC:
3999 		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4000 		if (!cmd)
4001 			goto reject;
4002 
4003 		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
4004 		break;
4005 	case ISCSI_OP_TEXT:
4006 		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
4007 			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
4008 			if (!cmd)
4009 				goto reject;
4010 		} else {
4011 			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4012 			if (!cmd)
4013 				goto reject;
4014 		}
4015 
4016 		ret = iscsit_handle_text_cmd(conn, cmd, buf);
4017 		break;
4018 	case ISCSI_OP_LOGOUT:
4019 		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4020 		if (!cmd)
4021 			goto reject;
4022 
4023 		ret = iscsit_handle_logout_cmd(conn, cmd, buf);
4024 		if (ret > 0)
4025 			wait_for_completion_timeout(&conn->conn_logout_comp,
4026 					SECONDS_FOR_LOGOUT_COMP * HZ);
4027 		break;
4028 	case ISCSI_OP_SNACK:
4029 		ret = iscsit_handle_snack(conn, buf);
4030 		break;
4031 	default:
4032 		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
4033 		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
4034 			pr_err("Cannot recover from unknown"
4035 			" opcode while ERL=0, closing iSCSI connection.\n");
4036 			return -1;
4037 		}
4038 		pr_err("Unable to recover from unknown opcode while OFMarker=No,"
4039 		       " closing iSCSI connection.\n");
4040 		ret = -1;
4041 		break;
4042 	}
4043 
4044 	return ret;
4045 reject:
4046 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
4047 }
4048 
4049 static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
4050 {
4051 	bool ret;
4052 
4053 	spin_lock_bh(&conn->state_lock);
4054 	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
4055 	spin_unlock_bh(&conn->state_lock);
4056 
4057 	return ret;
4058 }
4059 
4060 static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
4061 {
4062 	int ret;
4063 	u8 *buffer, *tmp_buf, opcode;
4064 	u32 checksum = 0, digest = 0;
4065 	struct iscsi_hdr *hdr;
4066 	struct kvec iov;
4067 
4068 	buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
4069 	if (!buffer)
4070 		return;
4071 
4072 	while (!kthread_should_stop()) {
4073 		/*
4074 		 * Ensure that both TX and RX per connection kthreads
4075 		 * are scheduled to run on the same CPU.
4076 		 */
4077 		iscsit_thread_check_cpumask(conn, current, 0);
4078 
4079 		memset(&iov, 0, sizeof(struct kvec));
4080 
4081 		iov.iov_base	= buffer;
4082 		iov.iov_len	= ISCSI_HDR_LEN;
4083 
4084 		ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
4085 		if (ret != ISCSI_HDR_LEN) {
4086 			iscsit_rx_thread_wait_for_tcp(conn);
4087 			break;
4088 		}
4089 
4090 		hdr = (struct iscsi_hdr *) buffer;
4091 		if (hdr->hlength) {
4092 			iov.iov_len = hdr->hlength * 4;
4093 			tmp_buf = krealloc(buffer,
4094 					  ISCSI_HDR_LEN + iov.iov_len,
4095 					  GFP_KERNEL);
4096 			if (!tmp_buf)
4097 				break;
4098 
4099 			buffer = tmp_buf;
4100 			iov.iov_base = &buffer[ISCSI_HDR_LEN];
4101 
4102 			ret = rx_data(conn, &iov, 1, iov.iov_len);
4103 			if (ret != iov.iov_len) {
4104 				iscsit_rx_thread_wait_for_tcp(conn);
4105 				break;
4106 			}
4107 		}
4108 
4109 		if (conn->conn_ops->HeaderDigest) {
4110 			iov.iov_base	= &digest;
4111 			iov.iov_len	= ISCSI_CRC_LEN;
4112 
4113 			ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
4114 			if (ret != ISCSI_CRC_LEN) {
4115 				iscsit_rx_thread_wait_for_tcp(conn);
4116 				break;
4117 			}
4118 
4119 			iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
4120 						  ISCSI_HDR_LEN, 0, NULL,
4121 						  &checksum);
4122 
4123 			if (digest != checksum) {
4124 				pr_err("HeaderDigest CRC32C failed,"
4125 					" received 0x%08x, computed 0x%08x\n",
4126 					digest, checksum);
4127 				/*
4128 				 * Set the PDU to 0xff so it will intentionally
4129 				 * hit default in the switch below.
4130 				 */
4131 				memset(buffer, 0xff, ISCSI_HDR_LEN);
4132 				atomic_long_inc(&conn->sess->conn_digest_errors);
4133 			} else {
4134 				pr_debug("Got HeaderDigest CRC32C"
4135 						" 0x%08x\n", checksum);
4136 			}
4137 		}
4138 
4139 		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
4140 			break;
4141 
4142 		opcode = buffer[0] & ISCSI_OPCODE_MASK;
4143 
4144 		if (conn->sess->sess_ops->SessionType &&
4145 		   ((!(opcode & ISCSI_OP_TEXT)) ||
4146 		    (!(opcode & ISCSI_OP_LOGOUT)))) {
4147 			pr_err("Received illegal iSCSI Opcode: 0x%02x"
4148 			" while in Discovery Session, rejecting.\n", opcode);
4149 			iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
4150 					  buffer);
4151 			break;
4152 		}
4153 
4154 		ret = iscsi_target_rx_opcode(conn, buffer);
4155 		if (ret < 0)
4156 			break;
4157 	}
4158 
4159 	kfree(buffer);
4160 }
4161 
/*
 * iscsi_target_rx_thread - per-connection RX kthread entry point.
 * @arg: the struct iscsit_conn this thread services.
 *
 * Blocks until login post-processing completes, then hands control to
 * the transport's iscsit_get_rx_pdu() receive loop.  When that loop
 * returns, the connection is failed / torn down, and the thread parks
 * until kthread_stop().  Always returns 0 as the kthread exit code.
 */
int iscsi_target_rx_thread(void *arg)
{
	int rc;
	struct iscsit_conn *conn = arg;
	bool conn_freed = false;

	/*
	 * Allow ourselves to be interrupted by SIGINT so that a
	 * connection recovery / failure event can be triggered externally.
	 */
	allow_signal(SIGINT);
	/*
	 * Wait for iscsi_post_login_handler() to complete before allowing
	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
	 */
	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
	if (rc < 0 || iscsi_target_check_conn_state(conn))
		goto out;

	/* Transports without an RX PDU hook have no receive loop to run. */
	if (!conn->conn_transport->iscsit_get_rx_pdu)
		return 0;

	conn->conn_transport->iscsit_get_rx_pdu(conn);

	/*
	 * Only flag a transport failure if we were not interrupted by
	 * the SIGINT sent from the connection shutdown path.
	 */
	if (!signal_pending(current))
		atomic_set(&conn->transport_failed, 1);
	iscsit_take_action_for_connection_exit(conn, &conn_freed);

out:
	if (!conn_freed) {
		/* conn still exists; wait for kthread_stop() from teardown. */
		while (!kthread_should_stop()) {
			msleep(100);
		}
	}

	return 0;
}
4199 
/*
 * iscsit_release_commands_from_conn - reap all commands still attached
 * to a dying connection.
 * @conn: connection being closed.
 *
 * Splices the connection's command list onto a private list, hands
 * aborted-but-not-TAS commands back to LIO's abort path, and frees the
 * remainder.  Finally waits for outstanding per-connection commands
 * unless the transport performs that wait itself via iscsit_wait_conn.
 */
static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
{
	LIST_HEAD(tmp_list);
	struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL;
	struct iscsit_session *sess = conn->sess;
	/*
	 * We expect this function to only ever be called from either RX or TX
	 * thread context via iscsit_close_connection() once the other context
	 * has been reset -> returned sleeping pre-handler state.
	 */
	spin_lock_bh(&conn->cmd_lock);
	list_splice_init(&conn->conn_cmd_list, &tmp_list);

	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
		struct se_cmd *se_cmd = &cmd->se_cmd;

		/* Skip commands never submitted to the LIO core fabric. */
		if (!se_cmd->se_tfo)
			continue;

		spin_lock_irq(&se_cmd->t_state_lock);
		if (se_cmd->transport_state & CMD_T_ABORTED) {
			if (!(se_cmd->transport_state & CMD_T_TAS))
				/*
				 * LIO's abort path owns the cleanup for this,
				 * so put it back on the list and let
				 * aborted_task handle it.
				 */
				list_move_tail(&cmd->i_conn_node,
					       &conn->conn_cmd_list);
		} else {
			/* Mark the fabric as stopping this command. */
			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
		}

		if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
			/*
			 * We never submitted the cmd to LIO core, so we have
			 * to tell LIO to perform the completion process.
			 */
			spin_unlock_irq(&se_cmd->t_state_lock);
			target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED);
			continue;
		}
		spin_unlock_irq(&se_cmd->t_state_lock);
	}
	spin_unlock_bh(&conn->cmd_lock);

	/* Free everything that remained on the private list. */
	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);

		iscsit_increment_maxcmdsn(cmd, sess);
		iscsit_free_cmd(cmd, true);

	}

	/*
	 * Wait on commands that were cleaned up via the aborted_task path.
	 * LLDs that implement iscsit_wait_conn will already have waited for
	 * commands.
	 */
	if (!conn->conn_transport->iscsit_wait_conn) {
		target_stop_cmd_counter(conn->cmd_cnt);
		target_wait_for_cmds(conn->cmd_cnt);
	}
}
4264 
4265 static void iscsit_stop_timers_for_cmds(
4266 	struct iscsit_conn *conn)
4267 {
4268 	struct iscsit_cmd *cmd;
4269 
4270 	spin_lock_bh(&conn->cmd_lock);
4271 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
4272 		if (cmd->data_direction == DMA_TO_DEVICE)
4273 			iscsit_stop_dataout_timer(cmd);
4274 	}
4275 	spin_unlock_bh(&conn->cmd_lock);
4276 }
4277 
/*
 * iscsit_close_connection - tear down a single iSCSI connection.
 * @conn: connection to close; its session must still be valid.
 *
 * Stops the peer RX/TX kthread, quiesces timers and outstanding
 * commands, releases per-connection resources, and finally decides the
 * fate of the owning session (keep, stop other connections, or close).
 * Runs from RX or TX thread context, or from a transport workqueue.
 * Always returns 0.
 */
int iscsit_close_connection(
	struct iscsit_conn *conn)
{
	int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
	struct iscsit_session	*sess = conn->sess;

	pr_debug("Closing iSCSI connection CID %hu on SID:"
		" %u\n", conn->cid, sess->sid);
	/*
	 * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
	 * case just in case the RX Thread in iscsi_target_rx_opcode() is
	 * sleeping and the logout response never got sent because the
	 * connection failed.
	 *
	 * However for iser-target, isert_wait4logout() is using conn_logout_comp
	 * to signal logout response TX interrupt completion.  Go ahead and skip
	 * this for iser since isert_rx_opcode() does not wait on logout failure,
	 * and to avoid iscsit_conn pointer dereference in iser-target code.
	 */
	if (!conn->conn_transport->rdma_shutdown)
		complete(&conn->conn_logout_comp);

	/*
	 * Stop the other per-connection kthread.  The cmpxchg() on
	 * *_thread_active ensures only one context issues kthread_stop().
	 */
	if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
		if (conn->tx_thread &&
		    cmpxchg(&conn->tx_thread_active, true, false)) {
			send_sig(SIGINT, conn->tx_thread, 1);
			kthread_stop(conn->tx_thread);
		}
	} else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
		if (conn->rx_thread &&
		    cmpxchg(&conn->rx_thread_active, true, false)) {
			send_sig(SIGINT, conn->rx_thread, 1);
			kthread_stop(conn->rx_thread);
		}
	}

	/* Return this connection's thread-set slot to the global bitmap. */
	spin_lock(&iscsit_global->ts_bitmap_lock);
	bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
			      get_order(1));
	spin_unlock(&iscsit_global->ts_bitmap_lock);

	/* Quiesce all per-command and per-connection timers. */
	iscsit_stop_timers_for_cmds(conn);
	iscsit_stop_nopin_response_timer(conn);
	iscsit_stop_nopin_timer(conn);

	if (conn->conn_transport->iscsit_wait_conn)
		conn->conn_transport->iscsit_wait_conn(conn);

	/*
	 * During Connection recovery drop unacknowledged out of order
	 * commands for this connection, and prepare the other commands
	 * for reallegiance.
	 *
	 * During normal operation clear the out of order commands (but
	 * do not free the struct iscsi_ooo_cmdsn's) and release all
	 * struct iscsit_cmds.
	 */
	if (atomic_read(&conn->connection_recovery)) {
		iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
		iscsit_prepare_cmds_for_reallegiance(conn);
	} else {
		iscsit_clear_ooo_cmdsns_for_conn(conn);
		iscsit_release_commands_from_conn(conn);
	}
	iscsit_free_queue_reqs_for_conn(conn);

	/*
	 * Handle decrementing session or connection usage count if
	 * a logout response was not able to be sent because the
	 * connection failed.  Fall back to Session Recovery here.
	 */
	if (atomic_read(&conn->conn_logout_remove)) {
		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
			iscsit_dec_conn_usage_count(conn);
			iscsit_dec_session_usage_count(sess);
		}
		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
			iscsit_dec_conn_usage_count(conn);

		atomic_set(&conn->conn_logout_remove, 0);
		atomic_set(&sess->session_reinstatement, 0);
		atomic_set(&sess->session_fall_back_to_erl0, 1);
	}

	spin_lock_bh(&sess->conn_lock);
	list_del(&conn->conn_list);

	/*
	 * Attempt to let the Initiator know this connection failed by
	 * sending an Connection Dropped Async Message on another
	 * active connection.
	 */
	if (atomic_read(&conn->connection_recovery))
		iscsit_build_conn_drop_async_message(conn);

	spin_unlock_bh(&sess->conn_lock);

	/*
	 * If connection reinstatement is being performed on this connection,
	 * up the connection reinstatement semaphore that is being blocked on
	 * in iscsit_cause_connection_reinstatement().
	 */
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
		spin_unlock_bh(&conn->state_lock);
		complete(&conn->conn_wait_comp);
		wait_for_completion(&conn->conn_post_wait_comp);
		spin_lock_bh(&conn->state_lock);
	}

	/*
	 * If connection reinstatement is being performed on this connection
	 * by receiving a REMOVECONNFORRECOVERY logout request, up the
	 * connection wait rcfr semaphore that is being blocked on
	 * an iscsit_connection_reinstatement_rcfr().
	 */
	if (atomic_read(&conn->connection_wait_rcfr)) {
		spin_unlock_bh(&conn->state_lock);
		complete(&conn->conn_wait_rcfr_comp);
		wait_for_completion(&conn->conn_post_wait_comp);
		spin_lock_bh(&conn->state_lock);
	}
	atomic_set(&conn->connection_reinstatement, 1);
	spin_unlock_bh(&conn->state_lock);

	/*
	 * If any other processes are accessing this connection pointer we
	 * must wait until they have completed.
	 */
	iscsit_check_conn_usage_count(conn);

	/*
	 * Free the TX hash request, then the RX hash request along with
	 * its underlying transform.
	 */
	ahash_request_free(conn->conn_tx_hash);
	if (conn->conn_rx_hash) {
		struct crypto_ahash *tfm;

		tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
		ahash_request_free(conn->conn_rx_hash);
		crypto_free_ahash(tfm);
	}

	if (conn->sock)
		sock_release(conn->sock);

	if (conn->conn_transport->iscsit_free_conn)
		conn->conn_transport->iscsit_free_conn(conn);

	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
	conn->conn_state = TARG_CONN_STATE_FREE;
	iscsit_free_conn(conn);

	spin_lock_bh(&sess->conn_lock);
	atomic_dec(&sess->nconn);
	pr_debug("Decremented iSCSI connection count to %d from node:"
		" %s\n", atomic_read(&sess->nconn),
		sess->sess_ops->InitiatorName);
	/*
	 * Make sure that if one connection fails in an non ERL=2 iSCSI
	 * Session that they all fail.
	 */
	if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
	     !atomic_read(&sess->session_logout))
		atomic_set(&sess->session_fall_back_to_erl0, 1);

	/*
	 * If this was not the last connection in the session, and we are
	 * performing session reinstatement or falling back to ERL=0, call
	 * iscsit_stop_session() without sleeping to shutdown the other
	 * active connections.
	 */
	if (atomic_read(&sess->nconn)) {
		if (!atomic_read(&sess->session_reinstatement) &&
		    !atomic_read(&sess->session_fall_back_to_erl0)) {
			spin_unlock_bh(&sess->conn_lock);
			return 0;
		}
		if (!atomic_read(&sess->session_stop_active)) {
			atomic_set(&sess->session_stop_active, 1);
			spin_unlock_bh(&sess->conn_lock);
			iscsit_stop_session(sess, 0, 0);
			return 0;
		}
		spin_unlock_bh(&sess->conn_lock);
		return 0;
	}

	/*
	 * If this was the last connection in the session and one of the
	 * following is occurring:
	 *
	 * Session Reinstatement is not being performed, and are falling back
	 * to ERL=0 call iscsit_close_session().
	 *
	 * Session Logout was requested.  iscsit_close_session() will be called
	 * elsewhere.
	 *
	 * Session Continuation is not being performed, start the Time2Retain
	 * handler and check if sleep_on_sess_wait_sem is active.
	 */
	if (!atomic_read(&sess->session_reinstatement) &&
	     atomic_read(&sess->session_fall_back_to_erl0)) {
		spin_unlock_bh(&sess->conn_lock);
		complete_all(&sess->session_wait_comp);
		iscsit_close_session(sess, true);

		return 0;
	} else if (atomic_read(&sess->session_logout)) {
		pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
		sess->session_state = TARG_SESS_STATE_FREE;

		if (atomic_read(&sess->session_close)) {
			spin_unlock_bh(&sess->conn_lock);
			complete_all(&sess->session_wait_comp);
			iscsit_close_session(sess, true);
		} else {
			spin_unlock_bh(&sess->conn_lock);
		}

		return 0;
	} else {
		pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
		sess->session_state = TARG_SESS_STATE_FAILED;

		if (!atomic_read(&sess->session_continuation))
			iscsit_start_time2retain_handler(sess);

		if (atomic_read(&sess->session_close)) {
			spin_unlock_bh(&sess->conn_lock);
			complete_all(&sess->session_wait_comp);
			iscsit_close_session(sess, true);
		} else {
			spin_unlock_bh(&sess->conn_lock);
		}

		return 0;
	}
}
4514 
4515 /*
4516  * If the iSCSI Session for the iSCSI Initiator Node exists,
4517  * forcefully shutdown the iSCSI NEXUS.
4518  */
/*
 * iscsit_close_session - release the session and its resources.
 * @sess: session to close; must have zero remaining connections.
 * @can_sleep: passed to iscsit_check_session_usage_count(); when false
 *	(e.g. timer context) a held usage count defers the close by
 *	rearming the Time2Retain handler instead of waiting.
 *
 * Always returns 0.
 */
int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
{
	struct iscsi_portal_group *tpg = sess->tpg;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	/* Callers must have torn down every connection first. */
	if (atomic_read(&sess->nconn)) {
		pr_err("%d connection(s) still exist for iSCSI session"
			" to %s\n", atomic_read(&sess->nconn),
			sess->sess_ops->InitiatorName);
		BUG();
	}

	spin_lock_bh(&se_tpg->session_lock);
	atomic_set(&sess->session_logout, 1);
	atomic_set(&sess->session_reinstatement, 1);
	iscsit_stop_time2retain_timer(sess);
	spin_unlock_bh(&se_tpg->session_lock);

	if (sess->sess_ops->ErrorRecoveryLevel == 2)
		iscsit_free_connection_recovery_entries(sess);

	/*
	 * transport_deregister_session_configfs() will clear the
	 * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context
	 * can be setting it again with __transport_register_session() in
	 * iscsi_post_login_handler() again after the iscsit_stop_session()
	 * completes in iscsi_np context.
	 */
	transport_deregister_session_configfs(sess->se_sess);

	/*
	 * If any other processes are accessing this session pointer we must
	 * wait until they have completed.  If we are in an interrupt (the
	 * time2retain handler) and contain and active session usage count we
	 * restart the timer and exit.
	 */
	if (iscsit_check_session_usage_count(sess, can_sleep)) {
		atomic_set(&sess->session_logout, 0);
		iscsit_start_time2retain_handler(sess);
		return 0;
	}

	transport_deregister_session(sess->se_sess);

	iscsit_free_all_ooo_cmdsns(sess);

	/* Update TPG/TIQN accounting and free session state under lock. */
	spin_lock_bh(&se_tpg->session_lock);
	pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
	sess->session_state = TARG_SESS_STATE_FREE;
	pr_debug("Released iSCSI session from node: %s\n",
			sess->sess_ops->InitiatorName);
	tpg->nsessions--;
	if (tpg->tpg_tiqn)
		tpg->tpg_tiqn->tiqn_nsessions--;

	pr_debug("Decremented number of active iSCSI Sessions on"
		" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);

	ida_free(&sess_ida, sess->session_index);
	kfree(sess->sess_ops);
	sess->sess_ops = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	kfree(sess);
	return 0;
}
4585 
/*
 * Post-processing after a Logout response for REASON_CLOSE_SESSION:
 * wake any waiter on conn_logout_comp, stop every connection in the
 * session, and drop the usage counts taken while handling the request.
 */
static void iscsit_logout_post_handler_closesession(
	struct iscsit_conn *conn)
{
	struct iscsit_session *sess = conn->sess;
	int sleep = 1;
	/*
	 * Traditional iscsi/tcp will invoke this logic from TX thread
	 * context during session logout, so clear tx_thread_active and
	 * sleep if iscsit_close_connection() has not already occured.
	 *
	 * Since iser-target invokes this logic from it's own workqueue,
	 * always sleep waiting for RX/TX thread shutdown to complete
	 * within iscsit_close_connection().
	 */
	if (!conn->conn_transport->rdma_shutdown) {
		/* cmpxchg() yields the previous tx_thread_active value. */
		sleep = cmpxchg(&conn->tx_thread_active, true, false);
		if (!sleep)
			return;
	}

	atomic_set(&conn->conn_logout_remove, 0);
	complete(&conn->conn_logout_comp);

	iscsit_dec_conn_usage_count(conn);
	atomic_set(&sess->session_close, 1);
	iscsit_stop_session(sess, sleep, sleep);
	iscsit_dec_session_usage_count(sess);
}
4614 
/*
 * Post-processing after a Logout response for REASON_CLOSE_CONNECTION
 * targeting the connection the request arrived on: wake any waiter on
 * conn_logout_comp and reinstate this connection.  See the comment in
 * iscsit_logout_post_handler_closesession() for the tx_thread_active /
 * rdma_shutdown interplay.
 */
static void iscsit_logout_post_handler_samecid(
	struct iscsit_conn *conn)
{
	int sleep = 1;

	if (!conn->conn_transport->rdma_shutdown) {
		/* cmpxchg() yields the previous tx_thread_active value. */
		sleep = cmpxchg(&conn->tx_thread_active, true, false);
		if (!sleep)
			return;
	}

	atomic_set(&conn->conn_logout_remove, 0);
	complete(&conn->conn_logout_comp);

	iscsit_cause_connection_reinstatement(conn, sleep);
	iscsit_dec_conn_usage_count(conn);
}
4632 
4633 static void iscsit_logout_post_handler_diffcid(
4634 	struct iscsit_conn *conn,
4635 	u16 cid)
4636 {
4637 	struct iscsit_conn *l_conn;
4638 	struct iscsit_session *sess = conn->sess;
4639 	bool conn_found = false;
4640 
4641 	if (!sess)
4642 		return;
4643 
4644 	spin_lock_bh(&sess->conn_lock);
4645 	list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4646 		if (l_conn->cid == cid) {
4647 			iscsit_inc_conn_usage_count(l_conn);
4648 			conn_found = true;
4649 			break;
4650 		}
4651 	}
4652 	spin_unlock_bh(&sess->conn_lock);
4653 
4654 	if (!conn_found)
4655 		return;
4656 
4657 	if (l_conn->sock)
4658 		l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4659 
4660 	spin_lock_bh(&l_conn->state_lock);
4661 	pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4662 	l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4663 	spin_unlock_bh(&l_conn->state_lock);
4664 
4665 	iscsit_cause_connection_reinstatement(l_conn, 1);
4666 	iscsit_dec_conn_usage_count(l_conn);
4667 }
4668 
4669 /*
4670  *	Return of 0 causes the TX thread to restart.
4671  */
4672 int iscsit_logout_post_handler(
4673 	struct iscsit_cmd *cmd,
4674 	struct iscsit_conn *conn)
4675 {
4676 	int ret = 0;
4677 
4678 	switch (cmd->logout_reason) {
4679 	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4680 		switch (cmd->logout_response) {
4681 		case ISCSI_LOGOUT_SUCCESS:
4682 		case ISCSI_LOGOUT_CLEANUP_FAILED:
4683 		default:
4684 			iscsit_logout_post_handler_closesession(conn);
4685 			break;
4686 		}
4687 		break;
4688 	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4689 		if (conn->cid == cmd->logout_cid) {
4690 			switch (cmd->logout_response) {
4691 			case ISCSI_LOGOUT_SUCCESS:
4692 			case ISCSI_LOGOUT_CLEANUP_FAILED:
4693 			default:
4694 				iscsit_logout_post_handler_samecid(conn);
4695 				break;
4696 			}
4697 		} else {
4698 			switch (cmd->logout_response) {
4699 			case ISCSI_LOGOUT_SUCCESS:
4700 				iscsit_logout_post_handler_diffcid(conn,
4701 					cmd->logout_cid);
4702 				break;
4703 			case ISCSI_LOGOUT_CID_NOT_FOUND:
4704 			case ISCSI_LOGOUT_CLEANUP_FAILED:
4705 			default:
4706 				break;
4707 			}
4708 			ret = 1;
4709 		}
4710 		break;
4711 	case ISCSI_LOGOUT_REASON_RECOVERY:
4712 		switch (cmd->logout_response) {
4713 		case ISCSI_LOGOUT_SUCCESS:
4714 		case ISCSI_LOGOUT_CID_NOT_FOUND:
4715 		case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4716 		case ISCSI_LOGOUT_CLEANUP_FAILED:
4717 		default:
4718 			break;
4719 		}
4720 		ret = 1;
4721 		break;
4722 	default:
4723 		break;
4724 
4725 	}
4726 	return ret;
4727 }
4728 EXPORT_SYMBOL(iscsit_logout_post_handler);
4729 
4730 void iscsit_fail_session(struct iscsit_session *sess)
4731 {
4732 	struct iscsit_conn *conn;
4733 
4734 	spin_lock_bh(&sess->conn_lock);
4735 	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
4736 		pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
4737 		conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
4738 	}
4739 	spin_unlock_bh(&sess->conn_lock);
4740 
4741 	pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4742 	sess->session_state = TARG_SESS_STATE_FAILED;
4743 }
4744 
/*
 * iscsit_stop_session - force reinstatement of every connection in a session.
 * @sess: session to stop.
 * @session_sleep: when non-zero, block until session_wait_comp is
 *	completed (last connection gone).
 * @connection_sleep: when non-zero, wait on each connection's
 *	reinstatement individually, dropping sess->conn_lock around
 *	the wait.
 */
void iscsit_stop_session(
	struct iscsit_session *sess,
	int session_sleep,
	int connection_sleep)
{
	u16 conn_count = atomic_read(&sess->nconn);
	struct iscsit_conn *conn, *conn_tmp = NULL;
	int is_last;

	spin_lock_bh(&sess->conn_lock);

	if (connection_sleep) {
		list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
				conn_list) {
			/* Only walk the connections sampled above. */
			if (conn_count == 0)
				break;

			/*
			 * Pin the next entry (conn_tmp) so it cannot go away
			 * while conn_lock is dropped below — except when conn
			 * is the last real entry, since conn_tmp then refers
			 * to the list head rather than a connection.
			 */
			if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
				is_last = 1;
			} else {
				iscsit_inc_conn_usage_count(conn_tmp);
				is_last = 0;
			}
			iscsit_inc_conn_usage_count(conn);

			spin_unlock_bh(&sess->conn_lock);
			iscsit_cause_connection_reinstatement(conn, 1);
			spin_lock_bh(&sess->conn_lock);

			iscsit_dec_conn_usage_count(conn);
			if (is_last == 0)
				iscsit_dec_conn_usage_count(conn_tmp);
			conn_count--;
		}
	} else {
		/* Non-sleeping path: just kick off reinstatement on each. */
		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
			iscsit_cause_connection_reinstatement(conn, 0);
	}

	if (session_sleep && atomic_read(&sess->nconn)) {
		spin_unlock_bh(&sess->conn_lock);
		wait_for_completion(&sess->session_wait_comp);
	} else
		spin_unlock_bh(&sess->conn_lock);
}
4790 
/*
 * iscsit_release_sessions_for_tpg - shut down all sessions on a TPG.
 * @tpg: target portal group to drain.
 * @force: when zero, refuse (return -1) if any sessions are active.
 *
 * Two-phase teardown: first mark candidate sessions for close and move
 * them to a private list under se_tpg->session_lock, then stop and
 * release each one without holding that lock.  Returns 0 on success.
 */
int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
{
	struct iscsit_session *sess;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
	struct se_session *se_sess, *se_sess_tmp;
	LIST_HEAD(free_list);
	int session_count = 0;

	spin_lock_bh(&se_tpg->session_lock);
	if (tpg->nsessions && !force) {
		spin_unlock_bh(&se_tpg->session_lock);
		return -1;
	}

	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
			sess_list) {
		sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;

		/* Skip sessions already on their way down. */
		spin_lock(&sess->conn_lock);
		if (atomic_read(&sess->session_fall_back_to_erl0) ||
		    atomic_read(&sess->session_logout) ||
		    atomic_read(&sess->session_close) ||
		    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
			spin_unlock(&sess->conn_lock);
			continue;
		}
		/* Hold a usage count across the stop/free phase below. */
		iscsit_inc_session_usage_count(sess);
		atomic_set(&sess->session_reinstatement, 1);
		atomic_set(&sess->session_fall_back_to_erl0, 1);
		atomic_set(&sess->session_close, 1);
		spin_unlock(&sess->conn_lock);

		list_move_tail(&se_sess->sess_list, &free_list);
	}
	spin_unlock_bh(&se_tpg->session_lock);

	list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
		sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;

		list_del_init(&se_sess->sess_list);
		iscsit_stop_session(sess, 1, 1);
		iscsit_dec_session_usage_count(sess);
		session_count++;
	}

	pr_debug("Released %d iSCSI Session(s) from Target Portal"
			" Group: %hu\n", session_count, tpg->tpgt);
	return 0;
}
4839 }
4840 
4841 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
4842 MODULE_VERSION("4.1.x");
4843 MODULE_AUTHOR("nab@Linux-iSCSI.org");
4844 MODULE_LICENSE("GPL");
4845 
4846 module_init(iscsi_target_init_module);
4847 module_exit(iscsi_target_cleanup_module);
4848