1 /*******************************************************************************
2  * Filename:  target_core_transport.c
3  *
4  * This file contains the Generic Target Engine Core.
5  *
6  * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7  * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8  * Copyright (c) 2007-2010 Rising Tide Systems
9  * Copyright (c) 2008-2010 Linux-iSCSI.org
10  *
11  * Nicholas A. Bellinger <nab@kernel.org>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26  *
27  ******************************************************************************/
28 
29 #include <linux/net.h>
30 #include <linux/delay.h>
31 #include <linux/string.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/blkdev.h>
35 #include <linux/spinlock.h>
36 #include <linux/kthread.h>
37 #include <linux/in.h>
38 #include <linux/cdrom.h>
39 #include <linux/module.h>
40 #include <linux/ratelimit.h>
41 #include <asm/unaligned.h>
42 #include <net/sock.h>
43 #include <net/tcp.h>
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_cmnd.h>
46 #include <scsi/scsi_tcq.h>
47 
48 #include <target/target_core_base.h>
49 #include <target/target_core_backend.h>
50 #include <target/target_core_fabric.h>
51 #include <target/target_core_configfs.h>
52 
53 #include "target_core_internal.h"
54 #include "target_core_alua.h"
55 #include "target_core_pr.h"
56 #include "target_core_ua.h"
57 
58 static int sub_api_initialized;
59 
60 static struct workqueue_struct *target_completion_wq;
61 static struct kmem_cache *se_sess_cache;
62 struct kmem_cache *se_ua_cache;
63 struct kmem_cache *t10_pr_reg_cache;
64 struct kmem_cache *t10_alua_lu_gp_cache;
65 struct kmem_cache *t10_alua_lu_gp_mem_cache;
66 struct kmem_cache *t10_alua_tg_pt_gp_cache;
67 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
68 
69 static int transport_generic_write_pending(struct se_cmd *);
70 static int transport_processing_thread(void *param);
71 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
72 static void transport_complete_task_attr(struct se_cmd *cmd);
73 static void transport_handle_queue_full(struct se_cmd *cmd,
74 		struct se_device *dev);
75 static int transport_generic_get_mem(struct se_cmd *cmd);
76 static void transport_put_cmd(struct se_cmd *cmd);
77 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
78 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
79 static void target_complete_ok_work(struct work_struct *work);
80 
81 int init_se_kmem_caches(void)
82 {
83 	se_sess_cache = kmem_cache_create("se_sess_cache",
84 			sizeof(struct se_session), __alignof__(struct se_session),
85 			0, NULL);
86 	if (!se_sess_cache) {
87 		pr_err("kmem_cache_create() for struct se_session"
88 				" failed\n");
89 		goto out;
90 	}
91 	se_ua_cache = kmem_cache_create("se_ua_cache",
92 			sizeof(struct se_ua), __alignof__(struct se_ua),
93 			0, NULL);
94 	if (!se_ua_cache) {
95 		pr_err("kmem_cache_create() for struct se_ua failed\n");
96 		goto out_free_sess_cache;
97 	}
98 	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
99 			sizeof(struct t10_pr_registration),
100 			__alignof__(struct t10_pr_registration), 0, NULL);
101 	if (!t10_pr_reg_cache) {
102 		pr_err("kmem_cache_create() for struct t10_pr_registration"
103 				" failed\n");
104 		goto out_free_ua_cache;
105 	}
106 	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
107 			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
108 			0, NULL);
109 	if (!t10_alua_lu_gp_cache) {
110 		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
111 				" failed\n");
112 		goto out_free_pr_reg_cache;
113 	}
114 	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
115 			sizeof(struct t10_alua_lu_gp_member),
116 			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
117 	if (!t10_alua_lu_gp_mem_cache) {
118 		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
119 				"cache failed\n");
120 		goto out_free_lu_gp_cache;
121 	}
122 	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
123 			sizeof(struct t10_alua_tg_pt_gp),
124 			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
125 	if (!t10_alua_tg_pt_gp_cache) {
126 		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
127 				"cache failed\n");
128 		goto out_free_lu_gp_mem_cache;
129 	}
130 	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
131 			"t10_alua_tg_pt_gp_mem_cache",
132 			sizeof(struct t10_alua_tg_pt_gp_member),
133 			__alignof__(struct t10_alua_tg_pt_gp_member),
134 			0, NULL);
135 	if (!t10_alua_tg_pt_gp_mem_cache) {
136 		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
137 				"mem_cache failed\n");
138 		goto out_free_tg_pt_gp_cache;
139 	}
140 
141 	target_completion_wq = alloc_workqueue("target_completion",
142 					       WQ_MEM_RECLAIM, 0);
143 	if (!target_completion_wq)
144 		goto out_free_tg_pt_gp_mem_cache;
145 
146 	return 0;
147 
148 out_free_tg_pt_gp_mem_cache:
149 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
150 out_free_tg_pt_gp_cache:
151 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
152 out_free_lu_gp_mem_cache:
153 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
154 out_free_lu_gp_cache:
155 	kmem_cache_destroy(t10_alua_lu_gp_cache);
156 out_free_pr_reg_cache:
157 	kmem_cache_destroy(t10_pr_reg_cache);
158 out_free_ua_cache:
159 	kmem_cache_destroy(se_ua_cache);
160 out_free_sess_cache:
161 	kmem_cache_destroy(se_sess_cache);
162 out:
163 	return -ENOMEM;
164 }
165 
166 void release_se_kmem_caches(void)
167 {
168 	destroy_workqueue(target_completion_wq);
169 	kmem_cache_destroy(se_sess_cache);
170 	kmem_cache_destroy(se_ua_cache);
171 	kmem_cache_destroy(t10_pr_reg_cache);
172 	kmem_cache_destroy(t10_alua_lu_gp_cache);
173 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
174 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
175 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
176 }
177 
178 /* This code ensures unique mib indexes are handed out. */
179 static DEFINE_SPINLOCK(scsi_mib_index_lock);
180 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
181 
182 /*
183  * Allocate a new row index for the entry type specified
184  */
185 u32 scsi_get_new_index(scsi_index_t type)
186 {
187 	u32 new_index;
188 
189 	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
190 
191 	spin_lock(&scsi_mib_index_lock);
192 	new_index = ++scsi_mib_index[type];
193 	spin_unlock(&scsi_mib_index_lock);
194 
195 	return new_index;
196 }
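
/*
 * Example usage (see transport_add_device_to_core_hba() below):
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */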
197 
198 static void transport_init_queue_obj(struct se_queue_obj *qobj)
199 {
200 	atomic_set(&qobj->queue_cnt, 0);
201 	INIT_LIST_HEAD(&qobj->qobj_list);
202 	init_waitqueue_head(&qobj->thread_wq);
203 	spin_lock_init(&qobj->cmd_queue_lock);
204 }
205 
206 void transport_subsystem_check_init(void)
207 {
208 	int ret;
209 
210 	if (sub_api_initialized)
211 		return;
212 
213 	ret = request_module("target_core_iblock");
214 	if (ret != 0)
215 		pr_err("Unable to load target_core_iblock\n");
216 
217 	ret = request_module("target_core_file");
218 	if (ret != 0)
219 		pr_err("Unable to load target_core_file\n");
220 
221 	ret = request_module("target_core_pscsi");
222 	if (ret != 0)
223 		pr_err("Unable to load target_core_pscsi\n");
224 
225 	ret = request_module("target_core_stgt");
226 	if (ret != 0)
227 		pr_err("Unable to load target_core_stgt\n");
228 
229 	sub_api_initialized = 1;
230 	return;
231 }
232 
233 struct se_session *transport_init_session(void)
234 {
235 	struct se_session *se_sess;
236 
237 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
238 	if (!se_sess) {
239 		pr_err("Unable to allocate struct se_session from"
240 				" se_sess_cache\n");
241 		return ERR_PTR(-ENOMEM);
242 	}
243 	INIT_LIST_HEAD(&se_sess->sess_list);
244 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
245 	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
246 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
247 	spin_lock_init(&se_sess->sess_cmd_lock);
248 	kref_init(&se_sess->sess_kref);
249 
250 	return se_sess;
251 }
252 EXPORT_SYMBOL(transport_init_session);
253 
254 /*
255  * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
256  */
257 void __transport_register_session(
258 	struct se_portal_group *se_tpg,
259 	struct se_node_acl *se_nacl,
260 	struct se_session *se_sess,
261 	void *fabric_sess_ptr)
262 {
263 	unsigned char buf[PR_REG_ISID_LEN];
264 
265 	se_sess->se_tpg = se_tpg;
266 	se_sess->fabric_sess_ptr = fabric_sess_ptr;
267 	/*
268 	 * Used by struct se_node_acl's under ConfigFS to locate an active
269 	 * struct se_session.
270 	 * Only set for struct se_session's that will actually be moving I/O,
271 	 * e.g. *NOT* discovery sessions.
272 	 */
273 	if (se_nacl) {
274 		/*
275 		 * If the fabric module supports an ISID based TransportID,
276 		 * save this value in binary from the fabric I_T Nexus now.
277 		 */
278 		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
279 			memset(&buf[0], 0, PR_REG_ISID_LEN);
280 			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
281 					&buf[0], PR_REG_ISID_LEN);
282 			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
283 		}
284 		kref_get(&se_nacl->acl_kref);
285 
286 		spin_lock_irq(&se_nacl->nacl_sess_lock);
287 		/*
288 		 * The se_nacl->nacl_sess pointer will be set to the
289 		 * last active I_T Nexus for each struct se_node_acl.
290 		 */
291 		se_nacl->nacl_sess = se_sess;
292 
293 		list_add_tail(&se_sess->sess_acl_list,
294 			      &se_nacl->acl_sess_list);
295 		spin_unlock_irq(&se_nacl->nacl_sess_lock);
296 	}
297 	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
298 
299 	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
300 		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
301 }
302 EXPORT_SYMBOL(__transport_register_session);
303 
304 void transport_register_session(
305 	struct se_portal_group *se_tpg,
306 	struct se_node_acl *se_nacl,
307 	struct se_session *se_sess,
308 	void *fabric_sess_ptr)
309 {
310 	unsigned long flags;
311 
312 	spin_lock_irqsave(&se_tpg->session_lock, flags);
313 	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
314 	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
315 }
316 EXPORT_SYMBOL(transport_register_session);
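
/*
 * Illustrative sketch (not compiled): how a hypothetical fabric module
 * might allocate and register an I_T nexus with the two exported
 * helpers above.  The my_fabric_* names are assumptions made for this
 * example only.
 */
#if 0
static int my_fabric_make_nexus(struct se_portal_group *se_tpg,
				struct se_node_acl *se_nacl,
				void *fabric_conn)
{
	struct se_session *se_sess;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);
	/*
	 * Publishes the session on se_tpg->tpg_sess_list under
	 * se_tpg->session_lock, and links it to se_nacl if present.
	 */
	transport_register_session(se_tpg, se_nacl, se_sess, fabric_conn);
	return 0;
}
#endif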
317 
318 static void target_release_session(struct kref *kref)
319 {
320 	struct se_session *se_sess = container_of(kref,
321 			struct se_session, sess_kref);
322 	struct se_portal_group *se_tpg = se_sess->se_tpg;
323 
324 	se_tpg->se_tpg_tfo->close_session(se_sess);
325 }
326 
327 void target_get_session(struct se_session *se_sess)
328 {
329 	kref_get(&se_sess->sess_kref);
330 }
331 EXPORT_SYMBOL(target_get_session);
332 
333 void target_put_session(struct se_session *se_sess)
334 {
335 	kref_put(&se_sess->sess_kref, target_release_session);
336 }
337 EXPORT_SYMBOL(target_put_session);
338 
339 static void target_complete_nacl(struct kref *kref)
340 {
341 	struct se_node_acl *nacl = container_of(kref,
342 				struct se_node_acl, acl_kref);
343 
344 	complete(&nacl->acl_free_comp);
345 }
346 
347 void target_put_nacl(struct se_node_acl *nacl)
348 {
349 	kref_put(&nacl->acl_kref, target_complete_nacl);
350 }
351 
352 void transport_deregister_session_configfs(struct se_session *se_sess)
353 {
354 	struct se_node_acl *se_nacl;
355 	unsigned long flags;
356 	/*
357 	 * Used by struct se_node_acl's under ConfigFS to locate an active struct se_session
358 	 */
359 	se_nacl = se_sess->se_node_acl;
360 	if (se_nacl) {
361 		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
362 		if (se_nacl->acl_stop == 0)
363 			list_del(&se_sess->sess_acl_list);
364 		/*
365 		 * If the session list is empty, then clear the pointer.
366 		 * Otherwise, set the struct se_session pointer from the tail
367 		 * element of the per struct se_node_acl active session list.
368 		 */
369 		if (list_empty(&se_nacl->acl_sess_list))
370 			se_nacl->nacl_sess = NULL;
371 		else {
372 			se_nacl->nacl_sess = container_of(
373 					se_nacl->acl_sess_list.prev,
374 					struct se_session, sess_acl_list);
375 		}
376 		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
377 	}
378 }
379 EXPORT_SYMBOL(transport_deregister_session_configfs);
380 
381 void transport_free_session(struct se_session *se_sess)
382 {
383 	kmem_cache_free(se_sess_cache, se_sess);
384 }
385 EXPORT_SYMBOL(transport_free_session);
386 
387 void transport_deregister_session(struct se_session *se_sess)
388 {
389 	struct se_portal_group *se_tpg = se_sess->se_tpg;
390 	struct target_core_fabric_ops *se_tfo;
391 	struct se_node_acl *se_nacl;
392 	unsigned long flags;
393 	bool comp_nacl = true;
394 
395 	if (!se_tpg) {
396 		transport_free_session(se_sess);
397 		return;
398 	}
399 	se_tfo = se_tpg->se_tpg_tfo;
400 
401 	spin_lock_irqsave(&se_tpg->session_lock, flags);
402 	list_del(&se_sess->sess_list);
403 	se_sess->se_tpg = NULL;
404 	se_sess->fabric_sess_ptr = NULL;
405 	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
406 
407 	/*
408 	 * Determine if we need to do extra work for this initiator node's
409 	 * struct se_node_acl if it had been previously dynamically generated.
410 	 */
411 	se_nacl = se_sess->se_node_acl;
412 
413 	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
414 	if (se_nacl && se_nacl->dynamic_node_acl) {
415 		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
416 			list_del(&se_nacl->acl_list);
417 			se_tpg->num_node_acls--;
418 			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
419 			core_tpg_wait_for_nacl_pr_ref(se_nacl);
420 			core_free_device_list_for_node(se_nacl, se_tpg);
421 			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);
422 
423 			comp_nacl = false;
424 			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
425 		}
426 	}
427 	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
428 
429 	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
430 		se_tpg->se_tpg_tfo->get_fabric_name());
431 	/*
432 	 * If the last kref is dropping now for an explicit NodeACL, wake up
433 	 * the sleeping ->acl_free_comp waiter in the configfs
434 	 * se_node_acl->acl_group removal context.
435 	 */
436 	if (se_nacl && comp_nacl)
437 		target_put_nacl(se_nacl);
438 
439 	transport_free_session(se_sess);
440 }
441 EXPORT_SYMBOL(transport_deregister_session);
442 
443 /*
444  * Called with cmd->t_state_lock held.
445  */
446 static void target_remove_from_state_list(struct se_cmd *cmd)
447 {
448 	struct se_device *dev = cmd->se_dev;
449 	unsigned long flags;
450 
451 	if (!dev)
452 		return;
453 
454 	if (cmd->transport_state & CMD_T_BUSY)
455 		return;
456 
457 	spin_lock_irqsave(&dev->execute_task_lock, flags);
458 	if (cmd->state_active) {
459 		list_del(&cmd->state_list);
460 		cmd->state_active = false;
461 	}
462 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
463 }
464 
465 /*	transport_cmd_check_stop():
466  *
467  *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
468  *	'transport_off = 2' also removes the command from the device state list.
469  *
470  *	A non-zero u8 t_state sets cmd->t_state.
471  *	Returns 1 when command is stopped, else 0.
472  */
473 static int transport_cmd_check_stop(
474 	struct se_cmd *cmd,
475 	int transport_off,
476 	u8 t_state)
477 {
478 	unsigned long flags;
479 
480 	spin_lock_irqsave(&cmd->t_state_lock, flags);
481 	/*
482 	 * Determine if an IOCTL context caller is requesting the stopping of
483 	 * this command for LUN shutdown purposes.
484 	 */
485 	if (cmd->transport_state & CMD_T_LUN_STOP) {
486 		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
487 			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
488 
489 		cmd->transport_state &= ~CMD_T_ACTIVE;
490 		if (transport_off == 2)
491 			target_remove_from_state_list(cmd);
492 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
493 
494 		complete(&cmd->transport_lun_stop_comp);
495 		return 1;
496 	}
497 	/*
498 	 * Determine if frontend context caller is requesting the stopping of
499 	 * this command for frontend exceptions.
500 	 */
501 	if (cmd->transport_state & CMD_T_STOP) {
502 		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
503 			__func__, __LINE__,
504 			cmd->se_tfo->get_task_tag(cmd));
505 
506 		if (transport_off == 2)
507 			target_remove_from_state_list(cmd);
508 
509 		/*
510 		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
511 		 * to FE.
512 		 */
513 		if (transport_off == 2)
514 			cmd->se_lun = NULL;
515 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
516 
517 		complete(&cmd->t_transport_stop_comp);
518 		return 1;
519 	}
520 	if (transport_off) {
521 		cmd->transport_state &= ~CMD_T_ACTIVE;
522 		if (transport_off == 2) {
523 			target_remove_from_state_list(cmd);
524 			/*
525 			 * Clear struct se_cmd->se_lun before the transport_off == 2
526 			 * handoff to fabric module.
527 			 */
528 			cmd->se_lun = NULL;
529 			/*
530 			 * Some fabric modules like tcm_loop can release their
531 			 * internally allocated I/O reference and the struct
532 			 * se_cmd now.
533 			 *
534 			 * Fabric modules are expected to return '1' here if the
535 			 * se_cmd being passed is released at this point,
536 			 * or zero if not being released.
537 			 */
538 			if (cmd->se_tfo->check_stop_free != NULL) {
539 				spin_unlock_irqrestore(
540 					&cmd->t_state_lock, flags);
541 
542 				return cmd->se_tfo->check_stop_free(cmd);
543 			}
544 		}
545 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
546 
547 		return 0;
548 	} else if (t_state)
549 		cmd->t_state = t_state;
550 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
551 
552 	return 0;
553 }
554 
555 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
556 {
557 	return transport_cmd_check_stop(cmd, 2, 0);
558 }
559 
560 static void transport_lun_remove_cmd(struct se_cmd *cmd)
561 {
562 	struct se_lun *lun = cmd->se_lun;
563 	unsigned long flags;
564 
565 	if (!lun)
566 		return;
567 
568 	spin_lock_irqsave(&cmd->t_state_lock, flags);
569 	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
570 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
571 		target_remove_from_state_list(cmd);
572 	}
573 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
574 
575 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
576 	if (!list_empty(&cmd->se_lun_node))
577 		list_del_init(&cmd->se_lun_node);
578 	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
579 }
580 
581 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
582 {
583 	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
584 		transport_lun_remove_cmd(cmd);
585 
586 	if (transport_cmd_check_stop_to_fabric(cmd))
587 		return;
588 	if (remove) {
589 		transport_remove_cmd_from_queue(cmd);
590 		transport_put_cmd(cmd);
591 	}
592 }
593 
594 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
595 		bool at_head)
596 {
597 	struct se_device *dev = cmd->se_dev;
598 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
599 	unsigned long flags;
600 
601 	if (t_state) {
602 		spin_lock_irqsave(&cmd->t_state_lock, flags);
603 		cmd->t_state = t_state;
604 		cmd->transport_state |= CMD_T_ACTIVE;
605 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
606 	}
607 
608 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
609 
610 	/* If the cmd is already on the list, remove it before we add it */
611 	if (!list_empty(&cmd->se_queue_node))
612 		list_del(&cmd->se_queue_node);
613 	else
614 		atomic_inc(&qobj->queue_cnt);
615 
616 	if (at_head)
617 		list_add(&cmd->se_queue_node, &qobj->qobj_list);
618 	else
619 		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
620 	cmd->transport_state |= CMD_T_QUEUED;
621 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
622 
623 	wake_up_interruptible(&qobj->thread_wq);
624 }
625 
626 static struct se_cmd *
627 transport_get_cmd_from_queue(struct se_queue_obj *qobj)
628 {
629 	struct se_cmd *cmd;
630 	unsigned long flags;
631 
632 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
633 	if (list_empty(&qobj->qobj_list)) {
634 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
635 		return NULL;
636 	}
637 	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
638 
639 	cmd->transport_state &= ~CMD_T_QUEUED;
640 	list_del_init(&cmd->se_queue_node);
641 	atomic_dec(&qobj->queue_cnt);
642 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
643 
644 	return cmd;
645 }
646 
647 static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
648 {
649 	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
650 	unsigned long flags;
651 
652 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
653 	if (!(cmd->transport_state & CMD_T_QUEUED)) {
654 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
655 		return;
656 	}
657 	cmd->transport_state &= ~CMD_T_QUEUED;
658 	atomic_dec(&qobj->queue_cnt);
659 	list_del_init(&cmd->se_queue_node);
660 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
661 }
662 
663 static void target_complete_failure_work(struct work_struct *work)
664 {
665 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
666 
667 	transport_generic_request_failure(cmd);
668 }
669 
670 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
671 {
672 	struct se_device *dev = cmd->se_dev;
673 	int success = scsi_status == GOOD;
674 	unsigned long flags;
675 
676 	cmd->scsi_status = scsi_status;
677 
678 
679 	spin_lock_irqsave(&cmd->t_state_lock, flags);
680 	cmd->transport_state &= ~CMD_T_BUSY;
681 
682 	if (dev && dev->transport->transport_complete) {
683 		if (dev->transport->transport_complete(cmd,
684 				cmd->t_data_sg) != 0) {
685 			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
686 			success = 1;
687 		}
688 	}
689 
690 	/*
691 	 * See if we are waiting to complete for an exception condition.
692 	 */
693 	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
694 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
695 		complete(&cmd->task_stop_comp);
696 		return;
697 	}
698 
699 	if (!success)
700 		cmd->transport_state |= CMD_T_FAILED;
701 
702 	/*
703 	 * Check for the case where an explicit ABORT_TASK has been received
704 	 * and transport_wait_for_tasks() will be waiting for completion.
705 	 */
706 	if (cmd->transport_state & CMD_T_ABORTED &&
707 	    cmd->transport_state & CMD_T_STOP) {
708 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
709 		complete(&cmd->t_transport_stop_comp);
710 		return;
711 	} else if (cmd->transport_state & CMD_T_FAILED) {
712 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
713 		INIT_WORK(&cmd->work, target_complete_failure_work);
714 	} else {
715 		INIT_WORK(&cmd->work, target_complete_ok_work);
716 	}
717 
718 	cmd->t_state = TRANSPORT_COMPLETE;
719 	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
720 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
721 
722 	queue_work(target_completion_wq, &cmd->work);
723 }
724 EXPORT_SYMBOL(target_complete_cmd);
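
/*
 * Illustrative sketch (not compiled): a backend would typically call
 * target_complete_cmd() from its I/O completion handler, e.g. a bio
 * end_io path.  my_backend_end_io is a hypothetical callback.
 */
#if 0
static void my_backend_end_io(struct se_cmd *cmd, int err)
{
	/* Queues target_complete_ok_work or target_complete_failure_work */
	target_complete_cmd(cmd, err ? SAM_STAT_CHECK_CONDITION : GOOD);
}
#endif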
725 
726 static void target_add_to_state_list(struct se_cmd *cmd)
727 {
728 	struct se_device *dev = cmd->se_dev;
729 	unsigned long flags;
730 
731 	spin_lock_irqsave(&dev->execute_task_lock, flags);
732 	if (!cmd->state_active) {
733 		list_add_tail(&cmd->state_list, &dev->state_list);
734 		cmd->state_active = true;
735 	}
736 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
737 }
738 
739 static void __target_add_to_execute_list(struct se_cmd *cmd)
740 {
741 	struct se_device *dev = cmd->se_dev;
742 	bool head_of_queue = false;
743 
744 	if (!list_empty(&cmd->execute_list))
745 		return;
746 
747 	if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
748 	    cmd->sam_task_attr == MSG_HEAD_TAG)
749 		head_of_queue = true;
750 
751 	if (head_of_queue)
752 		list_add(&cmd->execute_list, &dev->execute_list);
753 	else
754 		list_add_tail(&cmd->execute_list, &dev->execute_list);
755 
756 	atomic_inc(&dev->execute_tasks);
757 
758 	if (cmd->state_active)
759 		return;
760 
761 	if (head_of_queue)
762 		list_add(&cmd->state_list, &dev->state_list);
763 	else
764 		list_add_tail(&cmd->state_list, &dev->state_list);
765 
766 	cmd->state_active = true;
767 }
768 
769 static void target_add_to_execute_list(struct se_cmd *cmd)
770 {
771 	unsigned long flags;
772 	struct se_device *dev = cmd->se_dev;
773 
774 	spin_lock_irqsave(&dev->execute_task_lock, flags);
775 	__target_add_to_execute_list(cmd);
776 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
777 }
778 
779 void __target_remove_from_execute_list(struct se_cmd *cmd)
780 {
781 	list_del_init(&cmd->execute_list);
782 	atomic_dec(&cmd->se_dev->execute_tasks);
783 }
784 
785 static void target_remove_from_execute_list(struct se_cmd *cmd)
786 {
787 	struct se_device *dev = cmd->se_dev;
788 	unsigned long flags;
789 
790 	if (WARN_ON(list_empty(&cmd->execute_list)))
791 		return;
792 
793 	spin_lock_irqsave(&dev->execute_task_lock, flags);
794 	__target_remove_from_execute_list(cmd);
795 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
796 }
797 
798 /*
799  * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
800  */
801 
802 static void target_qf_do_work(struct work_struct *work)
803 {
804 	struct se_device *dev = container_of(work, struct se_device,
805 					qf_work_queue);
806 	LIST_HEAD(qf_cmd_list);
807 	struct se_cmd *cmd, *cmd_tmp;
808 
809 	spin_lock_irq(&dev->qf_cmd_lock);
810 	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
811 	spin_unlock_irq(&dev->qf_cmd_lock);
812 
813 	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
814 		list_del(&cmd->se_qf_node);
815 		atomic_dec(&dev->dev_qf_count);
816 		smp_mb__after_atomic_dec();
817 
818 		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
819 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
820 			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
821 			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
822 			: "UNKNOWN");
823 
824 		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
825 	}
826 }
827 
828 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
829 {
830 	switch (cmd->data_direction) {
831 	case DMA_NONE:
832 		return "NONE";
833 	case DMA_FROM_DEVICE:
834 		return "READ";
835 	case DMA_TO_DEVICE:
836 		return "WRITE";
837 	case DMA_BIDIRECTIONAL:
838 		return "BIDI";
839 	default:
840 		break;
841 	}
842 
843 	return "UNKNOWN";
844 }
845 
846 void transport_dump_dev_state(
847 	struct se_device *dev,
848 	char *b,
849 	int *bl)
850 {
851 	*bl += sprintf(b + *bl, "Status: ");
852 	switch (dev->dev_status) {
853 	case TRANSPORT_DEVICE_ACTIVATED:
854 		*bl += sprintf(b + *bl, "ACTIVATED");
855 		break;
856 	case TRANSPORT_DEVICE_DEACTIVATED:
857 		*bl += sprintf(b + *bl, "DEACTIVATED");
858 		break;
859 	case TRANSPORT_DEVICE_SHUTDOWN:
860 		*bl += sprintf(b + *bl, "SHUTDOWN");
861 		break;
862 	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
863 	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
864 		*bl += sprintf(b + *bl, "OFFLINE");
865 		break;
866 	default:
867 		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
868 		break;
869 	}
870 
871 	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
872 		atomic_read(&dev->execute_tasks), dev->queue_depth);
873 	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
874 		dev->se_sub_dev->se_dev_attrib.block_size,
875 		dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
876 	*bl += sprintf(b + *bl, "        ");
877 }
878 
879 void transport_dump_vpd_proto_id(
880 	struct t10_vpd *vpd,
881 	unsigned char *p_buf,
882 	int p_buf_len)
883 {
884 	unsigned char buf[VPD_TMP_BUF_SIZE];
885 	int len;
886 
887 	memset(buf, 0, VPD_TMP_BUF_SIZE);
888 	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
889 
890 	switch (vpd->protocol_identifier) {
891 	case 0x00:
892 		sprintf(buf+len, "Fibre Channel\n");
893 		break;
894 	case 0x10:
895 		sprintf(buf+len, "Parallel SCSI\n");
896 		break;
897 	case 0x20:
898 		sprintf(buf+len, "SSA\n");
899 		break;
900 	case 0x30:
901 		sprintf(buf+len, "IEEE 1394\n");
902 		break;
903 	case 0x40:
904 		sprintf(buf+len, "SCSI Remote Direct Memory Access"
905 				" Protocol\n");
906 		break;
907 	case 0x50:
908 		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
909 		break;
910 	case 0x60:
911 		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
912 		break;
913 	case 0x70:
914 		sprintf(buf+len, "Automation/Drive Interface Transport"
915 				" Protocol\n");
916 		break;
917 	case 0x80:
918 		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
919 		break;
920 	default:
921 		sprintf(buf+len, "Unknown 0x%02x\n",
922 				vpd->protocol_identifier);
923 		break;
924 	}
925 
926 	if (p_buf)
927 		strncpy(p_buf, buf, p_buf_len);
928 	else
929 		pr_debug("%s", buf);
930 }
931 
932 void
933 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
934 {
935 	/*
936 	 * Check if the Protocol Identifier Valid (PIV) bit is set..
937 	 *
938 	 * from spc3r23.pdf section 7.5.1
939 	 */
940 	if (page_83[1] & 0x80) {
941 		vpd->protocol_identifier = (page_83[0] & 0xf0);
942 		vpd->protocol_identifier_set = 1;
943 		transport_dump_vpd_proto_id(vpd, NULL, 0);
944 	}
945 }
946 EXPORT_SYMBOL(transport_set_vpd_proto_id);
947 
948 int transport_dump_vpd_assoc(
949 	struct t10_vpd *vpd,
950 	unsigned char *p_buf,
951 	int p_buf_len)
952 {
953 	unsigned char buf[VPD_TMP_BUF_SIZE];
954 	int ret = 0;
955 	int len;
956 
957 	memset(buf, 0, VPD_TMP_BUF_SIZE);
958 	len = sprintf(buf, "T10 VPD Identifier Association: ");
959 
960 	switch (vpd->association) {
961 	case 0x00:
962 		sprintf(buf+len, "addressed logical unit\n");
963 		break;
964 	case 0x10:
965 		sprintf(buf+len, "target port\n");
966 		break;
967 	case 0x20:
968 		sprintf(buf+len, "SCSI target device\n");
969 		break;
970 	default:
971 		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
972 		ret = -EINVAL;
973 		break;
974 	}
975 
976 	if (p_buf)
977 		strncpy(p_buf, buf, p_buf_len);
978 	else
979 		pr_debug("%s", buf);
980 
981 	return ret;
982 }
983 
984 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
985 {
986 	/*
987 	 * The VPD identification association..
988 	 *
989 	 * from spc3r23.pdf Section 7.6.3.1 Table 297
990 	 */
991 	vpd->association = (page_83[1] & 0x30);
992 	return transport_dump_vpd_assoc(vpd, NULL, 0);
993 }
994 EXPORT_SYMBOL(transport_set_vpd_assoc);
995 
996 int transport_dump_vpd_ident_type(
997 	struct t10_vpd *vpd,
998 	unsigned char *p_buf,
999 	int p_buf_len)
1000 {
1001 	unsigned char buf[VPD_TMP_BUF_SIZE];
1002 	int ret = 0;
1003 	int len;
1004 
1005 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1006 	len = sprintf(buf, "T10 VPD Identifier Type: ");
1007 
1008 	switch (vpd->device_identifier_type) {
1009 	case 0x00:
1010 		sprintf(buf+len, "Vendor specific\n");
1011 		break;
1012 	case 0x01:
1013 		sprintf(buf+len, "T10 Vendor ID based\n");
1014 		break;
1015 	case 0x02:
1016 		sprintf(buf+len, "EUI-64 based\n");
1017 		break;
1018 	case 0x03:
1019 		sprintf(buf+len, "NAA\n");
1020 		break;
1021 	case 0x04:
1022 		sprintf(buf+len, "Relative target port identifier\n");
1023 		break;
1024 	case 0x08:
1025 		sprintf(buf+len, "SCSI name string\n");
1026 		break;
1027 	default:
1028 		sprintf(buf+len, "Unsupported: 0x%02x\n",
1029 				vpd->device_identifier_type);
1030 		ret = -EINVAL;
1031 		break;
1032 	}
1033 
1034 	if (p_buf) {
1035 		if (p_buf_len < strlen(buf)+1)
1036 			return -EINVAL;
1037 		strncpy(p_buf, buf, p_buf_len);
1038 	} else {
1039 		pr_debug("%s", buf);
1040 	}
1041 
1042 	return ret;
1043 }
1044 
1045 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1046 {
1047 	/*
1048 	 * The VPD identifier type..
1049 	 *
1050 	 * from spc3r23.pdf Section 7.6.3.1 Table 298
1051 	 */
1052 	vpd->device_identifier_type = (page_83[1] & 0x0f);
1053 	return transport_dump_vpd_ident_type(vpd, NULL, 0);
1054 }
1055 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1056 
1057 int transport_dump_vpd_ident(
1058 	struct t10_vpd *vpd,
1059 	unsigned char *p_buf,
1060 	int p_buf_len)
1061 {
1062 	unsigned char buf[VPD_TMP_BUF_SIZE];
1063 	int ret = 0;
1064 
1065 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1066 
1067 	switch (vpd->device_identifier_code_set) {
1068 	case 0x01: /* Binary */
1069 		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1070 			&vpd->device_identifier[0]);
1071 		break;
1072 	case 0x02: /* ASCII */
1073 		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1074 			&vpd->device_identifier[0]);
1075 		break;
1076 	case 0x03: /* UTF-8 */
1077 		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1078 			&vpd->device_identifier[0]);
1079 		break;
1080 	default:
1081 		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1082 			" 0x%02x", vpd->device_identifier_code_set);
1083 		ret = -EINVAL;
1084 		break;
1085 	}
1086 
1087 	if (p_buf)
1088 		strncpy(p_buf, buf, p_buf_len);
1089 	else
1090 		pr_debug("%s", buf);
1091 
1092 	return ret;
1093 }
1094 
1095 int
1096 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1097 {
1098 	static const char hex_str[] = "0123456789abcdef";
1099 	int j = 0, i = 4; /* offset to start of the identifier */
1100 
1101 	/*
1102 	 * The VPD Code Set (encoding)
1103 	 *
1104 	 * from spc3r23.pdf Section 7.6.3.1 Table 296
1105 	 */
1106 	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1107 	switch (vpd->device_identifier_code_set) {
1108 	case 0x01: /* Binary */
1109 		vpd->device_identifier[j++] =
1110 				hex_str[vpd->device_identifier_type];
1111 		while (i < (4 + page_83[3])) {
1112 			vpd->device_identifier[j++] =
1113 				hex_str[(page_83[i] & 0xf0) >> 4];
1114 			vpd->device_identifier[j++] =
1115 				hex_str[page_83[i] & 0x0f];
1116 			i++;
1117 		}
1118 		break;
1119 	case 0x02: /* ASCII */
1120 	case 0x03: /* UTF-8 */
1121 		while (i < (4 + page_83[3]))
1122 			vpd->device_identifier[j++] = page_83[i++];
1123 		break;
1124 	default:
1125 		break;
1126 	}
1127 
1128 	return transport_dump_vpd_ident(vpd, NULL, 0);
1129 }
1130 EXPORT_SYMBOL(transport_set_vpd_ident);
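
/*
 * Illustrative sketch (not compiled): walking the identification
 * descriptor list of an INQUIRY EVPD 0x83 response and feeding each
 * descriptor to the four transport_set_vpd_*() parsers above, per
 * spc3r23.pdf Section 7.6.3.  page_buf/page_len are assumptions; a
 * real caller would also link each vpd into a per-device list.
 */
#if 0
static void my_parse_evpd_0x83(unsigned char *page_buf, u32 page_len)
{
	u32 off = 4; /* descriptors start after the 4 byte page header */

	while (off < page_len) {
		unsigned char *page_83 = &page_buf[off];
		struct t10_vpd *vpd;

		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
		if (!vpd)
			return;

		transport_set_vpd_proto_id(vpd, page_83);
		transport_set_vpd_assoc(vpd, page_83);
		transport_set_vpd_ident_type(vpd, page_83);
		transport_set_vpd_ident(vpd, page_83);
		/* 4 header bytes plus the identifier length from byte 3 */
		off += page_83[3] + 4;
	}
}
#endif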
1131 
1132 static void core_setup_task_attr_emulation(struct se_device *dev)
1133 {
1134 	/*
1135 	 * If this device is from Target_Core_Mod/pSCSI, disable the
1136 	 * SAM Task Attribute emulation.
1137 	 *
1138 	 * This is currently not available in upstream Linux/SCSI Target
1139 	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1140 	 */
1141 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1142 		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1143 		return;
1144 	}
1145 
1146 	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1147 	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1148 		" device\n", dev->transport->name,
1149 		dev->transport->get_device_rev(dev));
1150 }
1151 
1152 static void scsi_dump_inquiry(struct se_device *dev)
1153 {
1154 	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1155 	char buf[17];
1156 	int i, device_type;
1157 	/*
1158 	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1159 	 */
1160 	for (i = 0; i < 8; i++)
1161 		if (wwn->vendor[i] >= 0x20)
1162 			buf[i] = wwn->vendor[i];
1163 		else
1164 			buf[i] = ' ';
1165 	buf[i] = '\0';
1166 	pr_debug("  Vendor: %s\n", buf);
1167 
1168 	for (i = 0; i < 16; i++)
1169 		if (wwn->model[i] >= 0x20)
1170 			buf[i] = wwn->model[i];
1171 		else
1172 			buf[i] = ' ';
1173 	buf[i] = '\0';
1174 	pr_debug("  Model: %s\n", buf);
1175 
1176 	for (i = 0; i < 4; i++)
1177 		if (wwn->revision[i] >= 0x20)
1178 			buf[i] = wwn->revision[i];
1179 		else
1180 			buf[i] = ' ';
1181 	buf[i] = '\0';
1182 	pr_debug("  Revision: %s\n", buf);
1183 
1184 	device_type = dev->transport->get_device_type(dev);
1185 	pr_debug("  Type:   %s ", scsi_device_type(device_type));
1186 	pr_debug("                 ANSI SCSI revision: %02x\n",
1187 				dev->transport->get_device_rev(dev));
1188 }
1189 
1190 struct se_device *transport_add_device_to_core_hba(
1191 	struct se_hba *hba,
1192 	struct se_subsystem_api *transport,
1193 	struct se_subsystem_dev *se_dev,
1194 	u32 device_flags,
1195 	void *transport_dev,
1196 	struct se_dev_limits *dev_limits,
1197 	const char *inquiry_prod,
1198 	const char *inquiry_rev)
1199 {
1200 	int force_pt;
1201 	struct se_device  *dev;
1202 
1203 	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1204 	if (!dev) {
1205 		pr_err("Unable to allocate memory for struct se_device\n");
1206 		return NULL;
1207 	}
1208 
1209 	transport_init_queue_obj(&dev->dev_queue_obj);
1210 	dev->dev_flags		= device_flags;
1211 	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
1212 	dev->dev_ptr		= transport_dev;
1213 	dev->se_hba		= hba;
1214 	dev->se_sub_dev		= se_dev;
1215 	dev->transport		= transport;
1216 	INIT_LIST_HEAD(&dev->dev_list);
1217 	INIT_LIST_HEAD(&dev->dev_sep_list);
1218 	INIT_LIST_HEAD(&dev->dev_tmr_list);
1219 	INIT_LIST_HEAD(&dev->execute_list);
1220 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
1221 	INIT_LIST_HEAD(&dev->state_list);
1222 	INIT_LIST_HEAD(&dev->qf_cmd_list);
1223 	spin_lock_init(&dev->execute_task_lock);
1224 	spin_lock_init(&dev->delayed_cmd_lock);
1225 	spin_lock_init(&dev->dev_reservation_lock);
1226 	spin_lock_init(&dev->dev_status_lock);
1227 	spin_lock_init(&dev->se_port_lock);
1228 	spin_lock_init(&dev->se_tmr_lock);
1229 	spin_lock_init(&dev->qf_cmd_lock);
1230 	atomic_set(&dev->dev_ordered_id, 0);
1231 
1232 	se_dev_set_default_attribs(dev, dev_limits);
1233 
1234 	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1235 	dev->creation_time = get_jiffies_64();
1236 	spin_lock_init(&dev->stats_lock);
1237 
1238 	spin_lock(&hba->device_lock);
1239 	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1240 	hba->dev_count++;
1241 	spin_unlock(&hba->device_lock);
1242 	/*
1243 	 * Setup the SAM Task Attribute emulation for struct se_device
1244 	 */
1245 	core_setup_task_attr_emulation(dev);
1246 	/*
1247 	 * Force PR and ALUA passthrough emulation with internal object use.
1248 	 */
1249 	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1250 	/*
1251 	 * Setup the Reservations infrastructure for struct se_device
1252 	 */
1253 	core_setup_reservations(dev, force_pt);
1254 	/*
1255 	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1256 	 */
1257 	if (core_setup_alua(dev, force_pt) < 0)
1258 		goto out;
1259 
1260 	/*
1261 	 * Startup the struct se_device processing thread
1262 	 */
1263 	dev->process_thread = kthread_run(transport_processing_thread, dev,
1264 					  "LIO_%s", dev->transport->name);
1265 	if (IS_ERR(dev->process_thread)) {
1266 		pr_err("Unable to create kthread: LIO_%s\n",
1267 			dev->transport->name);
1268 		goto out;
1269 	}
1270 	/*
1271 	 * Setup work_queue for QUEUE_FULL
1272 	 */
1273 	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1274 	/*
1275 	 * Preload the initial INQUIRY const values if we are doing
1276 	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1277 	 * passthrough because this is being provided by the backend LLD.
1278 	 * This is required so that transport_get_inquiry() copies these
1279 	 * originals once back into DEV_T10_WWN(dev) for the virtual device
1280 	 * setup.
1281 	 */
1282 	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1283 		if (!inquiry_prod || !inquiry_rev) {
1284 			pr_err("All non TCM/pSCSI plugins require"
1285 				" INQUIRY consts\n");
1286 			goto out;
1287 		}
1288 
1289 		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1290 		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1291 		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1292 	}
1293 	scsi_dump_inquiry(dev);
1294 
1295 	return dev;
1296 out:
1297 	if (!IS_ERR_OR_NULL(dev->process_thread))
1298 		kthread_stop(dev->process_thread);
1299 	spin_lock(&hba->device_lock);
1300 	list_del(&dev->dev_list);
1301 	hba->dev_count--;
1302 	spin_unlock(&hba->device_lock);
1303 
1304 	se_release_vpd_for_dev(dev);
1305 
1306 	kfree(dev);
1307 
1308 	return NULL;
1309 }
1310 EXPORT_SYMBOL(transport_add_device_to_core_hba);
1311 
1312 /*	transport_generic_prepare_cdb():
1313  *
1314  *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1315  *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1316  *	Since we are mapping iSCSI LUNs to SCSI Target IDs, having a
1317  *	non-zero LUN in the CDB would throw the devices and HBAs
1318  *	for a loop.
1319  */
1320 static inline void transport_generic_prepare_cdb(
1321 	unsigned char *cdb)
1322 {
1323 	switch (cdb[0]) {
1324 	case READ_10: /* SBC - RDProtect */
1325 	case READ_12: /* SBC - RDProtect */
1326 	case READ_16: /* SBC - RDProtect */
1327 	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1328 	case VERIFY: /* SBC - VRProtect */
1329 	case VERIFY_16: /* SBC - VRProtect */
1330 	case WRITE_VERIFY: /* SBC - VRProtect */
1331 	case WRITE_VERIFY_12: /* SBC - VRProtect */
1332 	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
1333 		break;
1334 	default:
1335 		cdb[1] &= 0x1f; /* clear logical unit number */
1336 		break;
1337 	}
1338 }
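
/*
 * Worked example (illustrative): a legacy SCSI-2 initiator addressing
 * LUN 3 may send TEST UNIT READY with cdb[1] == 0x60 (LUN in bits 7-5
 * of byte 1).  The default case above masks with 0x1f, so 0x60 becomes
 * 0x00 before the CDB reaches the backend.
 */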
1339 
1340 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1341 
1342 /*
1343  * Used by fabric modules containing a local struct se_cmd within their
1344  * fabric dependent per I/O descriptor.
1345  */
1346 void transport_init_se_cmd(
1347 	struct se_cmd *cmd,
1348 	struct target_core_fabric_ops *tfo,
1349 	struct se_session *se_sess,
1350 	u32 data_length,
1351 	int data_direction,
1352 	int task_attr,
1353 	unsigned char *sense_buffer)
1354 {
1355 	INIT_LIST_HEAD(&cmd->se_lun_node);
1356 	INIT_LIST_HEAD(&cmd->se_delayed_node);
1357 	INIT_LIST_HEAD(&cmd->se_qf_node);
1358 	INIT_LIST_HEAD(&cmd->se_queue_node);
1359 	INIT_LIST_HEAD(&cmd->se_cmd_list);
1360 	INIT_LIST_HEAD(&cmd->execute_list);
1361 	INIT_LIST_HEAD(&cmd->state_list);
1362 	init_completion(&cmd->transport_lun_fe_stop_comp);
1363 	init_completion(&cmd->transport_lun_stop_comp);
1364 	init_completion(&cmd->t_transport_stop_comp);
1365 	init_completion(&cmd->cmd_wait_comp);
1366 	init_completion(&cmd->task_stop_comp);
1367 	spin_lock_init(&cmd->t_state_lock);
1368 	cmd->transport_state = CMD_T_DEV_ACTIVE;
1369 
1370 	cmd->se_tfo = tfo;
1371 	cmd->se_sess = se_sess;
1372 	cmd->data_length = data_length;
1373 	cmd->data_direction = data_direction;
1374 	cmd->sam_task_attr = task_attr;
1375 	cmd->sense_buffer = sense_buffer;
1376 
1377 	cmd->state_active = false;
1378 }
1379 EXPORT_SYMBOL(transport_init_se_cmd);
1380 
1381 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1382 {
1383 	/*
1384 	 * Check if SAM Task Attribute emulation is enabled for this
1385 	 * struct se_device storage object
1386 	 */
1387 	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1388 		return 0;
1389 
1390 	if (cmd->sam_task_attr == MSG_ACA_TAG) {
1391 		pr_debug("SAM Task Attribute ACA"
1392 			" emulation is not supported\n");
1393 		return -EINVAL;
1394 	}
1395 	/*
1396 	 * Used to determine when ORDERED commands should go from
1397 	 * Dormant to Active status.
1398 	 */
1399 	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1400 	smp_mb__after_atomic_inc();
1401 	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1402 			cmd->se_ordered_id, cmd->sam_task_attr,
1403 			cmd->se_dev->transport->name);
1404 	return 0;
1405 }
1406 
1407 /*	target_setup_cmd_from_cdb():
1408  *
1409  *	Called from fabric RX Thread.
1410  */
1411 int target_setup_cmd_from_cdb(
1412 	struct se_cmd *cmd,
1413 	unsigned char *cdb)
1414 {
1415 	int ret;
1416 
1417 	transport_generic_prepare_cdb(cdb);
1418 	/*
1419 	 * Ensure that the received CDB is less than the max (252 + 8) bytes
1420 	 * for VARIABLE_LENGTH_CMD
1421 	 */
1422 	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1423 		pr_err("Received SCSI CDB with command_size: %d that"
1424 			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1425 			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1426 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1427 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1428 		return -EINVAL;
1429 	}
1430 	/*
1431 	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1432 	 * allocate the additional extended CDB buffer now..  Otherwise
1433 	 * setup the pointer from __t_task_cdb to t_task_cdb.
1434 	 */
1435 	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1436 		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1437 						GFP_KERNEL);
1438 		if (!cmd->t_task_cdb) {
1439 			pr_err("Unable to allocate cmd->t_task_cdb"
1440 				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1441 				scsi_command_size(cdb),
1442 				(unsigned long)sizeof(cmd->__t_task_cdb));
1443 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1444 			cmd->scsi_sense_reason =
1445 					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1446 			return -ENOMEM;
1447 		}
1448 	} else
1449 		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1450 	/*
1451 	 * Copy the original CDB into cmd->t_task_cdb.
1452 	 */
1453 	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1454 	/*
1455 	 * Setup the received CDB based on SCSI defined opcodes and
1456 	 * perform unit attention, persistent reservations and ALUA
1457 	 * checks for virtual device backends.  The cmd->t_task_cdb
1458 	 * pointer is expected to be setup before we reach this point.
1459 	 */
1460 	ret = transport_generic_cmd_sequencer(cmd, cdb);
1461 	if (ret < 0)
1462 		return ret;
1463 	/*
1464 	 * Check for SAM Task Attribute Emulation
1465 	 */
1466 	if (transport_check_alloc_task_attr(cmd) < 0) {
1467 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1468 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1469 		return -EINVAL;
1470 	}
1471 	spin_lock(&cmd->se_lun->lun_sep_lock);
1472 	if (cmd->se_lun->lun_sep)
1473 		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1474 	spin_unlock(&cmd->se_lun->lun_sep_lock);
1475 	return 0;
1476 }
1477 EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1478 
1479 /*
1480  * Used by fabric module frontends to queue tasks directly.
1481  * May only be called from process context.
1482  */
1483 int transport_handle_cdb_direct(
1484 	struct se_cmd *cmd)
1485 {
1486 	int ret;
1487 
1488 	if (!cmd->se_lun) {
1489 		dump_stack();
1490 		pr_err("cmd->se_lun is NULL\n");
1491 		return -EINVAL;
1492 	}
1493 	if (in_interrupt()) {
1494 		dump_stack();
1495 		pr_err("transport_generic_handle_cdb cannot be called"
1496 				" from interrupt context\n");
1497 		return -EINVAL;
1498 	}
1499 	/*
1500 	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
1501 	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1502 	 * in existing usage to ensure that outstanding descriptors are handled
1503 	 * correctly during shutdown via transport_wait_for_tasks()
1504 	 *
1505 	 * Also, we don't take cmd->t_state_lock here as we only expect
1506 	 * this to be called for initial descriptor submission.
1507 	 */
1508 	cmd->t_state = TRANSPORT_NEW_CMD;
1509 	cmd->transport_state |= CMD_T_ACTIVE;
1510 
1511 	/*
1512 	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1513 	 * so follow TRANSPORT_NEW_CMD processing thread context usage
1514 	 * and call transport_generic_request_failure() if necessary..
1515 	 */
1516 	ret = transport_generic_new_cmd(cmd);
1517 	if (ret < 0)
1518 		transport_generic_request_failure(cmd);
1519 
1520 	return 0;
1521 }
1522 EXPORT_SYMBOL(transport_handle_cdb_direct);
1523 
1524 /**
1525  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1526  *
1527  * @se_cmd: command descriptor to submit
1528  * @se_sess: associated se_sess for endpoint
1529  * @cdb: pointer to SCSI CDB
1530  * @sense: pointer to SCSI sense buffer
1531  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1532  * @data_length: fabric expected data transfer length
1533  * @task_attr: SAM task attribute
1534  * @data_dir: DMA data direction
1535  * @flags: flags for command submission from target_sc_flags_tables
1536  *
1537  * This may only be called from process context, and also currently
1538  * assumes internal allocation of fabric payload buffer by target-core.
1539  **/
1540 void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1541 		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1542 		u32 data_length, int task_attr, int data_dir, int flags)
1543 {
1544 	struct se_portal_group *se_tpg;
1545 	int rc;
1546 
1547 	se_tpg = se_sess->se_tpg;
1548 	BUG_ON(!se_tpg);
1549 	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1550 	BUG_ON(in_interrupt());
1551 	/*
1552 	 * Initialize se_cmd for target operation.  From this point
1553 	 * exceptions are handled by sending exception status via
1554 	 * target_core_fabric_ops->queue_status() callback
1555 	 */
1556 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1557 				data_length, data_dir, task_attr, sense);
1558 	if (flags & TARGET_SCF_UNKNOWN_SIZE)
1559 		se_cmd->unknown_data_length = 1;
1560 	/*
1561 	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1562 	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
1563 	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1564 	 * kref_put() to happen during fabric packet acknowledgement.
1565 	 */
1566 	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1567 	/*
1568 	 * Signal bidirectional data payloads to target-core
1569 	 */
1570 	if (flags & TARGET_SCF_BIDI_OP)
1571 		se_cmd->se_cmd_flags |= SCF_BIDI;
1572 	/*
1573 	 * Locate se_lun pointer and attach it to struct se_cmd
1574 	 */
1575 	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
1576 		transport_send_check_condition_and_sense(se_cmd,
1577 				se_cmd->scsi_sense_reason, 0);
1578 		target_put_sess_cmd(se_sess, se_cmd);
1579 		return;
1580 	}
1581 	/*
1582 	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
1583 	 * allocate the necessary tasks to complete the received CDB+data
1584 	 */
1585 	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1586 	if (rc != 0) {
1587 		transport_generic_request_failure(se_cmd);
1588 		return;
1589 	}
1590 
1591 	/*
1592 	 * Check if we need to delay processing because of ALUA
1593 	 * Active/NonOptimized primary access state..
1594 	 */
1595 	core_alua_check_nonop_delay(se_cmd);
1596 
1597 	/*
1598 	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
1599 	 * for immediate execution of READs, otherwise wait for
1600 	 * transport_generic_handle_data() to be called for WRITEs
1601 	 * when fabric has filled the incoming buffer.
1602 	 */
1603 	transport_handle_cdb_direct(se_cmd);
1604 	return;
1605 }
1606 EXPORT_SYMBOL(target_submit_cmd);
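
/*
 * Illustrative sketch (not compiled): a minimal fabric receive path
 * handing a freshly decoded SCSI command to target-core.  The
 * my_fabric_cmd container and its fields are assumptions for this
 * example; flags of 0 requests no BIDI/ACK_KREF special casing.
 */
#if 0
struct my_fabric_cmd {
	struct se_cmd se_cmd;
	struct se_session *se_sess;
	unsigned char cdb[16];
	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
	u32 unpacked_lun;
	u32 data_len;
	enum dma_data_direction data_dir;
};

static void my_fabric_handle_scsi_cmd(struct my_fabric_cmd *fc)
{
	target_submit_cmd(&fc->se_cmd, fc->se_sess, fc->cdb, fc->sense_buf,
			  fc->unpacked_lun, fc->data_len, MSG_SIMPLE_TAG,
			  fc->data_dir, 0);
}
#endif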
1607 
1608 static void target_complete_tmr_failure(struct work_struct *work)
1609 {
1610 	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1611 
1612 	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1613 	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1614 	transport_generic_free_cmd(se_cmd, 0);
1615 }
1616 
1617 /**
1618  * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1619  *                     for TMR CDBs
1620  *
1621  * @se_cmd: command descriptor to submit
1622  * @se_sess: associated se_sess for endpoint
1623  * @sense: pointer to SCSI sense buffer
1624  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1625  * @fabric_tmr_ptr: fabric context for TMR req
1626  * @tm_type: Type of TM request
1627  * @gfp: gfp type for caller
1628  * @tag: referenced task tag for TMR_ABORT_TASK
1629  * @flags: submit cmd flags
1630  *
1631  * Callable from all contexts.
1632  **/
1633 
1634 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1635 		unsigned char *sense, u32 unpacked_lun,
1636 		void *fabric_tmr_ptr, unsigned char tm_type,
1637 		gfp_t gfp, unsigned int tag, int flags)
1638 {
1639 	struct se_portal_group *se_tpg;
1640 	int ret;
1641 
1642 	se_tpg = se_sess->se_tpg;
1643 	BUG_ON(!se_tpg);
1644 
1645 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1646 			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
1647 	/*
1648 	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1649 	 * allocation failure.
1650 	 */
1651 	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1652 	if (ret < 0)
1653 		return -ENOMEM;
1654 
1655 	if (tm_type == TMR_ABORT_TASK)
1656 		se_cmd->se_tmr_req->ref_task_tag = tag;
1657 
1658 	/* See target_submit_cmd for commentary */
1659 	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1660 
1661 	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1662 	if (ret) {
1663 		/*
1664 		 * For callback during failure handling, push this work off
1665 		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1666 		 */
1667 		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1668 		schedule_work(&se_cmd->work);
1669 		return 0;
1670 	}
1671 	transport_generic_handle_tmr(se_cmd);
1672 	return 0;
1673 }
1674 EXPORT_SYMBOL(target_submit_tmr);
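
/*
 * Illustrative sketch (not compiled): issuing an ABORT_TASK TMR for a
 * referenced task tag.  NULL is passed for the sense buffer and fabric
 * TMR pointer here; my_fabric_abort and its arguments are assumptions.
 */
#if 0
static int my_fabric_abort(struct se_cmd *se_cmd, struct se_session *sess,
			   u32 unpacked_lun, unsigned int ref_tag)
{
	return target_submit_tmr(se_cmd, sess, NULL, unpacked_lun, NULL,
				 TMR_ABORT_TASK, GFP_KERNEL, ref_tag, 0);
}
#endif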
1675 
1676 /*
1677  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1678  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1679  * complete setup in TCM process context w/ TFO->new_cmd_map().
1680  */
1681 int transport_generic_handle_cdb_map(
1682 	struct se_cmd *cmd)
1683 {
1684 	if (!cmd->se_lun) {
1685 		dump_stack();
1686 		pr_err("cmd->se_lun is NULL\n");
1687 		return -EINVAL;
1688 	}
1689 
1690 	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
1691 	return 0;
1692 }
1693 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1694 
1695 /*	transport_generic_handle_data():
1696  *
1697  *
1698  */
1699 int transport_generic_handle_data(
1700 	struct se_cmd *cmd)
1701 {
1702 	/*
1703 	 * For the software fabric case, we assume the nexus is being
1704 	 * failed/shutdown when signals are pending from the kthread context
1705 	 * caller, so we return a failure.  For the HW target mode case running
1706 	 * in interrupt code, the signal_pending() check is skipped.
1707 	 */
1708 	if (!in_interrupt() && signal_pending(current))
1709 		return -EPERM;
1710 	/*
1711 	 * If the received CDB has already been ABORTED by the generic
1712 	 * target engine, we now call transport_check_aborted_status()
1713 	 * to queue any delayed TASK_ABORTED status for the received CDB to the
1714 	 * fabric module as we are expecting no further incoming DATA OUT
1715 	 * sequences at this point.
1716 	 */
1717 	if (transport_check_aborted_status(cmd, 1) != 0)
1718 		return 0;
1719 
1720 	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
1721 	return 0;
1722 }
1723 EXPORT_SYMBOL(transport_generic_handle_data);
1724 
1725 /*	transport_generic_handle_tmr():
1726  *
1727  *
1728  */
1729 int transport_generic_handle_tmr(
1730 	struct se_cmd *cmd)
1731 {
1732 	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
1733 	return 0;
1734 }
1735 EXPORT_SYMBOL(transport_generic_handle_tmr);
1736 
1737 /*
1738  * If the cmd is active, request it to be stopped and sleep until it
1739  * has completed.
1740  */
1741 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1742 {
1743 	bool was_active = false;
1744 
1745 	if (cmd->transport_state & CMD_T_BUSY) {
1746 		cmd->transport_state |= CMD_T_REQUEST_STOP;
1747 		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1748 
1749 		pr_debug("cmd %p waiting to complete\n", cmd);
1750 		wait_for_completion(&cmd->task_stop_comp);
1751 		pr_debug("cmd %p stopped successfully\n", cmd);
1752 
1753 		spin_lock_irqsave(&cmd->t_state_lock, *flags);
1754 		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
1755 		cmd->transport_state &= ~CMD_T_BUSY;
1756 		was_active = true;
1757 	}
1758 
1759 	return was_active;
1760 }
1761 
1762 /*
1763  * Handle SAM-esque emulation for generic transport request failures.
1764  */
1765 void transport_generic_request_failure(struct se_cmd *cmd)
1766 {
1767 	int ret = 0;
1768 
1769 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1770 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1771 		cmd->t_task_cdb[0]);
1772 	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1773 		cmd->se_tfo->get_cmd_state(cmd),
1774 		cmd->t_state, cmd->scsi_sense_reason);
1775 	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1776 		(cmd->transport_state & CMD_T_ACTIVE) != 0,
1777 		(cmd->transport_state & CMD_T_STOP) != 0,
1778 		(cmd->transport_state & CMD_T_SENT) != 0);
1779 
1780 	/*
1781 	 * For SAM Task Attribute emulation for failed struct se_cmd
1782 	 */
1783 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1784 		transport_complete_task_attr(cmd);
1785 
1786 	switch (cmd->scsi_sense_reason) {
1787 	case TCM_NON_EXISTENT_LUN:
1788 	case TCM_UNSUPPORTED_SCSI_OPCODE:
1789 	case TCM_INVALID_CDB_FIELD:
1790 	case TCM_INVALID_PARAMETER_LIST:
1791 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1792 	case TCM_UNKNOWN_MODE_PAGE:
1793 	case TCM_WRITE_PROTECTED:
1794 	case TCM_CHECK_CONDITION_ABORT_CMD:
1795 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1796 	case TCM_CHECK_CONDITION_NOT_READY:
1797 		break;
1798 	case TCM_RESERVATION_CONFLICT:
1799 		/*
1800 		 * No SENSE Data payload for this case, set SCSI Status
1801 		 * and queue the response to $FABRIC_MOD.
1802 		 *
1803 		 * Uses linux/include/scsi/scsi.h SAM status codes defs
1804 		 */
1805 		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1806 		/*
1807 		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1808 		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1809 		 * CONFLICT STATUS.
1810 		 *
1811 		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1812 		 */
1813 		if (cmd->se_sess &&
1814 		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1815 			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1816 				cmd->orig_fe_lun, 0x2C,
1817 				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1818 
1819 		ret = cmd->se_tfo->queue_status(cmd);
1820 		if (ret == -EAGAIN || ret == -ENOMEM)
1821 			goto queue_full;
1822 		goto check_stop;
1823 	default:
1824 		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1825 			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
1826 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1827 		break;
1828 	}
1829 	/*
1830 	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
1831 	 * make the call to transport_send_check_condition_and_sense()
1832 	 * directly.  Otherwise expect the fabric to make the call to
1833 	 * transport_send_check_condition_and_sense() after handling
1834 	 * possible unsolicited write data payloads.
1835 	 */
1836 	ret = transport_send_check_condition_and_sense(cmd,
1837 			cmd->scsi_sense_reason, 0);
1838 	if (ret == -EAGAIN || ret == -ENOMEM)
1839 		goto queue_full;
1840 
1841 check_stop:
1842 	transport_lun_remove_cmd(cmd);
1843 	transport_cmd_check_stop_to_fabric(cmd);
1845 	return;
1846 
1847 queue_full:
1848 	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1849 	transport_handle_queue_full(cmd, cmd->se_dev);
1850 }
1851 EXPORT_SYMBOL(transport_generic_request_failure);
1852 
1853 static inline u32 transport_lba_21(unsigned char *cdb)
1854 {
1855 	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1856 }
1857 
1858 static inline u32 transport_lba_32(unsigned char *cdb)
1859 {
1860 	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1861 }
1862 
1863 static inline unsigned long long transport_lba_64(unsigned char *cdb)
1864 {
1865 	unsigned int __v1, __v2;
1866 
1867 	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1868 	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1869 
1870 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1871 }
1872 
1873 /*
1874  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
1875  */
1876 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
1877 {
1878 	unsigned int __v1, __v2;
1879 
1880 	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1881 	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1882 
1883 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1884 }
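
/*
 * Worked example for the LBA extraction helpers above: SCSI CDB fields
 * are big-endian, so a READ_10 with cdb[2..5] = { 0x00, 0x12, 0x34, 0x56 }
 * yields transport_lba_32() = (0x00 << 24) | (0x12 << 16) | (0x34 << 8) |
 * 0x56 = 0x00123456.  transport_lba_64() likewise assembles cdb[2..5]
 * into the upper and cdb[6..9] into the lower 32 bits of the 64-bit LBA.
 */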
1885 
1886 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1887 {
1888 	unsigned long flags;
1889 
1890 	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1891 	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1892 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1893 }
1894 
1895 /*
1896  * Called from Fabric Module context from transport_execute_tasks()
1897  *
1898  * The return of this function determines if the tasks from struct se_cmd
1899  * get added to the execution queue in transport_execute_tasks(),
1900  * or are added to the delayed or ordered lists here.
1901  */
1902 static inline int transport_execute_task_attr(struct se_cmd *cmd)
1903 {
1904 	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1905 		return 1;
1906 	/*
1907 	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 to
1908 	 * move the tasks of the passed struct se_cmd to the front of the list.
1909 	 */
1910 	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
1911 		pr_debug("Added HEAD_OF_QUEUE for CDB:"
1912 			" 0x%02x, se_ordered_id: %u\n",
1913 			cmd->t_task_cdb[0],
1914 			cmd->se_ordered_id);
1915 		return 1;
1916 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
1917 		atomic_inc(&cmd->se_dev->dev_ordered_sync);
1918 		smp_mb__after_atomic_inc();
1919 
1920 		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
1921 				" list, se_ordered_id: %u\n",
1922 				cmd->t_task_cdb[0],
1923 				cmd->se_ordered_id);
1924 		/*
1925 		 * Add ORDERED command to tail of execution queue if
1926 		 * no other older commands exist that need to be
1927 		 * completed first.
1928 		 */
1929 		if (!atomic_read(&cmd->se_dev->simple_cmds))
1930 			return 1;
1931 	} else {
1932 		/*
1933 		 * For SIMPLE and UNTAGGED Task Attribute commands
1934 		 */
1935 		atomic_inc(&cmd->se_dev->simple_cmds);
1936 		smp_mb__after_atomic_inc();
1937 	}
1938 	/*
1939 	 * If one or more outstanding ORDERED task attributes exist, the
1940 	 * tasks built for the passed struct se_cmd cannot be made Active
1941 	 * yet and must wait on this struct se_device.
1942 	 */
1943 	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
1944 		/*
1945 		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
1946 		 * will be drained upon completion of HEAD_OF_QUEUE task.
1947 		 */
1948 		spin_lock(&cmd->se_dev->delayed_cmd_lock);
1949 		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
1950 		list_add_tail(&cmd->se_delayed_node,
1951 				&cmd->se_dev->delayed_cmd_list);
1952 		spin_unlock(&cmd->se_dev->delayed_cmd_lock);
1953 
1954 		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
1955 			" delayed CMD list, se_ordered_id: %u\n",
1956 			cmd->t_task_cdb[0], cmd->sam_task_attr,
1957 			cmd->se_ordered_id);
1958 		/*
1959 		 * Return zero to let transport_execute_tasks() know
1960 		 * not to add the delayed tasks to the execution list.
1961 		 */
1962 		return 0;
1963 	}
1964 	/*
1965 	 * Otherwise, no ORDERED task attributes exist..
1966 	 */
1967 	return 1;
1968 }
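
/*
 * Example sequencing (illustrative): with two SIMPLE commands in flight
 * (simple_cmds == 2), an arriving ORDERED command increments
 * dev_ordered_sync and is parked on delayed_cmd_list until simple_cmds
 * drains to zero.  Any SIMPLE command arriving behind it also sees
 * dev_ordered_sync != 0 and is parked, preserving the ORDERED barrier
 * until transport_complete_task_attr() releases the delayed commands.
 */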
1969 
1970 /*
1971  * Called from fabric module context in transport_generic_new_cmd() and
1972  * transport_generic_process_write()
1973  */
1974 static void transport_execute_tasks(struct se_cmd *cmd)
1975 {
1976 	int add_tasks;
1977 	struct se_device *se_dev = cmd->se_dev;
1978 	/*
1979 	 * Call transport_cmd_check_stop() to see if a fabric exception
1980 	 * has occurred that prevents execution.
1981 	 */
1982 	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
1983 		/*
1984 		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
1985 		 * attribute for the tasks of the received struct se_cmd CDB
1986 		 */
1987 		add_tasks = transport_execute_task_attr(cmd);
1988 		if (add_tasks) {
1989 			__transport_execute_tasks(se_dev, cmd);
1990 			return;
1991 		}
1992 	}
1993 	__transport_execute_tasks(se_dev, NULL);
1994 }
1995 
1996 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
1997 {
1998 	int error;
1999 	struct se_cmd *cmd = NULL;
2000 	unsigned long flags;
2001 
2002 check_depth:
2003 	spin_lock_irq(&dev->execute_task_lock);
2004 	if (new_cmd != NULL)
2005 		__target_add_to_execute_list(new_cmd);
2006 
2007 	if (list_empty(&dev->execute_list)) {
2008 		spin_unlock_irq(&dev->execute_task_lock);
2009 		return 0;
2010 	}
2011 	cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
2012 	__target_remove_from_execute_list(cmd);
2013 	spin_unlock_irq(&dev->execute_task_lock);
2014 
2015 	spin_lock_irqsave(&cmd->t_state_lock, flags);
2016 	cmd->transport_state |= CMD_T_BUSY;
2017 	cmd->transport_state |= CMD_T_SENT;
2018 
2019 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2020 
2021 	if (cmd->execute_cmd)
2022 		error = cmd->execute_cmd(cmd);
2023 	else {
2024 		error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
2025 				cmd->t_data_nents, cmd->data_direction);
2026 	}
2027 
2028 	if (error != 0) {
2029 		spin_lock_irqsave(&cmd->t_state_lock, flags);
2030 		cmd->transport_state &= ~CMD_T_BUSY;
2031 		cmd->transport_state &= ~CMD_T_SENT;
2032 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2033 
2034 		transport_generic_request_failure(cmd);
2035 	}
2036 
2037 	new_cmd = NULL;
2038 	goto check_depth;
2039 
2040 	return 0;
2041 }
2042 
2043 static inline u32 transport_get_sectors_6(
2044 	unsigned char *cdb,
2045 	struct se_cmd *cmd,
2046 	int *ret)
2047 {
2048 	struct se_device *dev = cmd->se_dev;
2049 
2050 	/*
2051 	 * Assume TYPE_DISK when no struct se_device is attached.
2052 	 * Use 8-bit sector value.
2053 	 */
2054 	if (!dev)
2055 		goto type_disk;
2056 
2057 	/*
2058 	 * Use 24-bit allocation length for TYPE_TAPE.
2059 	 */
2060 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2061 		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2062 
2063 	/*
2064 	 * Everything else assumes the TYPE_DISK sector CDB layout.
2065 	 * Use 8-bit sector value.  SBC-3 says:
2066 	 *
2067 	 *   A TRANSFER LENGTH field set to zero specifies that 256
2068 	 *   logical blocks shall be written.  Any other value
2069 	 *   specifies the number of logical blocks that shall be
2070 	 *   written.
2071 	 */
2072 type_disk:
2073 	return cdb[4] ? : 256;
2074 }
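
/*
 * Note on the cdb[4] ? : 256 idiom above (GCC conditional with omitted
 * middle operand): a 6-byte CDB TRANSFER LENGTH of zero means 256
 * blocks, so cdb[4] = 0x00 yields sectors = 256 while cdb[4] = 0x10
 * yields sectors = 16.
 */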
2075 
2076 static inline u32 transport_get_sectors_10(
2077 	unsigned char *cdb,
2078 	struct se_cmd *cmd,
2079 	int *ret)
2080 {
2081 	struct se_device *dev = cmd->se_dev;
2082 
2083 	/*
2084 	 * Assume TYPE_DISK for non struct se_device objects.
2085 	 * Use 16-bit sector value.
2086 	 */
2087 	if (!dev)
2088 		goto type_disk;
2089 
2090 	/*
2091 	 * The 10-byte variants are not defined in SSC; return an error.
2092 	 */
2093 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2094 		*ret = -EINVAL;
2095 		return 0;
2096 	}
2097 
2098 	/*
2099 	 * Everything else assumes the TYPE_DISK sector CDB layout.
2100 	 * Use 16-bit sector value.
2101 	 */
2102 type_disk:
2103 	return (u32)(cdb[7] << 8) + cdb[8];
2104 }
2105 
2106 static inline u32 transport_get_sectors_12(
2107 	unsigned char *cdb,
2108 	struct se_cmd *cmd,
2109 	int *ret)
2110 {
2111 	struct se_device *dev = cmd->se_dev;
2112 
2113 	/*
2114 	 * Assume TYPE_DISK when no struct se_device is attached.
2115 	 * Use 32-bit sector value.
2116 	 */
2117 	if (!dev)
2118 		goto type_disk;
2119 
2120 	/*
2121 	 * The 12-byte variants are not defined in SSC; return an error.
2122 	 */
2123 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2124 		*ret = -EINVAL;
2125 		return 0;
2126 	}
2127 
2128 	/*
2129 	 * Everything else assumes the TYPE_DISK sector CDB layout.
2130 	 * Use 32-bit sector value.
2131 	 */
2132 type_disk:
2133 	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2134 }
2135 
2136 static inline u32 transport_get_sectors_16(
2137 	unsigned char *cdb,
2138 	struct se_cmd *cmd,
2139 	int *ret)
2140 {
2141 	struct se_device *dev = cmd->se_dev;
2142 
2143 	/*
2144 	 * Assume TYPE_DISK when no struct se_device is attached.
2145 	 * Use 32-bit sector value.
2146 	 */
2147 	if (!dev)
2148 		goto type_disk;
2149 
2150 	/*
2151 	 * Use 24-bit allocation length for TYPE_TAPE.
2152 	 */
2153 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2154 		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2155 
2156 type_disk:
2157 	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2158 		    (cdb[12] << 8) + cdb[13];
2159 }
2160 
2161 /*
2162  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2163  */
2164 static inline u32 transport_get_sectors_32(
2165 	unsigned char *cdb,
2166 	struct se_cmd *cmd,
2167 	int *ret)
2168 {
2169 	/*
2170 	 * Use the 32-bit sector value from the 32-byte extended CDB.
2172 	 */
2173 	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2174 		    (cdb[30] << 8) + cdb[31];
2176 }
2177 
2178 static inline u32 transport_get_size(
2179 	u32 sectors,
2180 	unsigned char *cdb,
2181 	struct se_cmd *cmd)
2182 {
2183 	struct se_device *dev = cmd->se_dev;
2184 
2185 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2186 		if (cdb[1] & 1) { /* sectors */
2187 			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2188 		} else /* bytes */
2189 			return sectors;
2190 	}
2191 
2192 	pr_debug("Returning block_size: %u, sectors: %u == %u for"
2193 		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
2194 		sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2195 		dev->transport->name);
2196 
2197 	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2198 }
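
/*
 * Worked example (illustrative): for a TYPE_DISK backend with a 512-byte
 * block_size, a TRANSFER LENGTH of 8 sectors yields 512 * 8 = 4096 bytes.
 * For TYPE_TAPE the FIXED bit (cdb[1] & 1) selects between a block count
 * (multiplied by block_size) and a raw byte count.
 */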
2199 
2200 static void transport_xor_callback(struct se_cmd *cmd)
2201 {
2202 	unsigned char *buf, *addr;
2203 	struct scatterlist *sg;
2204 	unsigned int offset;
2205 	int i;
2206 	int count;
2207 	/*
2208 	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2209 	 *
2210 	 * 1) read the specified logical block(s);
2211 	 * 2) transfer logical blocks from the data-out buffer;
2212 	 * 3) XOR the logical blocks transferred from the data-out buffer with
2213 	 *    the logical blocks read, storing the resulting XOR data in a buffer;
2214 	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2215 	 *    blocks transferred from the data-out buffer; and
2216 	 * 5) transfer the resulting XOR data to the data-in buffer.
2217 	 */
2218 	buf = kmalloc(cmd->data_length, GFP_KERNEL);
2219 	if (!buf) {
2220 		pr_err("Unable to allocate xor_callback buf\n");
2221 		return;
2222 	}
2223 	/*
2224 	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
2225 	 * into the locally allocated *buf
2226 	 */
2227 	sg_copy_to_buffer(cmd->t_data_sg,
2228 			  cmd->t_data_nents,
2229 			  buf,
2230 			  cmd->data_length);
2231 
2232 	/*
2233 	 * Now perform the XOR against the BIDI read memory located at
2234 	 * cmd->t_bidi_data_sg
2235 	 */
2236 
2237 	offset = 0;
2238 	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2239 		addr = kmap_atomic(sg_page(sg));
2240 		if (!addr)
2241 			goto out;
2242 
2243 		for (i = 0; i < sg->length; i++)
2244 			*(addr + sg->offset + i) ^= *(buf + offset + i);
2245 
2246 		offset += sg->length;
2247 		kunmap_atomic(addr);
2248 	}
2249 
2250 out:
2251 	kfree(buf);
2252 }
2253 
2254 /*
2255  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2256  */
2257 static int transport_get_sense_data(struct se_cmd *cmd)
2258 {
2259 	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2260 	struct se_device *dev = cmd->se_dev;
2261 	unsigned long flags;
2262 	u32 offset = 0;
2263 
2264 	WARN_ON(!cmd->se_lun);
2265 
2266 	if (!dev)
2267 		return 0;
2268 
2269 	spin_lock_irqsave(&cmd->t_state_lock, flags);
2270 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2271 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2272 		return 0;
2273 	}
2274 
2275 	if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2276 		goto out;
2277 
2278 	if (!dev->transport->get_sense_buffer) {
2279 		pr_err("dev->transport->get_sense_buffer is NULL\n");
2280 		goto out;
2281 	}
2282 
2283 	sense_buffer = dev->transport->get_sense_buffer(cmd);
2284 	if (!sense_buffer) {
2285 		pr_err("ITT 0x%08x cmd %p: Unable to locate"
2286 			" sense buffer for task with sense\n",
2287 			cmd->se_tfo->get_task_tag(cmd), cmd);
2288 		goto out;
2289 	}
2290 
2291 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2292 
2293 	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
2294 
2295 	memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);
2296 
2297 	/* Automatically padded */
2298 	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
2299 
2300 	pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
2301 		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
2302 	return 0;
2303 
2304 out:
2305 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2306 	return -1;
2307 }
2308 
2309 static inline long long transport_dev_end_lba(struct se_device *dev)
2310 {
2311 	return dev->transport->get_blocks(dev) + 1;
2312 }
2313 
2314 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2315 {
2316 	struct se_device *dev = cmd->se_dev;
2317 	u32 sectors;
2318 
2319 	if (dev->transport->get_device_type(dev) != TYPE_DISK)
2320 		return 0;
2321 
2322 	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2323 
2324 	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2325 		pr_err("LBA: %llu Sectors: %u exceeds"
2326 			" transport_dev_end_lba(): %llu\n",
2327 			cmd->t_task_lba, sectors,
2328 			transport_dev_end_lba(dev));
2329 		return -EINVAL;
2330 	}
2331 
2332 	return 0;
2333 }
2334 
2335 static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2336 {
2337 	/*
2338 	 * Determine if the received WRITE_SAME is used for direct
2339 	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2340 	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2341 	 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
2342 	 */
2343 	int passthrough = (dev->transport->transport_type ==
2344 				TRANSPORT_PLUGIN_PHBA_PDEV);
2345 
2346 	if (!passthrough) {
2347 		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2348 			pr_err("WRITE_SAME PBDATA and LBDATA"
2349 				" bits not supported for Block Discard"
2350 				" Emulation\n");
2351 			return -ENOSYS;
2352 		}
2353 		/*
2354 		 * Currently for the emulated case we only accept
2355 		 * WRITE_SAME requests with the UNMAP=1 bit set.
2356 		 */
2357 		if (!(flags[0] & 0x08)) {
2358 			pr_err("WRITE_SAME w/o UNMAP bit not"
2359 				" supported for Block Discard Emulation\n");
2360 			return -ENOSYS;
2361 		}
2362 	}
2363 
2364 	return 0;
2365 }
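
/*
 * WRITE_SAME byte 1 bit layout checked above, per SBC-3:
 *
 *	bit 3 (0x08): UNMAP  - required for the emulated discard path
 *	bit 2 (0x04): PBDATA - rejected when emulating
 *	bit 1 (0x02): LBDATA - rejected when emulating
 *
 * e.g. flags[0] = 0x08 is accepted for IBLOCK discard emulation, while
 * 0x04 or 0x02 fails with -ENOSYS unless running in pSCSI passthrough.
 */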
2366 
2367 /*	transport_generic_cmd_sequencer():
2368  *
2369  *	Generic Command Sequencer that should work for most DAS transport
2370  *	drivers.
2371  *
2372  *	Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
2373  *	RX Thread.
2374  *
2375  *	FIXME: Need to support other SCSI OPCODES here as well.
2376  */
2377 static int transport_generic_cmd_sequencer(
2378 	struct se_cmd *cmd,
2379 	unsigned char *cdb)
2380 {
2381 	struct se_device *dev = cmd->se_dev;
2382 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2383 	int ret = 0, sector_ret = 0, passthrough;
2384 	u32 sectors = 0, size = 0, pr_reg_type = 0;
2385 	u16 service_action;
2386 	u8 alua_ascq = 0;
2387 	/*
2388 	 * Check for an existing UNIT ATTENTION condition
2389 	 */
2390 	if (core_scsi3_ua_check(cmd, cdb) < 0) {
2391 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2392 		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2393 		return -EINVAL;
2394 	}
2395 	/*
2396 	 * Check status of Asymmetric Logical Unit Assignment port
2397 	 */
2398 	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2399 	if (ret != 0) {
2400 		/*
2401 		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
2402 		 * The ALUA additional sense code qualifier (ASCQ) is determined
2403 		 * by the ALUA primary or secondary access state..
2404 		 */
2405 		if (ret > 0) {
2406 			pr_debug("[%s]: ALUA TG Port not available,"
2407 				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2408 				cmd->se_tfo->get_fabric_name(), alua_ascq);
2409 
2410 			transport_set_sense_codes(cmd, 0x04, alua_ascq);
2411 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2412 			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2413 			return -EINVAL;
2414 		}
2415 		goto out_invalid_cdb_field;
2416 	}
2417 	/*
2418 	 * Check status for SPC-3 Persistent Reservations
2419 	 */
2420 	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2421 		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2422 					cmd, cdb, pr_reg_type) != 0) {
2423 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2424 			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2425 			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2426 			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2427 			return -EBUSY;
2428 		}
2429 		/*
2430 		 * This means the CDB is allowed for the SCSI Initiator port
2431 		 * when said port is *NOT* holding the legacy SPC-2 or
2432 		 * SPC-3 Persistent Reservation.
2433 		 */
2434 	}
2435 
2436 	/*
2437 	 * If we operate in passthrough mode we skip most CDB emulation and
2438 	 * instead hand the commands down to the physical SCSI device.
2439 	 */
2440 	passthrough =
2441 		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2442 
2443 	switch (cdb[0]) {
2444 	case READ_6:
2445 		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2446 		if (sector_ret)
2447 			goto out_unsupported_cdb;
2448 		size = transport_get_size(sectors, cdb, cmd);
2449 		cmd->t_task_lba = transport_lba_21(cdb);
2450 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2451 		break;
2452 	case READ_10:
2453 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2454 		if (sector_ret)
2455 			goto out_unsupported_cdb;
2456 		size = transport_get_size(sectors, cdb, cmd);
2457 		cmd->t_task_lba = transport_lba_32(cdb);
2458 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2459 		break;
2460 	case READ_12:
2461 		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2462 		if (sector_ret)
2463 			goto out_unsupported_cdb;
2464 		size = transport_get_size(sectors, cdb, cmd);
2465 		cmd->t_task_lba = transport_lba_32(cdb);
2466 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2467 		break;
2468 	case READ_16:
2469 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2470 		if (sector_ret)
2471 			goto out_unsupported_cdb;
2472 		size = transport_get_size(sectors, cdb, cmd);
2473 		cmd->t_task_lba = transport_lba_64(cdb);
2474 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2475 		break;
2476 	case WRITE_6:
2477 		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2478 		if (sector_ret)
2479 			goto out_unsupported_cdb;
2480 		size = transport_get_size(sectors, cdb, cmd);
2481 		cmd->t_task_lba = transport_lba_21(cdb);
2482 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2483 		break;
2484 	case WRITE_10:
2485 	case WRITE_VERIFY:
2486 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2487 		if (sector_ret)
2488 			goto out_unsupported_cdb;
2489 		size = transport_get_size(sectors, cdb, cmd);
2490 		cmd->t_task_lba = transport_lba_32(cdb);
2491 		if (cdb[1] & 0x8)
2492 			cmd->se_cmd_flags |= SCF_FUA;
2493 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2494 		break;
2495 	case WRITE_12:
2496 		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2497 		if (sector_ret)
2498 			goto out_unsupported_cdb;
2499 		size = transport_get_size(sectors, cdb, cmd);
2500 		cmd->t_task_lba = transport_lba_32(cdb);
2501 		if (cdb[1] & 0x8)
2502 			cmd->se_cmd_flags |= SCF_FUA;
2503 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2504 		break;
2505 	case WRITE_16:
2506 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2507 		if (sector_ret)
2508 			goto out_unsupported_cdb;
2509 		size = transport_get_size(sectors, cdb, cmd);
2510 		cmd->t_task_lba = transport_lba_64(cdb);
2511 		if (cdb[1] & 0x8)
2512 			cmd->se_cmd_flags |= SCF_FUA;
2513 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2514 		break;
2515 	case XDWRITEREAD_10:
2516 		if ((cmd->data_direction != DMA_TO_DEVICE) ||
2517 		    !(cmd->se_cmd_flags & SCF_BIDI))
2518 			goto out_invalid_cdb_field;
2519 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2520 		if (sector_ret)
2521 			goto out_unsupported_cdb;
2522 		size = transport_get_size(sectors, cdb, cmd);
2523 		cmd->t_task_lba = transport_lba_32(cdb);
2524 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2525 
2526 		/*
2527 		 * Do not allow BIDI commands for passthrough mode.
2528 		 */
2529 		if (passthrough)
2530 			goto out_unsupported_cdb;
2531 
2532 		/*
2533 		 * Setup BIDI XOR callback to be run after I/O completion.
2534 		 */
2535 		cmd->transport_complete_callback = &transport_xor_callback;
2536 		if (cdb[1] & 0x8)
2537 			cmd->se_cmd_flags |= SCF_FUA;
2538 		break;
2539 	case VARIABLE_LENGTH_CMD:
2540 		service_action = get_unaligned_be16(&cdb[8]);
2541 		switch (service_action) {
2542 		case XDWRITEREAD_32:
2543 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2544 			if (sector_ret)
2545 				goto out_unsupported_cdb;
2546 			size = transport_get_size(sectors, cdb, cmd);
2547 			/*
2548 			 * Use WRITE_32 and READ_32 opcodes for the emulated
2549 			 * XDWRITE_READ_32 logic.
2550 			 */
2551 			cmd->t_task_lba = transport_lba_64_ext(cdb);
2552 			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2553 
2554 			/*
2555 			 * Do not allow BIDI commands for passthrough mode.
2556 			 */
2557 			if (passthrough)
2558 				goto out_unsupported_cdb;
2559 
2560 			/*
2561 			 * Setup BIDI XOR callback to be run after I/O
2562 			 * completion.
2563 			 */
2564 			cmd->transport_complete_callback = &transport_xor_callback;
2565 			if (cdb[1] & 0x8)
2566 				cmd->se_cmd_flags |= SCF_FUA;
2567 			break;
2568 		case WRITE_SAME_32:
2569 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2570 			if (sector_ret)
2571 				goto out_unsupported_cdb;
2572 
2573 			if (sectors)
2574 				size = transport_get_size(1, cdb, cmd);
2575 			else {
2576 				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2577 				       " supported\n");
2578 				goto out_invalid_cdb_field;
2579 			}
2580 
2581 			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2582 			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2583 
2584 			if (target_check_write_same_discard(&cdb[10], dev) < 0)
2585 				goto out_unsupported_cdb;
2586 			if (!passthrough)
2587 				cmd->execute_cmd = target_emulate_write_same;
2588 			break;
2589 		default:
2590 			pr_err("VARIABLE_LENGTH_CMD service action"
2591 				" 0x%04x not supported\n", service_action);
2592 			goto out_unsupported_cdb;
2593 		}
2594 		break;
2595 	case MAINTENANCE_IN:
2596 		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2597 			/* MAINTENANCE_IN from SCC-2 */
2598 			/*
2599 			 * Check for emulated MI_REPORT_TARGET_PGS.
2600 			 */
2601 			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
2602 			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2603 				cmd->execute_cmd =
2604 					target_emulate_report_target_port_groups;
2605 			}
2606 			size = (cdb[6] << 24) | (cdb[7] << 16) |
2607 			       (cdb[8] << 8) | cdb[9];
2608 		} else {
2609 			/* GPCMD_SEND_KEY from multi media commands */
2610 			size = (cdb[8] << 8) + cdb[9];
2611 		}
2612 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2613 		break;
2614 	case MODE_SELECT:
2615 		size = cdb[4];
2616 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2617 		break;
2618 	case MODE_SELECT_10:
2619 		size = (cdb[7] << 8) + cdb[8];
2620 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2621 		break;
2622 	case MODE_SENSE:
2623 		size = cdb[4];
2624 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2625 		if (!passthrough)
2626 			cmd->execute_cmd = target_emulate_modesense;
2627 		break;
2628 	case MODE_SENSE_10:
2629 		size = (cdb[7] << 8) + cdb[8];
2630 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2631 		if (!passthrough)
2632 			cmd->execute_cmd = target_emulate_modesense;
2633 		break;
2634 	case GPCMD_READ_BUFFER_CAPACITY:
2635 	case GPCMD_SEND_OPC:
2636 	case LOG_SELECT:
2637 	case LOG_SENSE:
2638 		size = (cdb[7] << 8) + cdb[8];
2639 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2640 		break;
2641 	case READ_BLOCK_LIMITS:
2642 		size = READ_BLOCK_LEN;
2643 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2644 		break;
2645 	case GPCMD_GET_CONFIGURATION:
2646 	case GPCMD_READ_FORMAT_CAPACITIES:
2647 	case GPCMD_READ_DISC_INFO:
2648 	case GPCMD_READ_TRACK_RZONE_INFO:
2649 		size = (cdb[7] << 8) + cdb[8];
2650 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2651 		break;
2652 	case PERSISTENT_RESERVE_IN:
2653 		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2654 			cmd->execute_cmd = target_scsi3_emulate_pr_in;
2655 		size = (cdb[7] << 8) + cdb[8];
2656 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2657 		break;
2658 	case PERSISTENT_RESERVE_OUT:
2659 		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2660 			cmd->execute_cmd = target_scsi3_emulate_pr_out;
2661 		size = (cdb[7] << 8) + cdb[8];
2662 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2663 		break;
2664 	case GPCMD_MECHANISM_STATUS:
2665 	case GPCMD_READ_DVD_STRUCTURE:
2666 		size = (cdb[8] << 8) + cdb[9];
2667 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2668 		break;
2669 	case READ_POSITION:
2670 		size = READ_POSITION_LEN;
2671 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2672 		break;
2673 	case MAINTENANCE_OUT:
2674 		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2675 			/* MAINTENANCE_OUT from SCC-2
2676 			 *
2677 			 * Check for emulated MO_SET_TARGET_PGS.
2678 			 */
2679 			if (cdb[1] == MO_SET_TARGET_PGS &&
2680 			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2681 				cmd->execute_cmd =
2682 					target_emulate_set_target_port_groups;
2683 			}
2684 
2685 			size = (cdb[6] << 24) | (cdb[7] << 16) |
2686 			       (cdb[8] << 8) | cdb[9];
2687 		} else  {
2688 			/* GPCMD_REPORT_KEY from multi media commands */
2689 			size = (cdb[8] << 8) + cdb[9];
2690 		}
2691 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2692 		break;
2693 	case INQUIRY:
2694 		size = (cdb[3] << 8) + cdb[4];
2695 		/*
2696 		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
2697 		 * See spc4r17 section 5.3
2698 		 */
2699 		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2700 			cmd->sam_task_attr = MSG_HEAD_TAG;
2701 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2702 		if (!passthrough)
2703 			cmd->execute_cmd = target_emulate_inquiry;
2704 		break;
2705 	case READ_BUFFER:
2706 		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2707 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2708 		break;
2709 	case READ_CAPACITY:
2710 		size = READ_CAP_LEN;
2711 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2712 		if (!passthrough)
2713 			cmd->execute_cmd = target_emulate_readcapacity;
2714 		break;
2715 	case READ_MEDIA_SERIAL_NUMBER:
2716 	case SECURITY_PROTOCOL_IN:
2717 	case SECURITY_PROTOCOL_OUT:
2718 		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2719 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2720 		break;
2721 	case SERVICE_ACTION_IN:
2722 		switch (cmd->t_task_cdb[1] & 0x1f) {
2723 		case SAI_READ_CAPACITY_16:
2724 			if (!passthrough)
2725 				cmd->execute_cmd =
2726 					target_emulate_readcapacity_16;
2727 			break;
2728 		default:
2729 			if (passthrough)
2730 				break;
2731 
2732 			pr_err("Unsupported SA: 0x%02x\n",
2733 				cmd->t_task_cdb[1] & 0x1f);
2734 			goto out_invalid_cdb_field;
2735 		}
2736 		/*FALLTHROUGH*/
2737 	case ACCESS_CONTROL_IN:
2738 	case ACCESS_CONTROL_OUT:
2739 	case EXTENDED_COPY:
2740 	case READ_ATTRIBUTE:
2741 	case RECEIVE_COPY_RESULTS:
2742 	case WRITE_ATTRIBUTE:
2743 		size = (cdb[10] << 24) | (cdb[11] << 16) |
2744 		       (cdb[12] << 8) | cdb[13];
2745 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2746 		break;
2747 	case RECEIVE_DIAGNOSTIC:
2748 	case SEND_DIAGNOSTIC:
2749 		size = (cdb[3] << 8) | cdb[4];
2750 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2751 		break;
2752 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2753 #if 0
2754 	case GPCMD_READ_CD:
2755 		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2756 		size = (2336 * sectors);
2757 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2758 		break;
2759 #endif
2760 	case READ_TOC:
2761 		size = cdb[8];
2762 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2763 		break;
2764 	case REQUEST_SENSE:
2765 		size = cdb[4];
2766 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2767 		if (!passthrough)
2768 			cmd->execute_cmd = target_emulate_request_sense;
2769 		break;
2770 	case READ_ELEMENT_STATUS:
2771 		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
2772 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2773 		break;
2774 	case WRITE_BUFFER:
2775 		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2776 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2777 		break;
2778 	case RESERVE:
2779 	case RESERVE_10:
2780 		/*
2781 		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
2782 		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2783 		 */
2784 		if (cdb[0] == RESERVE_10)
2785 			size = (cdb[7] << 8) | cdb[8];
2786 		else
2787 			size = cmd->data_length;
2788 
2789 		/*
2790 		 * Setup the legacy emulated handler for SPC-2 and
2791 		 * >= SPC-3 compatible reservation handling (CRH=1).
2792 		 * Otherwise, we assume the underlying SCSI logic is
2793 		 * running in SPC_PASSTHROUGH, and wants reservations
2794 		 * emulation disabled.
2795 		 */
2796 		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2797 			cmd->execute_cmd = target_scsi2_reservation_reserve;
2798 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2799 		break;
2800 	case RELEASE:
2801 	case RELEASE_10:
2802 		/*
2803 		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
2804 		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2805 		 */
2806 		if (cdb[0] == RELEASE_10)
2807 			size = (cdb[7] << 8) | cdb[8];
2808 		else
2809 			size = cmd->data_length;
2810 
2811 		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2812 			cmd->execute_cmd = target_scsi2_reservation_release;
2813 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2814 		break;
2815 	case SYNCHRONIZE_CACHE:
2816 	case SYNCHRONIZE_CACHE_16:
2817 		/*
2818 		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
2819 		 */
2820 		if (cdb[0] == SYNCHRONIZE_CACHE) {
2821 			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2822 			cmd->t_task_lba = transport_lba_32(cdb);
2823 		} else {
2824 			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2825 			cmd->t_task_lba = transport_lba_64(cdb);
2826 		}
2827 		if (sector_ret)
2828 			goto out_unsupported_cdb;
2829 
2830 		size = transport_get_size(sectors, cdb, cmd);
2831 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2832 
2833 		if (passthrough)
2834 			break;
2835 
2836 		/*
2837 		 * Check to ensure that LBA + range does not extend past the end of
2838 		 * the device for IBLOCK and FILEIO ->do_sync_cache() backend calls.
2839 		 */
2840 		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
2841 			if (transport_cmd_get_valid_sectors(cmd) < 0)
2842 				goto out_invalid_cdb_field;
2843 		}
2844 		cmd->execute_cmd = target_emulate_synchronize_cache;
2845 		break;
2846 	case UNMAP:
2847 		size = get_unaligned_be16(&cdb[7]);
2848 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2849 		if (!passthrough)
2850 			cmd->execute_cmd = target_emulate_unmap;
2851 		break;
2852 	case WRITE_SAME_16:
2853 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2854 		if (sector_ret)
2855 			goto out_unsupported_cdb;
2856 
2857 		if (sectors)
2858 			size = transport_get_size(1, cdb, cmd);
2859 		else {
2860 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2861 			goto out_invalid_cdb_field;
2862 		}
2863 
2864 		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
2865 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2866 
2867 		if (target_check_write_same_discard(&cdb[1], dev) < 0)
2868 			goto out_unsupported_cdb;
2869 		if (!passthrough)
2870 			cmd->execute_cmd = target_emulate_write_same;
2871 		break;
2872 	case WRITE_SAME:
2873 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2874 		if (sector_ret)
2875 			goto out_unsupported_cdb;
2876 
2877 		if (sectors)
2878 			size = transport_get_size(1, cdb, cmd);
2879 		else {
2880 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2881 			goto out_invalid_cdb_field;
2882 		}
2883 
2884 		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
2885 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2886 		/*
2887 		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
2888 		 * of byte 1 bit 3 UNMAP instead of the original reserved field.
2889 		 */
2890 		if (target_check_write_same_discard(&cdb[1], dev) < 0)
2891 			goto out_unsupported_cdb;
2892 		if (!passthrough)
2893 			cmd->execute_cmd = target_emulate_write_same;
2894 		break;
2895 	case ALLOW_MEDIUM_REMOVAL:
2896 	case ERASE:
2897 	case REZERO_UNIT:
2898 	case SEEK_10:
2899 	case SPACE:
2900 	case START_STOP:
2901 	case TEST_UNIT_READY:
2902 	case VERIFY:
2903 	case WRITE_FILEMARKS:
2904 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2905 		if (!passthrough)
2906 			cmd->execute_cmd = target_emulate_noop;
2907 		break;
2908 	case GPCMD_CLOSE_TRACK:
2909 	case INITIALIZE_ELEMENT_STATUS:
2910 	case GPCMD_LOAD_UNLOAD:
2911 	case GPCMD_SET_SPEED:
2912 	case MOVE_MEDIUM:
2913 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2914 		break;
2915 	case REPORT_LUNS:
2916 		cmd->execute_cmd = target_report_luns;
2917 		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2918 		/*
2919 		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS.
2920 		 * See spc4r17 section 5.3
2921 		 */
2922 		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2923 			cmd->sam_task_attr = MSG_HEAD_TAG;
2924 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2925 		break;
2926 	case GET_EVENT_STATUS_NOTIFICATION:
2927 		size = (cdb[7] << 8) | cdb[8];
2928 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2929 		break;
2930 	case ATA_16:
2931 		/* Only support ATA passthrough to pSCSI backends.. */
2932 		if (!passthrough)
2933 			goto out_unsupported_cdb;
2934 
2935 		/* T_LENGTH */
2936 		switch (cdb[2] & 0x3) {
2937 		case 0x0:
2938 			sectors = 0;
2939 			break;
2940 		case 0x1:
2941 			sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
2942 			break;
2943 		case 0x2:
2944 			sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
2945 			break;
2946 		case 0x3:
2947 			pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
2948 			goto out_invalid_cdb_field;
2949 		}
2950 
2951 		/* BYTE_BLOCK */
2952 		if (cdb[2] & 0x4) {
2953 			/* BLOCK T_TYPE: 512 or sector */
2954 			size = sectors * ((cdb[2] & 0x10) ?
2955 				dev->se_sub_dev->se_dev_attrib.block_size : 512);
2956 		} else {
2957 			/* BYTE */
2958 			size = sectors;
2959 		}
2960 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2961 		break;
2962 	default:
2963 		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
2964 			" 0x%02x, sending CHECK_CONDITION.\n",
2965 			cmd->se_tfo->get_fabric_name(), cdb[0]);
2966 		goto out_unsupported_cdb;
2967 	}
2968 
2969 	if (cmd->unknown_data_length)
2970 		cmd->data_length = size;
2971 
2972 	if (size != cmd->data_length) {
2973 		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
2974 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
2975 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
2976 				cmd->data_length, size, cdb[0]);
2977 
2978 		cmd->cmd_spdtl = size;
2979 
2980 		if (cmd->data_direction == DMA_TO_DEVICE) {
2981 			pr_err("Rejecting underflow/overflow"
2982 					" WRITE data\n");
2983 			goto out_invalid_cdb_field;
2984 		}
2985 		/*
2986 		 * Reject READ_* or WRITE_* with overflow/underflow for
2987 		 * type SCF_SCSI_DATA_SG_IO_CDB.
2988 		 */
2989 		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
2990 			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
2991 				" CDB on non 512-byte sector setup subsystem"
2992 				" plugin: %s\n", dev->transport->name);
2993 			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
2994 			goto out_invalid_cdb_field;
2995 		}
2996 
2997 		if (size > cmd->data_length) {
2998 			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
2999 			cmd->residual_count = (size - cmd->data_length);
3000 		} else {
3001 			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3002 			cmd->residual_count = (cmd->data_length - size);
3003 		}
3004 		cmd->data_length = size;
3005 	}
3006 
3007 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3008 		if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
3009 			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
3010 				" big sectors %u exceeds fabric_max_sectors:"
3011 				" %u\n", cdb[0], sectors,
3012 				su_dev->se_dev_attrib.fabric_max_sectors);
3013 			goto out_invalid_cdb_field;
3014 		}
3015 		if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
3016 			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
3017 				" big sectors %u exceeds backend hw_max_sectors:"
3018 				" %u\n", cdb[0], sectors,
3019 				su_dev->se_dev_attrib.hw_max_sectors);
3020 			goto out_invalid_cdb_field;
3021 		}
3022 	}
3023 
3024 	/* reject any command that we don't have a handler for */
3025 	if (!(passthrough || cmd->execute_cmd ||
3026 	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3027 		goto out_unsupported_cdb;
3028 
3029 	transport_set_supported_SAM_opcode(cmd);
3030 	return ret;
3031 
3032 out_unsupported_cdb:
3033 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3034 	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3035 	return -EINVAL;
3036 out_invalid_cdb_field:
3037 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3038 	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3039 	return -EINVAL;
3040 }
3041 
3042 /*
3043  * Called from I/O completion to determine which dormant/delayed
3044  * and ordered cmds need to have their tasks added to the execution queue.
3045  */
3046 static void transport_complete_task_attr(struct se_cmd *cmd)
3047 {
3048 	struct se_device *dev = cmd->se_dev;
3049 	struct se_cmd *cmd_p, *cmd_tmp;
3050 	int new_active_tasks = 0;
3051 
3052 	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3053 		atomic_dec(&dev->simple_cmds);
3054 		smp_mb__after_atomic_dec();
3055 		dev->dev_cur_ordered_id++;
3056 		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3057 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
3058 			cmd->se_ordered_id);
3059 	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3060 		dev->dev_cur_ordered_id++;
3061 		pr_debug("Incremented dev_cur_ordered_id: %u for"
3062 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3063 			cmd->se_ordered_id);
3064 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3065 		atomic_dec(&dev->dev_ordered_sync);
3066 		smp_mb__after_atomic_dec();
3067 
3068 		dev->dev_cur_ordered_id++;
3069 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3070 			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3071 	}
3072 	/*
3073 	 * Process all commands up to the last received
3074 	 * ORDERED task attribute, which requires another blocking
3075 	 * boundary.
3076 	 */
3077 	spin_lock(&dev->delayed_cmd_lock);
3078 	list_for_each_entry_safe(cmd_p, cmd_tmp,
3079 			&dev->delayed_cmd_list, se_delayed_node) {
3080 
3081 		list_del(&cmd_p->se_delayed_node);
3082 		spin_unlock(&dev->delayed_cmd_lock);
3083 
3084 		pr_debug("Calling add_tasks() for"
3085 			" cmd_p: 0x%02x Task Attr: 0x%02x"
3086 			" Dormant -> Active, se_ordered_id: %u\n",
3087 			cmd_p->t_task_cdb[0],
3088 			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3089 
3090 		target_add_to_execute_list(cmd_p);
3091 		new_active_tasks++;
3092 
3093 		spin_lock(&dev->delayed_cmd_lock);
3094 		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3095 			break;
3096 	}
3097 	spin_unlock(&dev->delayed_cmd_lock);
3098 	/*
3099 	 * If new tasks have become active, wake up the transport thread
3100 	 * to do the processing of the Active tasks.
3101 	 */
3102 	if (new_active_tasks != 0)
3103 		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3104 }
3105 
3106 static void transport_complete_qf(struct se_cmd *cmd)
3107 {
3108 	int ret = 0;
3109 
3110 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3111 		transport_complete_task_attr(cmd);
3112 
3113 	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3114 		ret = cmd->se_tfo->queue_status(cmd);
3115 		if (ret)
3116 			goto out;
3117 	}
3118 
3119 	switch (cmd->data_direction) {
3120 	case DMA_FROM_DEVICE:
3121 		ret = cmd->se_tfo->queue_data_in(cmd);
3122 		break;
3123 	case DMA_TO_DEVICE:
3124 		if (cmd->t_bidi_data_sg) {
3125 			ret = cmd->se_tfo->queue_data_in(cmd);
3126 			if (ret < 0)
3127 				break;
3128 		}
3129 		/* Fall through for DMA_TO_DEVICE */
3130 	case DMA_NONE:
3131 		ret = cmd->se_tfo->queue_status(cmd);
3132 		break;
3133 	default:
3134 		break;
3135 	}
3136 
3137 out:
3138 	if (ret < 0) {
3139 		transport_handle_queue_full(cmd, cmd->se_dev);
3140 		return;
3141 	}
3142 	transport_lun_remove_cmd(cmd);
3143 	transport_cmd_check_stop_to_fabric(cmd);
3144 }
3145 
3146 static void transport_handle_queue_full(
3147 	struct se_cmd *cmd,
3148 	struct se_device *dev)
3149 {
3150 	spin_lock_irq(&dev->qf_cmd_lock);
3151 	list_add_tail(&cmd->se_qf_node, &dev->qf_cmd_list);
3152 	atomic_inc(&dev->dev_qf_count);
3153 	smp_mb__after_atomic_inc();
3154 	spin_unlock_irq(&dev->qf_cmd_lock);
3155 
3156 	schedule_work(&dev->qf_work_queue);
3157 }
3158 
3159 static void target_complete_ok_work(struct work_struct *work)
3160 {
3161 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3162 	int reason = 0, ret;
3163 
3164 	/*
3165 	 * Check if we need to move delayed/dormant tasks from cmds on the
3166 	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3167 	 * Attribute.
3168 	 */
3169 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3170 		transport_complete_task_attr(cmd);
3171 	/*
3172 	 * Check if QUEUE_FULL work needs to be scheduled to drain any
3173 	 * commands previously parked on this device's qf_cmd_list.
3174 	 */
3175 	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3176 		schedule_work(&cmd->se_dev->qf_work_queue);
3177 
3178 	/*
3179 	 * Check if we need to retrieve a sense buffer from
3180 	 * the struct se_cmd in question.
3181 	 */
3182 	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3183 		if (transport_get_sense_data(cmd) < 0)
3184 			reason = TCM_NON_EXISTENT_LUN;
3185 
3186 		if (cmd->scsi_status) {
3187 			ret = transport_send_check_condition_and_sense(
3188 					cmd, reason, 1);
3189 			if (ret == -EAGAIN || ret == -ENOMEM)
3190 				goto queue_full;
3191 
3192 			transport_lun_remove_cmd(cmd);
3193 			transport_cmd_check_stop_to_fabric(cmd);
3194 			return;
3195 		}
3196 	}
3197 	/*
3198 	 * Check for a callback, used by, amongst other things,
3199 	 * XDWRITE_READ_10 emulation.
3200 	 */
3201 	if (cmd->transport_complete_callback)
3202 		cmd->transport_complete_callback(cmd);
3203 
3204 	switch (cmd->data_direction) {
3205 	case DMA_FROM_DEVICE:
3206 		spin_lock(&cmd->se_lun->lun_sep_lock);
3207 		if (cmd->se_lun->lun_sep) {
3208 			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3209 					cmd->data_length;
3210 		}
3211 		spin_unlock(&cmd->se_lun->lun_sep_lock);
3212 
3213 		ret = cmd->se_tfo->queue_data_in(cmd);
3214 		if (ret == -EAGAIN || ret == -ENOMEM)
3215 			goto queue_full;
3216 		break;
3217 	case DMA_TO_DEVICE:
3218 		spin_lock(&cmd->se_lun->lun_sep_lock);
3219 		if (cmd->se_lun->lun_sep) {
3220 			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3221 				cmd->data_length;
3222 		}
3223 		spin_unlock(&cmd->se_lun->lun_sep_lock);
3224 		/*
3225 		 * Check if we need to send READ payload for BIDI-COMMAND
3226 		 */
3227 		if (cmd->t_bidi_data_sg) {
3228 			spin_lock(&cmd->se_lun->lun_sep_lock);
3229 			if (cmd->se_lun->lun_sep) {
3230 				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3231 					cmd->data_length;
3232 			}
3233 			spin_unlock(&cmd->se_lun->lun_sep_lock);
3234 			ret = cmd->se_tfo->queue_data_in(cmd);
3235 			if (ret == -EAGAIN || ret == -ENOMEM)
3236 				goto queue_full;
3237 			break;
3238 		}
3239 		/* Fall through for DMA_TO_DEVICE */
3240 	case DMA_NONE:
3241 		ret = cmd->se_tfo->queue_status(cmd);
3242 		if (ret == -EAGAIN || ret == -ENOMEM)
3243 			goto queue_full;
3244 		break;
3245 	default:
3246 		break;
3247 	}
3248 
3249 	transport_lun_remove_cmd(cmd);
3250 	transport_cmd_check_stop_to_fabric(cmd);
3251 	return;
3252 
3253 queue_full:
3254 	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3255 		" data_direction: %d\n", cmd, cmd->data_direction);
3256 	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3257 	transport_handle_queue_full(cmd, cmd->se_dev);
3258 }
3259 
3260 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3261 {
3262 	struct scatterlist *sg;
3263 	int count;
3264 
3265 	for_each_sg(sgl, sg, nents, count)
3266 		__free_page(sg_page(sg));
3267 
3268 	kfree(sgl);
3269 }
3270 
3271 static inline void transport_free_pages(struct se_cmd *cmd)
3272 {
3273 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3274 		return;
3275 
3276 	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3277 	cmd->t_data_sg = NULL;
3278 	cmd->t_data_nents = 0;
3279 
3280 	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3281 	cmd->t_bidi_data_sg = NULL;
3282 	cmd->t_bidi_data_nents = 0;
3283 }
3284 
3285 /**
3286  * transport_release_cmd - free a command
3287  * @cmd:       command to free
3288  *
3289  * This routine unconditionally frees a command, and reference counting
3290  * or list removal must be done in the caller.
3291  */
3292 static void transport_release_cmd(struct se_cmd *cmd)
3293 {
3294 	BUG_ON(!cmd->se_tfo);
3295 
3296 	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
3297 		core_tmr_release_req(cmd->se_tmr_req);
3298 	if (cmd->t_task_cdb != cmd->__t_task_cdb)
3299 		kfree(cmd->t_task_cdb);
3300 	/*
3301 	 * If this cmd has been setup with target_get_sess_cmd(), drop
3302 	 * the kref and call ->release_cmd() in kref callback.
3303 	 */
3304 	if (cmd->check_release != 0) {
3305 		target_put_sess_cmd(cmd->se_sess, cmd);
3306 		return;
3307 	}
3308 	cmd->se_tfo->release_cmd(cmd);
3309 }
3310 
3311 /**
3312  * transport_put_cmd - release a reference to a command
3313  * @cmd:       command to release
3314  *
3315  * This routine releases our reference to the command and frees it if possible.
3316  */
3317 static void transport_put_cmd(struct se_cmd *cmd)
3318 {
3319 	unsigned long flags;
3320 
3321 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3322 	if (atomic_read(&cmd->t_fe_count)) {
3323 		if (!atomic_dec_and_test(&cmd->t_fe_count))
3324 			goto out_busy;
3325 	}
3326 
3327 	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
3328 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
3329 		target_remove_from_state_list(cmd);
3330 	}
3331 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3332 
3333 	transport_free_pages(cmd);
3334 	transport_release_cmd(cmd);
3335 	return;
3336 out_busy:
3337 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3338 }
3339 
3340 /*
3341  * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3342  * allocating in the core.
3343  * @cmd:  Associated se_cmd descriptor
3344  * @sgl:  SGL style memory for TCM WRITE / READ
3345  * @sgl_count: Number of SGL elements
3346  * @sgl_bidi: SGL style memory for TCM BIDI READ
3347  * @sgl_bidi_count: Number of BIDI READ SGL elements
3348  *
3349  * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
3350  * of parameters.
3351  */
3352 int transport_generic_map_mem_to_cmd(
3353 	struct se_cmd *cmd,
3354 	struct scatterlist *sgl,
3355 	u32 sgl_count,
3356 	struct scatterlist *sgl_bidi,
3357 	u32 sgl_bidi_count)
3358 {
3359 	if (!sgl || !sgl_count)
3360 		return 0;
3361 
3362 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3363 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3364 		/*
3365 		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
3366 		 * scatterlists already have been set to follow what the fabric
3367 		 * passes for the original expected data transfer length.
3368 		 */
3369 		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3370 			pr_warn("Rejecting SCSI DATA overflow for fabric using"
3371 				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3372 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3373 			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3374 			return -EINVAL;
3375 		}
3376 
3377 		cmd->t_data_sg = sgl;
3378 		cmd->t_data_nents = sgl_count;
3379 
3380 		if (sgl_bidi && sgl_bidi_count) {
3381 			cmd->t_bidi_data_sg = sgl_bidi;
3382 			cmd->t_bidi_data_nents = sgl_bidi_count;
3383 		}
3384 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3385 	}
3386 
3387 	return 0;
3388 }
3389 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
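
/*
 * Usage sketch (illustrative, not compiled in): a fabric module that has
 * already mapped the payload into scatterlists hands them to the core
 * before kicking off command processing:
 *
 *	rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
 *					      NULL, 0);
 *	if (rc < 0)
 *		return rc;
 *	rc = transport_generic_new_cmd(se_cmd);
 *
 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC then makes the core skip its own
 * page allocation in transport_generic_get_mem().
 */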
3390 
3391 void *transport_kmap_data_sg(struct se_cmd *cmd)
3392 {
3393 	struct scatterlist *sg = cmd->t_data_sg;
3394 	struct page **pages;
3395 	int i;
3396 
3397 	BUG_ON(!sg);
3398 	/*
3399 	 * We need to take into account a possible offset here for fabrics like
3400 	 * tcm_loop that may be using a contig buffer from the SCSI midlayer for
3401 	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3402 	 */
3403 	if (!cmd->t_data_nents)
3404 		return NULL;
3405 	else if (cmd->t_data_nents == 1)
3406 		return kmap(sg_page(sg)) + sg->offset;
3407 
3408 	/* >1 page. use vmap */
3409 	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
3410 	if (!pages)
3411 		return NULL;
3412 
3413 	/* convert sg[] to pages[] */
3414 	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
3415 		pages[i] = sg_page(sg);
3416 	}
3417 
3418 	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
3419 	kfree(pages);
3420 	if (!cmd->t_data_vmap)
3421 		return NULL;
3422 
3423 	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
3424 }
3425 EXPORT_SYMBOL(transport_kmap_data_sg);
3426 
3427 void transport_kunmap_data_sg(struct se_cmd *cmd)
3428 {
3429 	if (!cmd->t_data_nents) {
3430 		return;
3431 	} else if (cmd->t_data_nents == 1) {
3432 		kunmap(sg_page(cmd->t_data_sg));
3433 		return;
3434 	}
3435 
3436 	vunmap(cmd->t_data_vmap);
3437 	cmd->t_data_vmap = NULL;
3438 }
3439 EXPORT_SYMBOL(transport_kunmap_data_sg);
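
/*
 * Usage sketch (illustrative, not compiled in): code needing a linear
 * view of the command's data buffer pairs the two helpers above:
 *
 *	buf = transport_kmap_data_sg(cmd);
 *	if (buf) {
 *		... access up to cmd->data_length bytes at buf ...
 *		transport_kunmap_data_sg(cmd);
 *	}
 *
 * Single-entry SGLs are kmap()'d directly while multi-entry SGLs are
 * stitched together with vmap(), so both calls may sleep and must be
 * made from process context.
 */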
3440 
3441 static int
3442 transport_generic_get_mem(struct se_cmd *cmd)
3443 {
3444 	u32 length = cmd->data_length;
3445 	unsigned int nents;
3446 	struct page *page;
3447 	gfp_t zero_flag;
3448 	int i = 0;
3449 
3450 	nents = DIV_ROUND_UP(length, PAGE_SIZE);
3451 	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3452 	if (!cmd->t_data_sg)
3453 		return -ENOMEM;
3454 
3455 	cmd->t_data_nents = nents;
3456 	sg_init_table(cmd->t_data_sg, nents);
3457 
3458 	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
3459 
3460 	while (length) {
3461 		u32 page_len = min_t(u32, length, PAGE_SIZE);
3462 		page = alloc_page(GFP_KERNEL | zero_flag);
3463 		if (!page)
3464 			goto out;
3465 
3466 		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3467 		length -= page_len;
3468 		i++;
3469 	}
3470 	return 0;
3471 
3472 out:
3473 	while (i > 0) {		/* entry i was never populated */
3474 		i--;
3475 		__free_page(sg_page(&cmd->t_data_sg[i]));
3476 	}
3477 	kfree(cmd->t_data_sg);
3478 	cmd->t_data_sg = NULL;
3479 	return -ENOMEM;
3480 }
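
/*
 * Example (illustrative): for cmd->data_length = 8192 with PAGE_SIZE =
 * 4096, transport_generic_get_mem() builds a two-entry scatterlist of
 * one full page each; a 5000-byte request gets a 4096-byte entry plus a
 * 904-byte entry.
 */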
3481 
3482 /*
3483  * Allocate any required resources to execute the command.  For writes we
3484  * might not have the payload yet, so notify the fabric via a call to
3485  * ->write_pending instead. Otherwise place it on the execution queue.
3486  */
3487 int transport_generic_new_cmd(struct se_cmd *cmd)
3488 {
3489 	struct se_device *dev = cmd->se_dev;
3490 	int ret = 0;
3491 
3492 	/*
3493 	 * Determine if the TCM fabric module has already allocated physical
3494 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
3495 	 * beforehand.
3496 	 */
3497 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3498 	    cmd->data_length) {
3499 		ret = transport_generic_get_mem(cmd);
3500 		if (ret < 0)
3501 			goto out_fail;
3502 	}
3503 
3504 	/* Workaround for handling zero-length control CDBs */
3505 	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3506 	    !cmd->data_length) {
3507 		spin_lock_irq(&cmd->t_state_lock);
3508 		cmd->t_state = TRANSPORT_COMPLETE;
3509 		cmd->transport_state |= CMD_T_ACTIVE;
3510 		spin_unlock_irq(&cmd->t_state_lock);
3511 
3512 		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
3513 			u8 ua_asc = 0, ua_ascq = 0;
3514 
3515 			core_scsi3_ua_clear_for_request_sense(cmd,
3516 					&ua_asc, &ua_ascq);
3517 		}
3518 
3519 		INIT_WORK(&cmd->work, target_complete_ok_work);
3520 		queue_work(target_completion_wq, &cmd->work);
3521 		return 0;
3522 	}
3523 
3524 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3525 		struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
3526 
3527 		if (transport_cmd_get_valid_sectors(cmd) < 0)
3528 			return -EINVAL;
3529 
3530 		BUG_ON(cmd->data_length % attr->block_size);
3531 		BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
3532 			attr->hw_max_sectors);
3533 	}
3534 
3535 	atomic_inc(&cmd->t_fe_count);
3536 
3537 	/*
3538 	 * For WRITEs, let the fabric know its buffer is ready.
3539 	 *
3540 	 * The command will be added to the execution queue after its write
3541 	 * data has arrived.
3542 	 */
3543 	if (cmd->data_direction == DMA_TO_DEVICE) {
3544 		target_add_to_state_list(cmd);
3545 		return transport_generic_write_pending(cmd);
3546 	}
3547 	/*
3548 	 * For everything other than a WRITE, add the command to the execution queue.
3549 	 */
3550 	transport_execute_tasks(cmd);
3551 	return 0;
3552 
3553 out_fail:
3554 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3555 	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3556 	return -EINVAL;
3557 }
3558 EXPORT_SYMBOL(transport_generic_new_cmd);
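
/*
 * Illustrative dispatch flow (a sketch; fabrics normally reach this
 * function indirectly, e.g. via the processing thread below): on
 * failure the caller is expected to run the exception path, and for
 * DMA_TO_DEVICE the fabric's ->write_pending() has already been
 * invoked by the time this returns.
 *
 *	ret = transport_generic_new_cmd(se_cmd);
 *	if (ret < 0)
 *		transport_generic_request_failure(se_cmd);
 */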
3559 
3560 /*	transport_generic_process_write():
3561  *
3562  *	Add a WRITE command to the execution queue once its payload has arrived.
3563  */
3564 void transport_generic_process_write(struct se_cmd *cmd)
3565 {
3566 	transport_execute_tasks(cmd);
3567 }
3568 EXPORT_SYMBOL(transport_generic_process_write);
3569 
3570 static void transport_write_pending_qf(struct se_cmd *cmd)
3571 {
3572 	int ret;
3573 
3574 	ret = cmd->se_tfo->write_pending(cmd);
3575 	if (ret == -EAGAIN || ret == -ENOMEM) {
3576 		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
3577 			 cmd);
3578 		transport_handle_queue_full(cmd, cmd->se_dev);
3579 	}
3580 }
3581 
3582 static int transport_generic_write_pending(struct se_cmd *cmd)
3583 {
3584 	unsigned long flags;
3585 	int ret;
3586 
3587 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3588 	cmd->t_state = TRANSPORT_WRITE_PENDING;
3589 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3590 
3591 	/*
3592 	 * Clear the se_cmd for WRITE_PENDING status in order to set
3593 	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
3594 	 * from HW target mode interrupt code.  This is safe to call with
3595 	 * transport_off=1 before cmd->se_tfo->write_pending() because the
3596 	 * se_cmd->se_lun pointer is not cleared.
3597 	 */
3598 	transport_cmd_check_stop(cmd, 1, 0);
3599 
3600 	/*
3601 	 * Call the fabric write_pending function here to let the
3602 	 * frontend know that WRITE buffers are ready.
3603 	 */
3604 	ret = cmd->se_tfo->write_pending(cmd);
3605 	if (ret == -EAGAIN || ret == -ENOMEM)
3606 		goto queue_full;
3607 	else if (ret < 0)
3608 		return ret;
3609 
3610 	return 1;
3611 
3612 queue_full:
3613 	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
3614 	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3615 	transport_handle_queue_full(cmd, cmd->se_dev);
3616 	return 0;
3617 }
3618 
3619 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3620 {
3621 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3622 		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
3623 			transport_wait_for_tasks(cmd);
3624 
3625 		transport_release_cmd(cmd);
3626 	} else {
3627 		if (wait_for_tasks)
3628 			transport_wait_for_tasks(cmd);
3629 
3630 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3631 
3632 		if (cmd->se_lun)
3633 			transport_lun_remove_cmd(cmd);
3634 
3635 		transport_put_cmd(cmd);
3636 	}
3637 }
3638 EXPORT_SYMBOL(transport_generic_free_cmd);
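
/*
 * Example (a sketch of an assumed fabric release path): a caller that
 * must block until the storage engine has quiesced the descriptor
 * passes wait_for_tasks=1, while a context that cannot sleep on
 * completions passes 0:
 *
 *	transport_generic_free_cmd(se_cmd, 1);
 */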
3639 
3640 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
3641  * @se_sess:	session to reference
3642  * @se_cmd:	command descriptor to add
3643  * @ack_kref:	Signal that the fabric will perform an acknowledging target_put_sess_cmd()
3644  */
3645 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
3646 			bool ack_kref)
3647 {
3648 	unsigned long flags;
3649 
3650 	kref_init(&se_cmd->cmd_kref);
3651 	/*
3652 	 * Add a second kref if the fabric caller is expecting to handle
3653 	 * fabric acknowledgement that requires two target_put_sess_cmd()
3654 	 * invocations before se_cmd descriptor release.
3655 	 */
3656 	if (ack_kref) {
3657 		kref_get(&se_cmd->cmd_kref);
3658 		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
3659 	}
3660 
3661 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3662 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3663 	se_cmd->check_release = 1;
3664 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3665 }
3666 EXPORT_SYMBOL(target_get_sess_cmd);
3667 
3668 static void target_release_cmd_kref(struct kref *kref)
3669 {
3670 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
3671 	struct se_session *se_sess = se_cmd->se_sess;
3672 	unsigned long flags;
3673 
3674 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3675 	if (list_empty(&se_cmd->se_cmd_list)) {
3676 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3677 		se_cmd->se_tfo->release_cmd(se_cmd);
3678 		return;
3679 	}
3680 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
3681 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3682 		complete(&se_cmd->cmd_wait_comp);
3683 		return;
3684 	}
3685 	list_del(&se_cmd->se_cmd_list);
3686 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3687 
3688 	se_cmd->se_tfo->release_cmd(se_cmd);
3689 }
3690 
3691 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
3692  * @se_sess:	session to reference
3693  * @se_cmd:	command descriptor to drop
3694  */
3695 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3696 {
3697 	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
3698 }
3699 EXPORT_SYMBOL(target_put_sess_cmd);
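
/*
 * Example pairing (illustrative only): a fabric using ack_kref takes
 * two references at submission time and therefore needs two matching
 * puts, one from the completion path and one from the fabric's ack:
 *
 *	target_get_sess_cmd(se_sess, se_cmd, true);
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);	(completion path)
 *	target_put_sess_cmd(se_sess, se_cmd);	(fabric ack path)
 */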
3700 
3701 /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
3702  * @se_sess:	session to split
3703  */
3704 void target_splice_sess_cmd_list(struct se_session *se_sess)
3705 {
3706 	struct se_cmd *se_cmd;
3707 	unsigned long flags;
3708 
3709 	WARN_ON(!list_empty(&se_sess->sess_wait_list));
3710 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
3711 
3712 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3713 	se_sess->sess_tearing_down = 1;
3714 
3715 	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
3716 
3717 	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
3718 		se_cmd->cmd_wait_set = 1;
3719 
3720 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3721 }
3722 EXPORT_SYMBOL(target_splice_sess_cmd_list);
3723 
3724 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
3725  * @se_sess:    session to wait for active I/O
3726  * @wait_for_tasks:	Make extra transport_wait_for_tasks call
3727  */
3728 void target_wait_for_sess_cmds(
3729 	struct se_session *se_sess,
3730 	int wait_for_tasks)
3731 {
3732 	struct se_cmd *se_cmd, *tmp_cmd;
3733 	bool rc = false;
3734 
3735 	list_for_each_entry_safe(se_cmd, tmp_cmd,
3736 				&se_sess->sess_wait_list, se_cmd_list) {
3737 		list_del(&se_cmd->se_cmd_list);
3738 
3739 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
3740 			" %d\n", se_cmd, se_cmd->t_state,
3741 			se_cmd->se_tfo->get_cmd_state(se_cmd));
3742 
3743 		if (wait_for_tasks) {
3744 			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
3745 				" fabric state: %d\n", se_cmd, se_cmd->t_state,
3746 				se_cmd->se_tfo->get_cmd_state(se_cmd));
3747 
3748 			rc = transport_wait_for_tasks(se_cmd);
3749 
3750 			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
3751 				" fabric state: %d\n", se_cmd, se_cmd->t_state,
3752 				se_cmd->se_tfo->get_cmd_state(se_cmd));
3753 		}
3754 
3755 		if (!rc) {
3756 			wait_for_completion(&se_cmd->cmd_wait_comp);
3757 			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
3758 				" fabric state: %d\n", se_cmd, se_cmd->t_state,
3759 				se_cmd->se_tfo->get_cmd_state(se_cmd));
3760 		}
3761 
3762 		se_cmd->se_tfo->release_cmd(se_cmd);
3763 	}
3764 }
3765 EXPORT_SYMBOL(target_wait_for_sess_cmds);
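
/*
 * Session teardown sketch (assumed fabric shutdown context): the two
 * helpers above are meant to run back to back, first moving all active
 * descriptors onto sess_wait_list, then draining them before the
 * session itself is torn down:
 *
 *	target_splice_sess_cmd_list(se_sess);
 *	target_wait_for_sess_cmds(se_sess, 0);
 *	transport_deregister_session(se_sess);
 */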
3766 
3767 /*	transport_lun_wait_for_tasks():
3768  *
3769  *	Called from ConfigFS context to stop the passed struct se_cmd to allow
3770  *	a struct se_lun to be shut down successfully.
3771  */
3772 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
3773 {
3774 	unsigned long flags;
3775 	int ret = 0;
3776 
3777 	/*
3778 	 * If the frontend has already requested this struct se_cmd to
3779 	 * be stopped, we can safely ignore this struct se_cmd.
3780 	 */
3781 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3782 	if (cmd->transport_state & CMD_T_STOP) {
3783 		cmd->transport_state &= ~CMD_T_LUN_STOP;
3784 
3785 		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
3786 			 cmd->se_tfo->get_task_tag(cmd));
3787 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3788 		transport_cmd_check_stop(cmd, 1, 0);
3789 		return -EPERM;
3790 	}
3791 	cmd->transport_state |= CMD_T_LUN_FE_STOP;
3792 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3793 
3794 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
3795 
3796 	/* XXX: audit task_flags checks. */
3797 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3798 	if ((cmd->transport_state & CMD_T_BUSY) &&
3799 	    (cmd->transport_state & CMD_T_SENT)) {
3800 		if (!target_stop_cmd(cmd, &flags))
3801 			ret++;
3802 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3803 	} else {
3804 		spin_unlock_irqrestore(&cmd->t_state_lock,
3805 				flags);
3806 		target_remove_from_execute_list(cmd);
3807 	}
3808 
3809 	pr_debug("ConfigFS: cmd: %p stop tasks ret:"
3810 			" %d\n", cmd, ret);
3811 	if (!ret) {
3812 		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
3813 				cmd->se_tfo->get_task_tag(cmd));
3814 		wait_for_completion(&cmd->transport_lun_stop_comp);
3815 		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
3816 				cmd->se_tfo->get_task_tag(cmd));
3817 	}
3818 	transport_remove_cmd_from_queue(cmd);
3819 
3820 	return 0;
3821 }
3822 
3823 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
3824 {
3825 	struct se_cmd *cmd = NULL;
3826 	unsigned long lun_flags, cmd_flags;
3827 	/*
3828 	 * Do exception processing and return CHECK_CONDITION status to the
3829 	 * Initiator Port.
3830 	 */
3831 	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
3832 	while (!list_empty(&lun->lun_cmd_list)) {
3833 		cmd = list_first_entry(&lun->lun_cmd_list,
3834 		       struct se_cmd, se_lun_node);
3835 		list_del_init(&cmd->se_lun_node);
3836 
3837 		/*
3838 		 * This will notify iscsi_target_transport.c:
3839 		 * transport_cmd_check_stop() that a LUN shutdown is in
3840 		 * progress for the iscsi_cmd_t.
3841 		 */
3842 		spin_lock(&cmd->t_state_lock);
3843 		pr_debug("SE_LUN[%d] - Setting cmd->transport"
3844 			"_lun_stop for ITT: 0x%08x\n",
3845 			cmd->se_lun->unpacked_lun,
3846 			cmd->se_tfo->get_task_tag(cmd));
3847 		cmd->transport_state |= CMD_T_LUN_STOP;
3848 		spin_unlock(&cmd->t_state_lock);
3849 
3850 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
3851 
3852 		if (!cmd->se_lun) {
3853 			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
3854 				cmd->se_tfo->get_task_tag(cmd),
3855 				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
3856 			BUG();
3857 		}
3858 		/*
3859 		 * If the Storage engine still owns the iscsi_cmd_t, determine
3860 		 * its state and/or stop its execution context.
3861 		 */
3862 		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
3863 			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
3864 			cmd->se_tfo->get_task_tag(cmd));
3865 
3866 		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
3867 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
3868 			continue;
3869 		}
3870 
3871 		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
3872 			"_wait_for_tasks(): SUCCESS\n",
3873 			cmd->se_lun->unpacked_lun,
3874 			cmd->se_tfo->get_task_tag(cmd));
3875 
3876 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
3877 		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
3878 			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
3879 			goto check_cond;
3880 		}
3881 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
3882 		target_remove_from_state_list(cmd);
3883 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
3884 
3885 		/*
3886 		 * The Storage engine stopped this struct se_cmd before it was
3887 		 * sent to the fabric frontend for delivery back to the
3888 		 * Initiator Node.  Return this SCSI CDB back with a
3889 		 * CHECK_CONDITION status.
3890 		 */
3891 check_cond:
3892 		transport_send_check_condition_and_sense(cmd,
3893 				TCM_NON_EXISTENT_LUN, 0);
3894 		/*
3895 		 * If the fabric frontend is waiting for this iscsi_cmd_t to
3896 		 * be released, notify the waiting thread now that the LU has
3897 		 * finished accessing it.
3898 		 */
3899 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
3900 		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
3901 			pr_debug("SE_LUN[%d] - Detected FE stop for"
3902 				" struct se_cmd: %p ITT: 0x%08x\n",
3903 				lun->unpacked_lun,
3904 				cmd, cmd->se_tfo->get_task_tag(cmd));
3905 
3906 			spin_unlock_irqrestore(&cmd->t_state_lock,
3907 					cmd_flags);
3908 			transport_cmd_check_stop(cmd, 1, 0);
3909 			complete(&cmd->transport_lun_fe_stop_comp);
3910 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
3911 			continue;
3912 		}
3913 		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
3914 			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
3915 
3916 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
3917 		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
3918 	}
3919 	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
3920 }
3921 
3922 static int transport_clear_lun_thread(void *p)
3923 {
3924 	struct se_lun *lun = p;
3925 
3926 	__transport_clear_lun_from_sessions(lun);
3927 	complete(&lun->lun_shutdown_comp);
3928 
3929 	return 0;
3930 }
3931 
3932 int transport_clear_lun_from_sessions(struct se_lun *lun)
3933 {
3934 	struct task_struct *kt;
3935 
3936 	kt = kthread_run(transport_clear_lun_thread, lun,
3937 			"tcm_cl_%u", lun->unpacked_lun);
3938 	if (IS_ERR(kt)) {
3939 		pr_err("Unable to start clear_lun thread\n");
3940 		return PTR_ERR(kt);
3941 	}
3942 	wait_for_completion(&lun->lun_shutdown_comp);
3943 
3944 	return 0;
3945 }
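
/*
 * Illustrative sequence (assumed ConfigFS LUN removal context): the
 * caller simply invokes
 *
 *	transport_clear_lun_from_sessions(lun);
 *
 * which blocks in wait_for_completion() above until the tcm_cl_%u
 * kthread has drained lun->lun_cmd_list and signalled
 * lun_shutdown_comp.
 */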
3946 
3947 /**
3948  * transport_wait_for_tasks - wait for completion to occur
3949  * @cmd:	command to wait
3950  *
3951  * Called from frontend fabric context to wait for storage engine
3952  * to pause and/or release frontend generated struct se_cmd.
3953  */
3954 bool transport_wait_for_tasks(struct se_cmd *cmd)
3955 {
3956 	unsigned long flags;
3957 
3958 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3959 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
3960 	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
3961 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3962 		return false;
3963 	}
3964 	/*
3965 	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
3966 	 * has been set in transport_set_supported_SAM_opcode().
3967 	 */
3968 	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
3969 	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
3970 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3971 		return false;
3972 	}
3973 	/*
3974 	 * If we are already stopped due to an external event (ie: LUN shutdown)
3975 	 * sleep until the connection can have the passed struct se_cmd back.
3976 	 * The cmd->transport_lun_fe_stop_comp will be completed by
3977 	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
3978 	 * has completed its operation on the struct se_cmd.
3979 	 */
3980 	if (cmd->transport_state & CMD_T_LUN_STOP) {
3981 		pr_debug("wait_for_tasks: Stopping"
3982 			" wait_for_completion(&cmd->transport_lun_fe"
3983 			"_stop_comp); for ITT: 0x%08x\n",
3984 			cmd->se_tfo->get_task_tag(cmd));
3985 		/*
3986 		 * There is a special case for WRITES where a FE exception +
3987 		 * LUN shutdown means ConfigFS context is still sleeping on
3988 		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
3989 		 * We go ahead and up transport_lun_stop_comp just to be sure
3990 		 * here.
3991 		 */
3992 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3993 		complete(&cmd->transport_lun_stop_comp);
3994 		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
3995 		spin_lock_irqsave(&cmd->t_state_lock, flags);
3996 
3997 		target_remove_from_state_list(cmd);
3998 		/*
3999 		 * At this point, the frontend who was the originator of this
4000 		 * struct se_cmd, now owns the structure and can be released through
4001 		 * normal means below.
4002 		 */
4003 		pr_debug("wait_for_tasks: Stopped"
4004 			" wait_for_completion(&cmd->transport_lun_fe_"
4005 			"stop_comp); for ITT: 0x%08x\n",
4006 			cmd->se_tfo->get_task_tag(cmd));
4007 
4008 		cmd->transport_state &= ~CMD_T_LUN_STOP;
4009 	}
4010 
4011 	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
4012 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4013 		return false;
4014 	}
4015 
4016 	cmd->transport_state |= CMD_T_STOP;
4017 
4018 	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4019 		" i_state: %d, t_state: %d, CMD_T_STOP\n",
4020 		cmd, cmd->se_tfo->get_task_tag(cmd),
4021 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4022 
4023 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4024 
4025 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4026 
4027 	wait_for_completion(&cmd->t_transport_stop_comp);
4028 
4029 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4030 	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
4031 
4032 	pr_debug("wait_for_tasks: Stopped wait_for_completion("
4033 		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4034 		cmd->se_tfo->get_task_tag(cmd));
4035 
4036 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4037 
4038 	return true;
4039 }
4040 EXPORT_SYMBOL(transport_wait_for_tasks);
4041 
4042 static int transport_get_sense_codes(
4043 	struct se_cmd *cmd,
4044 	u8 *asc,
4045 	u8 *ascq)
4046 {
4047 	*asc = cmd->scsi_asc;
4048 	*ascq = cmd->scsi_ascq;
4049 
4050 	return 0;
4051 }
4052 
4053 static int transport_set_sense_codes(
4054 	struct se_cmd *cmd,
4055 	u8 asc,
4056 	u8 ascq)
4057 {
4058 	cmd->scsi_asc = asc;
4059 	cmd->scsi_ascq = ascq;
4060 
4061 	return 0;
4062 }
4063 
4064 int transport_send_check_condition_and_sense(
4065 	struct se_cmd *cmd,
4066 	u8 reason,
4067 	int from_transport)
4068 {
4069 	unsigned char *buffer = cmd->sense_buffer;
4070 	unsigned long flags;
4071 	int offset;
4072 	u8 asc = 0, ascq = 0;
4073 
4074 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4075 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4076 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4077 		return 0;
4078 	}
4079 	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
4080 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4081 
4082 	if (!reason && from_transport)
4083 		goto after_reason;
4084 
4085 	if (!from_transport)
4086 		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4087 	/*
4088 	 * Data Segment and SenseLength of the fabric response PDU.
4089 	 *
4090 	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4091 	 * from include/scsi/scsi_cmnd.h
4092 	 */
4093 	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
4094 				TRANSPORT_SENSE_BUFFER);
4095 	/*
4096 	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
4097 	 * SENSE KEY values from include/scsi/scsi.h
4098 	 */
4099 	switch (reason) {
4100 	case TCM_NON_EXISTENT_LUN:
4101 		/* CURRENT ERROR */
4102 		buffer[offset] = 0x70;
4103 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4104 		/* ILLEGAL REQUEST */
4105 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4106 		/* LOGICAL UNIT NOT SUPPORTED */
4107 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4108 		break;
4109 	case TCM_UNSUPPORTED_SCSI_OPCODE:
4110 	case TCM_SECTOR_COUNT_TOO_MANY:
4111 		/* CURRENT ERROR */
4112 		buffer[offset] = 0x70;
4113 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4114 		/* ILLEGAL REQUEST */
4115 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4116 		/* INVALID COMMAND OPERATION CODE */
4117 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4118 		break;
4119 	case TCM_UNKNOWN_MODE_PAGE:
4120 		/* CURRENT ERROR */
4121 		buffer[offset] = 0x70;
4122 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4123 		/* ILLEGAL REQUEST */
4124 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4125 		/* INVALID FIELD IN CDB */
4126 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4127 		break;
4128 	case TCM_CHECK_CONDITION_ABORT_CMD:
4129 		/* CURRENT ERROR */
4130 		buffer[offset] = 0x70;
4131 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4132 		/* ABORTED COMMAND */
4133 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4134 		/* BUS DEVICE RESET FUNCTION OCCURRED */
4135 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4136 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4137 		break;
4138 	case TCM_INCORRECT_AMOUNT_OF_DATA:
4139 		/* CURRENT ERROR */
4140 		buffer[offset] = 0x70;
4141 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4142 		/* ABORTED COMMAND */
4143 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4144 		/* WRITE ERROR */
4145 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4146 		/* NOT ENOUGH UNSOLICITED DATA */
4147 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4148 		break;
4149 	case TCM_INVALID_CDB_FIELD:
4150 		/* CURRENT ERROR */
4151 		buffer[offset] = 0x70;
4152 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4153 		/* ILLEGAL REQUEST */
4154 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4155 		/* INVALID FIELD IN CDB */
4156 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4157 		break;
4158 	case TCM_INVALID_PARAMETER_LIST:
4159 		/* CURRENT ERROR */
4160 		buffer[offset] = 0x70;
4161 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4162 		/* ILLEGAL REQUEST */
4163 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4164 		/* INVALID FIELD IN PARAMETER LIST */
4165 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4166 		break;
4167 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
4168 		/* CURRENT ERROR */
4169 		buffer[offset] = 0x70;
4170 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4171 		/* ABORTED COMMAND */
4172 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4173 		/* WRITE ERROR */
4174 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4175 		/* UNEXPECTED_UNSOLICITED_DATA */
4176 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4177 		break;
4178 	case TCM_SERVICE_CRC_ERROR:
4179 		/* CURRENT ERROR */
4180 		buffer[offset] = 0x70;
4181 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4182 		/* ABORTED COMMAND */
4183 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4184 		/* PROTOCOL SERVICE CRC ERROR */
4185 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4186 		/* N/A */
4187 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4188 		break;
4189 	case TCM_SNACK_REJECTED:
4190 		/* CURRENT ERROR */
4191 		buffer[offset] = 0x70;
4192 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4193 		/* ABORTED COMMAND */
4194 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4195 		/* READ ERROR */
4196 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4197 		/* FAILED RETRANSMISSION REQUEST */
4198 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4199 		break;
4200 	case TCM_WRITE_PROTECTED:
4201 		/* CURRENT ERROR */
4202 		buffer[offset] = 0x70;
4203 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4204 		/* DATA PROTECT */
4205 		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4206 		/* WRITE PROTECTED */
4207 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4208 		break;
4209 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4210 		/* CURRENT ERROR */
4211 		buffer[offset] = 0x70;
4212 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4213 		/* UNIT ATTENTION */
4214 		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4215 		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4216 		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4217 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4218 		break;
4219 	case TCM_CHECK_CONDITION_NOT_READY:
4220 		/* CURRENT ERROR */
4221 		buffer[offset] = 0x70;
4222 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4223 		/* Not Ready */
4224 		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4225 		transport_get_sense_codes(cmd, &asc, &ascq);
4226 		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4227 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4228 		break;
4229 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4230 	default:
4231 		/* CURRENT ERROR */
4232 		buffer[offset] = 0x70;
4233 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4234 		/* ILLEGAL REQUEST */
4235 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4236 		/* LOGICAL UNIT COMMUNICATION FAILURE */
4237 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x08;
4238 		break;
4239 	}
4240 	/*
4241 	 * This code uses linux/include/scsi/scsi.h SAM status codes!
4242 	 */
4243 	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4244 	/*
4245 	 * Automatically padded, this value is encoded in the fabric's
4246 	 * data_length response PDU containing the SCSI defined sense data.
4247 	 */
4248 	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
4249 
4250 after_reason:
4251 	return cmd->se_tfo->queue_status(cmd);
4252 }
4253 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
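
/*
 * For reference, the fixed-format sense data built above looks as
 * follows for TCM_NON_EXISTENT_LUN (byte indexes relative to the
 * fabric-supplied offset, per the SPC_* offsets used above):
 *
 *	buffer[0]  = 0x70		(current error, fixed format)
 *	buffer[2]  = ILLEGAL_REQUEST	(sense key, 0x05)
 *	buffer[7]  = 10			(additional sense length)
 *	buffer[12] = 0x25		(ASC: LOGICAL UNIT NOT SUPPORTED)
 *	buffer[13] = 0x00		(ASCQ)
 *
 * for 18 valid bytes of sense data in total.
 */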
4254 
4255 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4256 {
4257 	int ret = 0;
4258 
4259 	if (cmd->transport_state & CMD_T_ABORTED) {
4260 		if (!send_status ||
4261 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4262 			return 1;
4263 
4264 		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4265 			" status for CDB: 0x%02x ITT: 0x%08x\n",
4266 			cmd->t_task_cdb[0],
4267 			cmd->se_tfo->get_task_tag(cmd));
4268 
4269 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4270 		cmd->se_tfo->queue_status(cmd);
4271 		ret = 1;
4272 	}
4273 	return ret;
4274 }
4275 EXPORT_SYMBOL(transport_check_aborted_status);
4276 
4277 void transport_send_task_abort(struct se_cmd *cmd)
4278 {
4279 	unsigned long flags;
4280 
4281 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4282 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4283 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4284 		return;
4285 	}
4286 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4287 
4288 	/*
4289 	 * If there are still expected incoming fabric WRITEs, we wait
4290 	 * until they have completed before sending a TASK_ABORTED
4291 	 * response.  This response with TASK_ABORTED status will be
4292 	 * queued back to fabric module by transport_check_aborted_status().
4293 	 */
4294 	if (cmd->data_direction == DMA_TO_DEVICE) {
4295 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4296 			cmd->transport_state |= CMD_T_ABORTED;
4297 			smp_mb__after_atomic_inc();
4298 		}
4299 	}
4300 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4301 
4302 	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4303 		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
4304 		cmd->se_tfo->get_task_tag(cmd));
4305 
4306 	cmd->se_tfo->queue_status(cmd);
4307 }
4308 
4309 static int transport_generic_do_tmr(struct se_cmd *cmd)
4310 {
4311 	struct se_device *dev = cmd->se_dev;
4312 	struct se_tmr_req *tmr = cmd->se_tmr_req;
4313 	int ret;
4314 
4315 	switch (tmr->function) {
4316 	case TMR_ABORT_TASK:
4317 		core_tmr_abort_task(dev, tmr, cmd->se_sess);
4318 		break;
4319 	case TMR_ABORT_TASK_SET:
4320 	case TMR_CLEAR_ACA:
4321 	case TMR_CLEAR_TASK_SET:
4322 		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4323 		break;
4324 	case TMR_LUN_RESET:
4325 		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4326 		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4327 					 TMR_FUNCTION_REJECTED;
4328 		break;
4329 	case TMR_TARGET_WARM_RESET:
4330 		tmr->response = TMR_FUNCTION_REJECTED;
4331 		break;
4332 	case TMR_TARGET_COLD_RESET:
4333 		tmr->response = TMR_FUNCTION_REJECTED;
4334 		break;
4335 	default:
4336 		pr_err("Unknown TMR function: 0x%02x.\n",
4337 				tmr->function);
4338 		tmr->response = TMR_FUNCTION_REJECTED;
4339 		break;
4340 	}
4341 
4342 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
4343 	cmd->se_tfo->queue_tm_rsp(cmd);
4344 
4345 	transport_cmd_check_stop_to_fabric(cmd);
4346 	return 0;
4347 }
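
/*
 * Illustrative fabric-side sequence (a sketch; the entry-point usage
 * here is assumed): a fabric TMR path fills in tmr->function before
 * handing the descriptor to the storage engine, which queues it to the
 * processing thread below with t_state == TRANSPORT_PROCESS_TMR:
 *
 *	cmd->se_tmr_req->function = TMR_LUN_RESET;
 *	transport_generic_handle_tmr(se_cmd);
 *
 * The response code set above is then returned to the initiator via
 * se_tfo->queue_tm_rsp().
 */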
4348 
4349 /*	transport_processing_thread():
4350  *
4351  *	Per-device thread that dequeues commands and dispatches on t_state.
4352  */
4353 static int transport_processing_thread(void *param)
4354 {
4355 	int ret;
4356 	struct se_cmd *cmd;
4357 	struct se_device *dev = param;
4358 
4359 	while (!kthread_should_stop()) {
4360 		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4361 				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
4362 				kthread_should_stop());
4363 		if (ret < 0)
4364 			goto out;
4365 
4366 get_cmd:
4367 		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4368 		if (!cmd)
4369 			continue;
4370 
4371 		switch (cmd->t_state) {
4372 		case TRANSPORT_NEW_CMD:
4373 			BUG();
4374 			break;
4375 		case TRANSPORT_NEW_CMD_MAP:
4376 			if (!cmd->se_tfo->new_cmd_map) {
4377 				pr_err("cmd->se_tfo->new_cmd_map is"
4378 					" NULL for TRANSPORT_NEW_CMD_MAP\n");
4379 				BUG();
4380 			}
4381 			ret = cmd->se_tfo->new_cmd_map(cmd);
4382 			if (ret < 0) {
4383 				transport_generic_request_failure(cmd);
4384 				break;
4385 			}
4386 			ret = transport_generic_new_cmd(cmd);
4387 			if (ret < 0) {
4388 				transport_generic_request_failure(cmd);
4389 				break;
4390 			}
4391 			break;
4392 		case TRANSPORT_PROCESS_WRITE:
4393 			transport_generic_process_write(cmd);
4394 			break;
4395 		case TRANSPORT_PROCESS_TMR:
4396 			transport_generic_do_tmr(cmd);
4397 			break;
4398 		case TRANSPORT_COMPLETE_QF_WP:
4399 			transport_write_pending_qf(cmd);
4400 			break;
4401 		case TRANSPORT_COMPLETE_QF_OK:
4402 			transport_complete_qf(cmd);
4403 			break;
4404 		default:
4405 			pr_err("Unknown t_state: %d for ITT: 0x%08x "
4406 				"i_state: %d on SE LUN: %u\n",
4407 				cmd->t_state,
4408 				cmd->se_tfo->get_task_tag(cmd),
4409 				cmd->se_tfo->get_cmd_state(cmd),
4410 				cmd->se_lun->unpacked_lun);
4411 			BUG();
4412 		}
4413 
4414 		goto get_cmd;
4415 	}
4416 
4417 out:
4418 	WARN_ON(!list_empty(&dev->state_list));
4419 	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
4420 	dev->process_thread = NULL;
4421 	return 0;
4422 }
4423