1 /*******************************************************************************
2  * Filename:  target_core_transport.c
3  *
4  * This file contains the Generic Target Engine Core.
5  *
6  * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7  * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8  * Copyright (c) 2007-2010 Rising Tide Systems
9  * Copyright (c) 2008-2010 Linux-iSCSI.org
10  *
11  * Nicholas A. Bellinger <nab@kernel.org>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26  *
27  ******************************************************************************/
28 
29 #include <linux/net.h>
30 #include <linux/delay.h>
31 #include <linux/string.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/blkdev.h>
35 #include <linux/spinlock.h>
36 #include <linux/kthread.h>
37 #include <linux/in.h>
38 #include <linux/cdrom.h>
39 #include <linux/module.h>
40 #include <asm/unaligned.h>
41 #include <net/sock.h>
42 #include <net/tcp.h>
43 #include <scsi/scsi.h>
44 #include <scsi/scsi_cmnd.h>
45 #include <scsi/scsi_tcq.h>
46 
47 #include <target/target_core_base.h>
48 #include <target/target_core_backend.h>
49 #include <target/target_core_fabric.h>
50 #include <target/target_core_configfs.h>
51 
52 #include "target_core_internal.h"
53 #include "target_core_alua.h"
54 #include "target_core_pr.h"
55 #include "target_core_ua.h"
56 
57 static int sub_api_initialized;
58 
59 static struct workqueue_struct *target_completion_wq;
60 static struct kmem_cache *se_sess_cache;
61 struct kmem_cache *se_tmr_req_cache;
62 struct kmem_cache *se_ua_cache;
63 struct kmem_cache *t10_pr_reg_cache;
64 struct kmem_cache *t10_alua_lu_gp_cache;
65 struct kmem_cache *t10_alua_lu_gp_mem_cache;
66 struct kmem_cache *t10_alua_tg_pt_gp_cache;
67 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
68 
69 static int transport_generic_write_pending(struct se_cmd *);
70 static int transport_processing_thread(void *param);
71 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
72 static void transport_complete_task_attr(struct se_cmd *cmd);
73 static void transport_handle_queue_full(struct se_cmd *cmd,
74 		struct se_device *dev);
75 static void transport_free_dev_tasks(struct se_cmd *cmd);
76 static int transport_generic_get_mem(struct se_cmd *cmd);
77 static void transport_put_cmd(struct se_cmd *cmd);
78 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
79 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
80 static void transport_generic_request_failure(struct se_cmd *);
81 static void target_complete_ok_work(struct work_struct *work);
82 
83 int init_se_kmem_caches(void)
84 {
85 	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
86 			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
87 			0, NULL);
88 	if (!se_tmr_req_cache) {
89 		pr_err("kmem_cache_create() for struct se_tmr_req"
90 				" failed\n");
91 		goto out;
92 	}
93 	se_sess_cache = kmem_cache_create("se_sess_cache",
94 			sizeof(struct se_session), __alignof__(struct se_session),
95 			0, NULL);
96 	if (!se_sess_cache) {
97 		pr_err("kmem_cache_create() for struct se_session"
98 				" failed\n");
99 		goto out_free_tmr_req_cache;
100 	}
101 	se_ua_cache = kmem_cache_create("se_ua_cache",
102 			sizeof(struct se_ua), __alignof__(struct se_ua),
103 			0, NULL);
104 	if (!se_ua_cache) {
105 		pr_err("kmem_cache_create() for struct se_ua failed\n");
106 		goto out_free_sess_cache;
107 	}
108 	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
109 			sizeof(struct t10_pr_registration),
110 			__alignof__(struct t10_pr_registration), 0, NULL);
111 	if (!t10_pr_reg_cache) {
112 		pr_err("kmem_cache_create() for struct t10_pr_registration"
113 				" failed\n");
114 		goto out_free_ua_cache;
115 	}
116 	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
117 			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
118 			0, NULL);
119 	if (!t10_alua_lu_gp_cache) {
120 		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
121 				" failed\n");
122 		goto out_free_pr_reg_cache;
123 	}
124 	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
125 			sizeof(struct t10_alua_lu_gp_member),
126 			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
127 	if (!t10_alua_lu_gp_mem_cache) {
128 		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
129 				"cache failed\n");
130 		goto out_free_lu_gp_cache;
131 	}
132 	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
133 			sizeof(struct t10_alua_tg_pt_gp),
134 			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
135 	if (!t10_alua_tg_pt_gp_cache) {
136 		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
137 				"cache failed\n");
138 		goto out_free_lu_gp_mem_cache;
139 	}
140 	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
141 			"t10_alua_tg_pt_gp_mem_cache",
142 			sizeof(struct t10_alua_tg_pt_gp_member),
143 			__alignof__(struct t10_alua_tg_pt_gp_member),
144 			0, NULL);
145 	if (!t10_alua_tg_pt_gp_mem_cache) {
146 		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
147 				"mem_cache failed\n");
148 		goto out_free_tg_pt_gp_cache;
149 	}
150 
151 	target_completion_wq = alloc_workqueue("target_completion",
152 					       WQ_MEM_RECLAIM, 0);
153 	if (!target_completion_wq)
154 		goto out_free_tg_pt_gp_mem_cache;
155 
156 	return 0;
157 
158 out_free_tg_pt_gp_mem_cache:
159 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
160 out_free_tg_pt_gp_cache:
161 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
162 out_free_lu_gp_mem_cache:
163 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
164 out_free_lu_gp_cache:
165 	kmem_cache_destroy(t10_alua_lu_gp_cache);
166 out_free_pr_reg_cache:
167 	kmem_cache_destroy(t10_pr_reg_cache);
168 out_free_ua_cache:
169 	kmem_cache_destroy(se_ua_cache);
170 out_free_sess_cache:
171 	kmem_cache_destroy(se_sess_cache);
172 out_free_tmr_req_cache:
173 	kmem_cache_destroy(se_tmr_req_cache);
174 out:
175 	return -ENOMEM;
176 }
177 
178 void release_se_kmem_caches(void)
179 {
180 	destroy_workqueue(target_completion_wq);
181 	kmem_cache_destroy(se_tmr_req_cache);
182 	kmem_cache_destroy(se_sess_cache);
183 	kmem_cache_destroy(se_ua_cache);
184 	kmem_cache_destroy(t10_pr_reg_cache);
185 	kmem_cache_destroy(t10_alua_lu_gp_cache);
186 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
187 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
188 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
189 }
190 
191 /* This code ensures unique mib indexes are handed out. */
192 static DEFINE_SPINLOCK(scsi_mib_index_lock);
193 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
194 
195 /*
196  * Allocate a new row index for the entry type specified
197  */
198 u32 scsi_get_new_index(scsi_index_t type)
199 {
200 	u32 new_index;
201 
202 	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
203 
204 	spin_lock(&scsi_mib_index_lock);
205 	new_index = ++scsi_mib_index[type];
206 	spin_unlock(&scsi_mib_index_lock);
207 
208 	return new_index;
209 }
210 
211 static void transport_init_queue_obj(struct se_queue_obj *qobj)
212 {
213 	atomic_set(&qobj->queue_cnt, 0);
214 	INIT_LIST_HEAD(&qobj->qobj_list);
215 	init_waitqueue_head(&qobj->thread_wq);
216 	spin_lock_init(&qobj->cmd_queue_lock);
217 }
218 
219 void transport_subsystem_check_init(void)
220 {
221 	int ret;
222 
223 	if (sub_api_initialized)
224 		return;
225 
226 	ret = request_module("target_core_iblock");
227 	if (ret != 0)
228 		pr_err("Unable to load target_core_iblock\n");
229 
230 	ret = request_module("target_core_file");
231 	if (ret != 0)
232 		pr_err("Unable to load target_core_file\n");
233 
234 	ret = request_module("target_core_pscsi");
235 	if (ret != 0)
236 		pr_err("Unable to load target_core_pscsi\n");
237 
238 	ret = request_module("target_core_stgt");
239 	if (ret != 0)
240 		pr_err("Unable to load target_core_stgt\n");
241 
242 	sub_api_initialized = 1;
243 	return;
244 }
245 
246 struct se_session *transport_init_session(void)
247 {
248 	struct se_session *se_sess;
249 
250 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
251 	if (!se_sess) {
252 		pr_err("Unable to allocate struct se_session from"
253 				" se_sess_cache\n");
254 		return ERR_PTR(-ENOMEM);
255 	}
256 	INIT_LIST_HEAD(&se_sess->sess_list);
257 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
258 	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
259 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
260 	spin_lock_init(&se_sess->sess_cmd_lock);
261 
262 	return se_sess;
263 }
264 EXPORT_SYMBOL(transport_init_session);
265 
266 /*
267  * Called with spin_lock_bh(&se_tpg->session_lock) held.
268  */
269 void __transport_register_session(
270 	struct se_portal_group *se_tpg,
271 	struct se_node_acl *se_nacl,
272 	struct se_session *se_sess,
273 	void *fabric_sess_ptr)
274 {
275 	unsigned char buf[PR_REG_ISID_LEN];
276 
277 	se_sess->se_tpg = se_tpg;
278 	se_sess->fabric_sess_ptr = fabric_sess_ptr;
279 	/*
280 	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
281 	 *
282 	 * Only set for struct se_session's that will actually be moving I/O,
283 	 * e.g. *NOT* discovery sessions.
284 	 */
285 	if (se_nacl) {
286 		/*
287 		 * If the fabric module supports an ISID based TransportID,
288 		 * save this value in binary from the fabric I_T Nexus now.
289 		 */
290 		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
291 			memset(&buf[0], 0, PR_REG_ISID_LEN);
292 			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
293 					&buf[0], PR_REG_ISID_LEN);
294 			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
295 		}
296 		spin_lock_irq(&se_nacl->nacl_sess_lock);
297 		/*
298 		 * The se_nacl->nacl_sess pointer will be set to the
299 		 * last active I_T Nexus for each struct se_node_acl.
300 		 */
301 		se_nacl->nacl_sess = se_sess;
302 
303 		list_add_tail(&se_sess->sess_acl_list,
304 			      &se_nacl->acl_sess_list);
305 		spin_unlock_irq(&se_nacl->nacl_sess_lock);
306 	}
307 	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
308 
309 	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
310 		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
311 }
312 EXPORT_SYMBOL(__transport_register_session);
313 
314 void transport_register_session(
315 	struct se_portal_group *se_tpg,
316 	struct se_node_acl *se_nacl,
317 	struct se_session *se_sess,
318 	void *fabric_sess_ptr)
319 {
320 	spin_lock_bh(&se_tpg->session_lock);
321 	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
322 	spin_unlock_bh(&se_tpg->session_lock);
323 }
324 EXPORT_SYMBOL(transport_register_session);
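
/*
 * Example (illustrative sketch, not part of this driver): a fabric module
 * typically pairs the two calls above when a new I_T nexus login completes.
 * The demo_* names below are hypothetical fabric-private objects.
 *
 *	struct se_session *se_sess;
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *
 *	transport_register_session(&demo_tpg->se_tpg, &demo_nacl->se_node_acl,
 *				   se_sess, demo_conn);
 */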
325 
326 void transport_deregister_session_configfs(struct se_session *se_sess)
327 {
328 	struct se_node_acl *se_nacl;
329 	unsigned long flags;
330 	/*
331 	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
332 	 */
333 	se_nacl = se_sess->se_node_acl;
334 	if (se_nacl) {
335 		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
336 		list_del(&se_sess->sess_acl_list);
337 		/*
338 		 * If the session list is empty, then clear the pointer.
339 		 * Otherwise, set the struct se_session pointer from the tail
340 		 * element of the per struct se_node_acl active session list.
341 		 */
342 		if (list_empty(&se_nacl->acl_sess_list))
343 			se_nacl->nacl_sess = NULL;
344 		else {
345 			se_nacl->nacl_sess = container_of(
346 					se_nacl->acl_sess_list.prev,
347 					struct se_session, sess_acl_list);
348 		}
349 		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
350 	}
351 }
352 EXPORT_SYMBOL(transport_deregister_session_configfs);
353 
354 void transport_free_session(struct se_session *se_sess)
355 {
356 	kmem_cache_free(se_sess_cache, se_sess);
357 }
358 EXPORT_SYMBOL(transport_free_session);
359 
360 void transport_deregister_session(struct se_session *se_sess)
361 {
362 	struct se_portal_group *se_tpg = se_sess->se_tpg;
363 	struct se_node_acl *se_nacl;
364 	unsigned long flags;
365 
366 	if (!se_tpg) {
367 		transport_free_session(se_sess);
368 		return;
369 	}
370 
371 	spin_lock_irqsave(&se_tpg->session_lock, flags);
372 	list_del(&se_sess->sess_list);
373 	se_sess->se_tpg = NULL;
374 	se_sess->fabric_sess_ptr = NULL;
375 	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
376 
377 	/*
378 	 * Determine if we need to do extra work for this initiator node's
379 	 * struct se_node_acl if it had been previously dynamically generated.
380 	 */
381 	se_nacl = se_sess->se_node_acl;
382 	if (se_nacl) {
383 		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
384 		if (se_nacl->dynamic_node_acl) {
385 			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
386 					se_tpg)) {
387 				list_del(&se_nacl->acl_list);
388 				se_tpg->num_node_acls--;
389 				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
390 
391 				core_tpg_wait_for_nacl_pr_ref(se_nacl);
392 				core_free_device_list_for_node(se_nacl, se_tpg);
393 				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
394 						se_nacl);
395 				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
396 			}
397 		}
398 		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
399 	}
400 
401 	transport_free_session(se_sess);
402 
403 	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
404 		se_tpg->se_tpg_tfo->get_fabric_name());
405 }
406 EXPORT_SYMBOL(transport_deregister_session);
407 
408 /*
409  * Called with cmd->t_state_lock held.
410  */
411 static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
412 {
413 	struct se_device *dev = cmd->se_dev;
414 	struct se_task *task;
415 	unsigned long flags;
416 
417 	if (!dev)
418 		return;
419 
420 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
421 		if (task->task_flags & TF_ACTIVE)
422 			continue;
423 
424 		spin_lock_irqsave(&dev->execute_task_lock, flags);
425 		if (task->t_state_active) {
426 			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
427 				cmd->se_tfo->get_task_tag(cmd), dev, task);
428 
429 			list_del(&task->t_state_list);
430 			atomic_dec(&cmd->t_task_cdbs_ex_left);
431 			task->t_state_active = false;
432 		}
433 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
434 	}
435 
436 }
437 
438 /*	transport_cmd_check_stop():
439  *
440  *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
441  *	'transport_off = 2' determines if task_dev_state should be removed.
442  *
443  *	A non-zero u8 t_state sets cmd->t_state.
444  *	Returns 1 when command is stopped, else 0.
445  */
446 static int transport_cmd_check_stop(
447 	struct se_cmd *cmd,
448 	int transport_off,
449 	u8 t_state)
450 {
451 	unsigned long flags;
452 
453 	spin_lock_irqsave(&cmd->t_state_lock, flags);
454 	/*
455 	 * Determine if the IOCTL context caller is requesting the stopping of this
456 	 * command for LUN shutdown purposes.
457 	 */
458 	if (cmd->transport_state & CMD_T_LUN_STOP) {
459 		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
460 			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
461 
462 		cmd->transport_state &= ~CMD_T_ACTIVE;
463 		if (transport_off == 2)
464 			transport_all_task_dev_remove_state(cmd);
465 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
466 
467 		complete(&cmd->transport_lun_stop_comp);
468 		return 1;
469 	}
470 	/*
471 	 * Determine if frontend context caller is requesting the stopping of
472 	 * this command for frontend exceptions.
473 	 */
474 	if (cmd->transport_state & CMD_T_STOP) {
475 		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
476 			__func__, __LINE__,
477 			cmd->se_tfo->get_task_tag(cmd));
478 
479 		if (transport_off == 2)
480 			transport_all_task_dev_remove_state(cmd);
481 
482 		/*
483 		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
484 		 * to FE.
485 		 */
486 		if (transport_off == 2)
487 			cmd->se_lun = NULL;
488 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
489 
490 		complete(&cmd->t_transport_stop_comp);
491 		return 1;
492 	}
493 	if (transport_off) {
494 		cmd->transport_state &= ~CMD_T_ACTIVE;
495 		if (transport_off == 2) {
496 			transport_all_task_dev_remove_state(cmd);
497 			/*
498 			 * Clear struct se_cmd->se_lun before the transport_off == 2
499 			 * handoff to fabric module.
500 			 */
501 			cmd->se_lun = NULL;
502 			/*
503 			 * Some fabric modules like tcm_loop can release
504 			 * their internally allocated I/O reference now and
505 			 * struct se_cmd now.
506 			 *
507 			 * Fabric modules are expected to return '1' here if the
508 			 * se_cmd being passed is released at this point,
509 			 * or zero if not being released.
510 			 */
511 			if (cmd->se_tfo->check_stop_free != NULL) {
512 				spin_unlock_irqrestore(
513 					&cmd->t_state_lock, flags);
514 
515 				return cmd->se_tfo->check_stop_free(cmd);
516 			}
517 		}
518 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
519 
520 		return 0;
521 	} else if (t_state)
522 		cmd->t_state = t_state;
523 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
524 
525 	return 0;
526 }
527 
528 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
529 {
530 	return transport_cmd_check_stop(cmd, 2, 0);
531 }
532 
533 static void transport_lun_remove_cmd(struct se_cmd *cmd)
534 {
535 	struct se_lun *lun = cmd->se_lun;
536 	unsigned long flags;
537 
538 	if (!lun)
539 		return;
540 
541 	spin_lock_irqsave(&cmd->t_state_lock, flags);
542 	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
543 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
544 		transport_all_task_dev_remove_state(cmd);
545 	}
546 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
547 
548 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
549 	if (!list_empty(&cmd->se_lun_node))
550 		list_del_init(&cmd->se_lun_node);
551 	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
552 }
553 
554 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
555 {
556 	if (!cmd->se_tmr_req)
557 		transport_lun_remove_cmd(cmd);
558 
559 	if (transport_cmd_check_stop_to_fabric(cmd))
560 		return;
561 	if (remove) {
562 		transport_remove_cmd_from_queue(cmd);
563 		transport_put_cmd(cmd);
564 	}
565 }
566 
567 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
568 		bool at_head)
569 {
570 	struct se_device *dev = cmd->se_dev;
571 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
572 	unsigned long flags;
573 
574 	if (t_state) {
575 		spin_lock_irqsave(&cmd->t_state_lock, flags);
576 		cmd->t_state = t_state;
577 		cmd->transport_state |= CMD_T_ACTIVE;
578 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
579 	}
580 
581 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
582 
583 	/* If the cmd is already on the list, remove it before we add it */
584 	if (!list_empty(&cmd->se_queue_node))
585 		list_del(&cmd->se_queue_node);
586 	else
587 		atomic_inc(&qobj->queue_cnt);
588 
589 	if (at_head)
590 		list_add(&cmd->se_queue_node, &qobj->qobj_list);
591 	else
592 		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
593 	cmd->transport_state |= CMD_T_QUEUED;
594 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
595 
596 	wake_up_interruptible(&qobj->thread_wq);
597 }
598 
599 static struct se_cmd *
600 transport_get_cmd_from_queue(struct se_queue_obj *qobj)
601 {
602 	struct se_cmd *cmd;
603 	unsigned long flags;
604 
605 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
606 	if (list_empty(&qobj->qobj_list)) {
607 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
608 		return NULL;
609 	}
610 	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
611 
612 	cmd->transport_state &= ~CMD_T_QUEUED;
613 	list_del_init(&cmd->se_queue_node);
614 	atomic_dec(&qobj->queue_cnt);
615 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
616 
617 	return cmd;
618 }
619 
620 static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
621 {
622 	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
623 	unsigned long flags;
624 
625 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
626 	if (!(cmd->transport_state & CMD_T_QUEUED)) {
627 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
628 		return;
629 	}
630 	cmd->transport_state &= ~CMD_T_QUEUED;
631 	atomic_dec(&qobj->queue_cnt);
632 	list_del_init(&cmd->se_queue_node);
633 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
634 }
635 
636 /*
637  * Completion function used by TCM subsystem plugins (such as FILEIO)
638  * for queueing up response from struct se_subsystem_api->do_task()
639  */
640 void transport_complete_sync_cache(struct se_cmd *cmd, int good)
641 {
642 	struct se_task *task = list_entry(cmd->t_task_list.next,
643 				struct se_task, t_list);
644 
645 	if (good) {
646 		cmd->scsi_status = SAM_STAT_GOOD;
647 		task->task_scsi_status = GOOD;
648 	} else {
649 		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
650 		task->task_se_cmd->scsi_sense_reason =
651 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
652 
653 	}
654 
655 	transport_complete_task(task, good);
656 }
657 EXPORT_SYMBOL(transport_complete_sync_cache);
658 
659 static void target_complete_failure_work(struct work_struct *work)
660 {
661 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
662 
663 	transport_generic_request_failure(cmd);
664 }
665 
666 /*	transport_complete_task():
667  *
668  *	Called from interrupt and non interrupt context depending
669  *	on the transport plugin.
670  */
671 void transport_complete_task(struct se_task *task, int success)
672 {
673 	struct se_cmd *cmd = task->task_se_cmd;
674 	struct se_device *dev = cmd->se_dev;
675 	unsigned long flags;
676 
677 	spin_lock_irqsave(&cmd->t_state_lock, flags);
678 	task->task_flags &= ~TF_ACTIVE;
679 
680 	/*
681 	 * See if any sense data exists, if so set the TASK_SENSE flag.
682 	 * Also check for any other post completion work that needs to be
683 	 * done by the plugins.
684 	 * See if any sense data exists; if so, set the TASK_SENSE flag.
685 	if (dev && dev->transport->transport_complete) {
686 		if (dev->transport->transport_complete(task) != 0) {
687 			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
688 			task->task_flags |= TF_HAS_SENSE;
689 			success = 1;
690 		}
691 	}
692 
693 	/*
694 	 * See if we are waiting for outstanding struct se_task
695 	 * to complete for an exception condition
696 	 */
697 	if (task->task_flags & TF_REQUEST_STOP) {
698 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
699 		complete(&task->task_stop_comp);
700 		return;
701 	}
702 
703 	if (!success)
704 		cmd->transport_state |= CMD_T_FAILED;
705 
706 	/*
707 	 * Decrement the outstanding t_task_cdbs_left count.  The last
708 	 * struct se_task from struct se_cmd will complete itself into the
709 	 * device queue depending upon int success.
710 	 */
711 	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
712 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
713 		return;
714 	}
715 
716 	if (cmd->transport_state & CMD_T_FAILED) {
717 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
718 		INIT_WORK(&cmd->work, target_complete_failure_work);
719 	} else {
720 		cmd->transport_state |= CMD_T_COMPLETE;
721 		INIT_WORK(&cmd->work, target_complete_ok_work);
722 	}
723 
724 	cmd->t_state = TRANSPORT_COMPLETE;
725 	cmd->transport_state |= CMD_T_ACTIVE;
726 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
727 
728 	queue_work(target_completion_wq, &cmd->work);
729 }
730 EXPORT_SYMBOL(transport_complete_task);
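
/*
 * Example (illustrative sketch): a backend plugin's asynchronous I/O
 * completion path, e.g. a bio end_io style callback, would typically hand
 * a finished struct se_task back to the core as below.  struct demo_req
 * and demo_end_io are hypothetical backend-private names.
 *
 *	static void demo_end_io(struct demo_req *req, int err)
 *	{
 *		struct se_task *task = req->task;
 *
 *		kfree(req);
 *		transport_complete_task(task, !err);
 *	}
 */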
731 
732 /*
733  * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
734  * struct se_task list is ready to be added to the active execution list
735  * of the struct se_device.
736  *
737  * Called with se_device->execute_task_lock held.
738  */
739 static inline int transport_add_task_check_sam_attr(
740 	struct se_task *task,
741 	struct se_task *task_prev,
742 	struct se_device *dev)
743 {
744 	/*
745 	 * No SAM Task attribute emulation enabled, add to tail of
746 	 * execution queue
747 	 */
748 	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
749 		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
750 		return 0;
751 	}
752 	/*
753 	 * HEAD_OF_QUEUE attribute for the received CDB means that the first
754 	 * task associated with a struct se_cmd goes to the head of the
755 	 * struct se_device->execute_task_list, and each subsequent task is
756 	 * inserted after task_prev
757 	 */
758 	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
759 		list_add(&task->t_execute_list,
760 				(task_prev != NULL) ?
761 				&task_prev->t_execute_list :
762 				&dev->execute_task_list);
763 
764 		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
765 				" in execution queue\n",
766 				task->task_se_cmd->t_task_cdb[0]);
767 		return 1;
768 	}
769 	/*
770 	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
771 	 * transitioned from Dormant -> Active state, and are added to the end
772 	 * of the struct se_device->execute_task_list
773 	 */
774 	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
775 	return 0;
776 }
777 
778 /*	__transport_add_task_to_execute_queue():
779  *
780  *	Called with se_device->execute_task_lock held.
781  */
782 static void __transport_add_task_to_execute_queue(
783 	struct se_task *task,
784 	struct se_task *task_prev,
785 	struct se_device *dev)
786 {
787 	int head_of_queue;
788 
789 	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
790 	atomic_inc(&dev->execute_tasks);
791 
792 	if (task->t_state_active)
793 		return;
794 	/*
795 	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
796 	 * state list as well.  Running without SAM Task Attribute emulation
797 	 * will always return head_of_queue == 0 here
798 	 */
799 	if (head_of_queue)
800 		list_add(&task->t_state_list, (task_prev) ?
801 				&task_prev->t_state_list :
802 				&dev->state_task_list);
803 	else
804 		list_add_tail(&task->t_state_list, &dev->state_task_list);
805 
806 	task->t_state_active = true;
807 
808 	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
809 		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
810 		task, dev);
811 }
812 
813 static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
814 {
815 	struct se_device *dev = cmd->se_dev;
816 	struct se_task *task;
817 	unsigned long flags;
818 
819 	spin_lock_irqsave(&cmd->t_state_lock, flags);
820 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
821 		spin_lock(&dev->execute_task_lock);
822 		if (!task->t_state_active) {
823 			list_add_tail(&task->t_state_list,
824 				      &dev->state_task_list);
825 			task->t_state_active = true;
826 
827 			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
828 				task->task_se_cmd->se_tfo->get_task_tag(
829 				task->task_se_cmd), task, dev);
830 		}
831 		spin_unlock(&dev->execute_task_lock);
832 	}
833 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
834 }
835 
836 static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
837 {
838 	struct se_device *dev = cmd->se_dev;
839 	struct se_task *task, *task_prev = NULL;
840 
841 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
842 		if (!list_empty(&task->t_execute_list))
843 			continue;
844 		/*
845 		 * __transport_add_task_to_execute_queue() handles the
846 		 * SAM Task Attribute emulation if enabled
847 		 */
848 		__transport_add_task_to_execute_queue(task, task_prev, dev);
849 		task_prev = task;
850 	}
851 }
852 
853 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
854 {
855 	unsigned long flags;
856 	struct se_device *dev = cmd->se_dev;
857 
858 	spin_lock_irqsave(&dev->execute_task_lock, flags);
859 	__transport_add_tasks_from_cmd(cmd);
860 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
861 }
862 
863 void __transport_remove_task_from_execute_queue(struct se_task *task,
864 		struct se_device *dev)
865 {
866 	list_del_init(&task->t_execute_list);
867 	atomic_dec(&dev->execute_tasks);
868 }
869 
870 static void transport_remove_task_from_execute_queue(
871 	struct se_task *task,
872 	struct se_device *dev)
873 {
874 	unsigned long flags;
875 
876 	if (WARN_ON(list_empty(&task->t_execute_list)))
877 		return;
878 
879 	spin_lock_irqsave(&dev->execute_task_lock, flags);
880 	__transport_remove_task_from_execute_queue(task, dev);
881 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
882 }
883 
884 /*
885  * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
886  */
887 
888 static void target_qf_do_work(struct work_struct *work)
889 {
890 	struct se_device *dev = container_of(work, struct se_device,
891 					qf_work_queue);
892 	LIST_HEAD(qf_cmd_list);
893 	struct se_cmd *cmd, *cmd_tmp;
894 
895 	spin_lock_irq(&dev->qf_cmd_lock);
896 	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
897 	spin_unlock_irq(&dev->qf_cmd_lock);
898 
899 	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
900 		list_del(&cmd->se_qf_node);
901 		atomic_dec(&dev->dev_qf_count);
902 		smp_mb__after_atomic_dec();
903 
904 		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
905 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
906 			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
907 			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
908 			: "UNKNOWN");
909 
910 		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
911 	}
912 }
913 
914 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
915 {
916 	switch (cmd->data_direction) {
917 	case DMA_NONE:
918 		return "NONE";
919 	case DMA_FROM_DEVICE:
920 		return "READ";
921 	case DMA_TO_DEVICE:
922 		return "WRITE";
923 	case DMA_BIDIRECTIONAL:
924 		return "BIDI";
925 	default:
926 		break;
927 	}
928 
929 	return "UNKNOWN";
930 }
931 
932 void transport_dump_dev_state(
933 	struct se_device *dev,
934 	char *b,
935 	int *bl)
936 {
937 	*bl += sprintf(b + *bl, "Status: ");
938 	switch (dev->dev_status) {
939 	case TRANSPORT_DEVICE_ACTIVATED:
940 		*bl += sprintf(b + *bl, "ACTIVATED");
941 		break;
942 	case TRANSPORT_DEVICE_DEACTIVATED:
943 		*bl += sprintf(b + *bl, "DEACTIVATED");
944 		break;
945 	case TRANSPORT_DEVICE_SHUTDOWN:
946 		*bl += sprintf(b + *bl, "SHUTDOWN");
947 		break;
948 	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
949 	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
950 		*bl += sprintf(b + *bl, "OFFLINE");
951 		break;
952 	default:
953 		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
954 		break;
955 	}
956 
957 	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
958 		atomic_read(&dev->execute_tasks), dev->queue_depth);
959 	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
960 		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
961 	*bl += sprintf(b + *bl, "        ");
962 }
963 
964 void transport_dump_vpd_proto_id(
965 	struct t10_vpd *vpd,
966 	unsigned char *p_buf,
967 	int p_buf_len)
968 {
969 	unsigned char buf[VPD_TMP_BUF_SIZE];
970 	int len;
971 
972 	memset(buf, 0, VPD_TMP_BUF_SIZE);
973 	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
974 
975 	switch (vpd->protocol_identifier) {
976 	case 0x00:
977 		sprintf(buf+len, "Fibre Channel\n");
978 		break;
979 	case 0x10:
980 		sprintf(buf+len, "Parallel SCSI\n");
981 		break;
982 	case 0x20:
983 		sprintf(buf+len, "SSA\n");
984 		break;
985 	case 0x30:
986 		sprintf(buf+len, "IEEE 1394\n");
987 		break;
988 	case 0x40:
989 		sprintf(buf+len, "SCSI Remote Direct Memory Access"
990 				" Protocol\n");
991 		break;
992 	case 0x50:
993 		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
994 		break;
995 	case 0x60:
996 		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
997 		break;
998 	case 0x70:
999 		sprintf(buf+len, "Automation/Drive Interface Transport"
1000 				" Protocol\n");
1001 		break;
1002 	case 0x80:
1003 		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1004 		break;
1005 	default:
1006 		sprintf(buf+len, "Unknown 0x%02x\n",
1007 				vpd->protocol_identifier);
1008 		break;
1009 	}
1010 
1011 	if (p_buf)
1012 		strncpy(p_buf, buf, p_buf_len);
1013 	else
1014 		pr_debug("%s", buf);
1015 }
1016 
1017 void
1018 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1019 {
1020 	/*
1021 	 * Check if the Protocol Identifier Valid (PIV) bit is set..
1022 	 *
1023 	 * from spc3r23.pdf section 7.5.1
1024 	 */
1025 	if (page_83[1] & 0x80) {
1026 		vpd->protocol_identifier = (page_83[0] & 0xf0);
1027 		vpd->protocol_identifier_set = 1;
1028 		transport_dump_vpd_proto_id(vpd, NULL, 0);
1029 	}
1030 }
1031 EXPORT_SYMBOL(transport_set_vpd_proto_id);
1032 
1033 int transport_dump_vpd_assoc(
1034 	struct t10_vpd *vpd,
1035 	unsigned char *p_buf,
1036 	int p_buf_len)
1037 {
1038 	unsigned char buf[VPD_TMP_BUF_SIZE];
1039 	int ret = 0;
1040 	int len;
1041 
1042 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1043 	len = sprintf(buf, "T10 VPD Identifier Association: ");
1044 
1045 	switch (vpd->association) {
1046 	case 0x00:
1047 		sprintf(buf+len, "addressed logical unit\n");
1048 		break;
1049 	case 0x10:
1050 		sprintf(buf+len, "target port\n");
1051 		break;
1052 	case 0x20:
1053 		sprintf(buf+len, "SCSI target device\n");
1054 		break;
1055 	default:
1056 		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1057 		ret = -EINVAL;
1058 		break;
1059 	}
1060 
1061 	if (p_buf)
1062 		strncpy(p_buf, buf, p_buf_len);
1063 	else
1064 		pr_debug("%s", buf);
1065 
1066 	return ret;
1067 }
1068 
1069 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1070 {
1071 	/*
1072 	 * The VPD identification association..
1073 	 *
1074 	 * from spc3r23.pdf Section 7.6.3.1 Table 297
1075 	 */
1076 	vpd->association = (page_83[1] & 0x30);
1077 	return transport_dump_vpd_assoc(vpd, NULL, 0);
1078 }
1079 EXPORT_SYMBOL(transport_set_vpd_assoc);
1080 
1081 int transport_dump_vpd_ident_type(
1082 	struct t10_vpd *vpd,
1083 	unsigned char *p_buf,
1084 	int p_buf_len)
1085 {
1086 	unsigned char buf[VPD_TMP_BUF_SIZE];
1087 	int ret = 0;
1088 	int len;
1089 
1090 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1091 	len = sprintf(buf, "T10 VPD Identifier Type: ");
1092 
1093 	switch (vpd->device_identifier_type) {
1094 	case 0x00:
1095 		sprintf(buf+len, "Vendor specific\n");
1096 		break;
1097 	case 0x01:
1098 		sprintf(buf+len, "T10 Vendor ID based\n");
1099 		break;
1100 	case 0x02:
1101 		sprintf(buf+len, "EUI-64 based\n");
1102 		break;
1103 	case 0x03:
1104 		sprintf(buf+len, "NAA\n");
1105 		break;
1106 	case 0x04:
1107 		sprintf(buf+len, "Relative target port identifier\n");
1108 		break;
1109 	case 0x08:
1110 		sprintf(buf+len, "SCSI name string\n");
1111 		break;
1112 	default:
1113 		sprintf(buf+len, "Unsupported: 0x%02x\n",
1114 				vpd->device_identifier_type);
1115 		ret = -EINVAL;
1116 		break;
1117 	}
1118 
1119 	if (p_buf) {
1120 		if (p_buf_len < strlen(buf)+1)
1121 			return -EINVAL;
1122 		strncpy(p_buf, buf, p_buf_len);
1123 	} else {
1124 		pr_debug("%s", buf);
1125 	}
1126 
1127 	return ret;
1128 }
1129 
1130 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1131 {
1132 	/*
1133 	 * The VPD identifier type..
1134 	 *
1135 	 * from spc3r23.pdf Section 7.6.3.1 Table 298
1136 	 */
1137 	vpd->device_identifier_type = (page_83[1] & 0x0f);
1138 	return transport_dump_vpd_ident_type(vpd, NULL, 0);
1139 }
1140 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1141 
1142 int transport_dump_vpd_ident(
1143 	struct t10_vpd *vpd,
1144 	unsigned char *p_buf,
1145 	int p_buf_len)
1146 {
1147 	unsigned char buf[VPD_TMP_BUF_SIZE];
1148 	int ret = 0;
1149 
1150 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1151 
1152 	switch (vpd->device_identifier_code_set) {
1153 	case 0x01: /* Binary */
1154 		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1155 			&vpd->device_identifier[0]);
1156 		break;
1157 	case 0x02: /* ASCII */
1158 		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1159 			&vpd->device_identifier[0]);
1160 		break;
1161 	case 0x03: /* UTF-8 */
1162 		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1163 			&vpd->device_identifier[0]);
1164 		break;
1165 	default:
1166 		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1167 			" 0x%02x", vpd->device_identifier_code_set);
1168 		ret = -EINVAL;
1169 		break;
1170 	}
1171 
1172 	if (p_buf)
1173 		strncpy(p_buf, buf, p_buf_len);
1174 	else
1175 		pr_debug("%s", buf);
1176 
1177 	return ret;
1178 }
1179 
1180 int
1181 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1182 {
1183 	static const char hex_str[] = "0123456789abcdef";
1184 	int j = 0, i = 4; /* offset to start of the identifier */
1185 
1186 	/*
1187 	 * The VPD Code Set (encoding)
1188 	 *
1189 	 * from spc3r23.pdf Section 7.6.3.1 Table 296
1190 	 */
1191 	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1192 	switch (vpd->device_identifier_code_set) {
1193 	case 0x01: /* Binary */
1194 		vpd->device_identifier[j++] =
1195 				hex_str[vpd->device_identifier_type];
1196 		while (i < (4 + page_83[3])) {
1197 			vpd->device_identifier[j++] =
1198 				hex_str[(page_83[i] & 0xf0) >> 4];
1199 			vpd->device_identifier[j++] =
1200 				hex_str[page_83[i] & 0x0f];
1201 			i++;
1202 		}
1203 		break;
1204 	case 0x02: /* ASCII */
1205 	case 0x03: /* UTF-8 */
1206 		while (i < (4 + page_83[3]))
1207 			vpd->device_identifier[j++] = page_83[i++];
1208 		break;
1209 	default:
1210 		break;
1211 	}
1212 
1213 	return transport_dump_vpd_ident(vpd, NULL, 0);
1214 }
1215 EXPORT_SYMBOL(transport_set_vpd_ident);
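
/*
 * Example (illustrative sketch): a caller holding a raw INQUIRY EVPD 0x83
 * payload in buf can walk its designation descriptors and feed each one to
 * the transport_set_vpd_*() helpers above.  page_len is assumed to have
 * been read from bytes 2-3 of the page header by the caller.
 *
 *	unsigned char *desc = &buf[4];
 *	struct t10_vpd *vpd;
 *
 *	while (desc < &buf[4] + page_len) {
 *		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
 *		if (!vpd)
 *			break;
 *		transport_set_vpd_proto_id(vpd, desc);
 *		transport_set_vpd_assoc(vpd, desc);
 *		transport_set_vpd_ident_type(vpd, desc);
 *		transport_set_vpd_ident(vpd, desc);
 *		desc += 4 + desc[3];
 *	}
 */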
1216 
1217 static void core_setup_task_attr_emulation(struct se_device *dev)
1218 {
1219 	/*
1220 	 * If this device is from Target_Core_Mod/pSCSI, disable the
1221 	 * SAM Task Attribute emulation.
1222 	 *
1223 	 * This is currently not available in upstream Linux/SCSI Target
1224 	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1225 	 */
1226 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1227 		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1228 		return;
1229 	}
1230 
1231 	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1232 	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1233 		" device\n", dev->transport->name,
1234 		dev->transport->get_device_rev(dev));
1235 }
1236 
1237 static void scsi_dump_inquiry(struct se_device *dev)
1238 {
1239 	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1240 	char buf[17];
1241 	int i, device_type;
1242 	/*
1243 	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1244 	 */
1245 	for (i = 0; i < 8; i++)
1246 		if (wwn->vendor[i] >= 0x20)
1247 			buf[i] = wwn->vendor[i];
1248 		else
1249 			buf[i] = ' ';
1250 	buf[i] = '\0';
1251 	pr_debug("  Vendor: %s\n", buf);
1252 
1253 	for (i = 0; i < 16; i++)
1254 		if (wwn->model[i] >= 0x20)
1255 			buf[i] = wwn->model[i];
1256 		else
1257 			buf[i] = ' ';
1258 	buf[i] = '\0';
1259 	pr_debug("  Model: %s\n", buf);
1260 
1261 	for (i = 0; i < 4; i++)
1262 		if (wwn->revision[i] >= 0x20)
1263 			buf[i] = wwn->revision[i];
1264 		else
1265 			buf[i] = ' ';
1266 	buf[i] = '\0';
1267 	pr_debug("  Revision: %s\n", buf);
1268 
1269 	device_type = dev->transport->get_device_type(dev);
1270 	pr_debug("  Type:   %s ", scsi_device_type(device_type));
1271 	pr_debug("                 ANSI SCSI revision: %02x\n",
1272 				dev->transport->get_device_rev(dev));
1273 }
1274 
1275 struct se_device *transport_add_device_to_core_hba(
1276 	struct se_hba *hba,
1277 	struct se_subsystem_api *transport,
1278 	struct se_subsystem_dev *se_dev,
1279 	u32 device_flags,
1280 	void *transport_dev,
1281 	struct se_dev_limits *dev_limits,
1282 	const char *inquiry_prod,
1283 	const char *inquiry_rev)
1284 {
1285 	int force_pt;
1286 	struct se_device  *dev;
1287 
1288 	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1289 	if (!dev) {
1290 		pr_err("Unable to allocate memory for se_dev_t\n");
1291 		return NULL;
1292 	}
1293 
1294 	transport_init_queue_obj(&dev->dev_queue_obj);
1295 	dev->dev_flags		= device_flags;
1296 	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
1297 	dev->dev_ptr		= transport_dev;
1298 	dev->se_hba		= hba;
1299 	dev->se_sub_dev		= se_dev;
1300 	dev->transport		= transport;
1301 	INIT_LIST_HEAD(&dev->dev_list);
1302 	INIT_LIST_HEAD(&dev->dev_sep_list);
1303 	INIT_LIST_HEAD(&dev->dev_tmr_list);
1304 	INIT_LIST_HEAD(&dev->execute_task_list);
1305 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
1306 	INIT_LIST_HEAD(&dev->state_task_list);
1307 	INIT_LIST_HEAD(&dev->qf_cmd_list);
1308 	spin_lock_init(&dev->execute_task_lock);
1309 	spin_lock_init(&dev->delayed_cmd_lock);
1310 	spin_lock_init(&dev->dev_reservation_lock);
1311 	spin_lock_init(&dev->dev_status_lock);
1312 	spin_lock_init(&dev->se_port_lock);
1313 	spin_lock_init(&dev->se_tmr_lock);
1314 	spin_lock_init(&dev->qf_cmd_lock);
1315 	atomic_set(&dev->dev_ordered_id, 0);
1316 
1317 	se_dev_set_default_attribs(dev, dev_limits);
1318 
1319 	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1320 	dev->creation_time = get_jiffies_64();
1321 	spin_lock_init(&dev->stats_lock);
1322 
1323 	spin_lock(&hba->device_lock);
1324 	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1325 	hba->dev_count++;
1326 	spin_unlock(&hba->device_lock);
1327 	/*
1328 	 * Setup the SAM Task Attribute emulation for struct se_device
1329 	 */
1330 	core_setup_task_attr_emulation(dev);
1331 	/*
1332 	 * Force PR and ALUA passthrough emulation with internal object use.
1333 	 */
1334 	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1335 	/*
1336 	 * Setup the Reservations infrastructure for struct se_device
1337 	 */
1338 	core_setup_reservations(dev, force_pt);
1339 	/*
1340 	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1341 	 */
1342 	if (core_setup_alua(dev, force_pt) < 0)
1343 		goto out;
1344 
1345 	/*
1346 	 * Startup the struct se_device processing thread
1347 	 */
1348 	dev->process_thread = kthread_run(transport_processing_thread, dev,
1349 					  "LIO_%s", dev->transport->name);
1350 	if (IS_ERR(dev->process_thread)) {
1351 		pr_err("Unable to create kthread: LIO_%s\n",
1352 			dev->transport->name);
1353 		goto out;
1354 	}
1355 	/*
1356 	 * Setup work_queue for QUEUE_FULL
1357 	 */
1358 	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1359 	/*
1360 	 * Preload the initial INQUIRY const values if we are doing
1361 	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1362 	 * passthrough because this is being provided by the backend LLD.
1363 	 * This is required so that transport_get_inquiry() copies these
1364 	 * originals once back into DEV_T10_WWN(dev) for the virtual device
1365 	 * setup.
1366 	 */
1367 	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1368 		if (!inquiry_prod || !inquiry_rev) {
1369 			pr_err("All non TCM/pSCSI plugins require"
1370 				" INQUIRY consts\n");
1371 			goto out;
1372 		}
1373 
1374 		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1375 		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1376 		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1377 	}
1378 	scsi_dump_inquiry(dev);
1379 
1380 	return dev;
1381 out:
1382 	if (!IS_ERR_OR_NULL(dev->process_thread))
		kthread_stop(dev->process_thread);
1383 
1384 	spin_lock(&hba->device_lock);
1385 	list_del(&dev->dev_list);
1386 	hba->dev_count--;
1387 	spin_unlock(&hba->device_lock);
1388 
1389 	se_release_vpd_for_dev(dev);
1390 
1391 	kfree(dev);
1392 
1393 	return NULL;
1394 }
1395 EXPORT_SYMBOL(transport_add_device_to_core_hba);
1396 
1397 /*	transport_generic_prepare_cdb():
1398  *
1399  *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1400  *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1401  *	Because we are mapping iSCSI LUNs to SCSI Target IDs, a non-zero
1402  *	LUN in the CDB would throw the devices and HBAs for a loop, so it
1403  *	is cleared below, except for opcodes that reuse byte 1's upper bits.
1404  */
1405 static inline void transport_generic_prepare_cdb(
1406 	unsigned char *cdb)
1407 {
1408 	switch (cdb[0]) {
1409 	case READ_10: /* SBC - RDProtect */
1410 	case READ_12: /* SBC - RDProtect */
1411 	case READ_16: /* SBC - RDProtect */
1412 	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1413 	case VERIFY: /* SBC - VRProtect */
1414 	case VERIFY_16: /* SBC - VRProtect */
1415 	case WRITE_VERIFY: /* SBC - VRProtect */
1416 	case WRITE_VERIFY_12: /* SBC - VRProtect */
1417 		break;
1418 	default:
1419 		cdb[1] &= 0x1f; /* clear logical unit number */
1420 		break;
1421 	}
1422 }
1423 
1424 static struct se_task *
1425 transport_generic_get_task(struct se_cmd *cmd,
1426 		enum dma_data_direction data_direction)
1427 {
1428 	struct se_task *task;
1429 	struct se_device *dev = cmd->se_dev;
1430 
1431 	task = dev->transport->alloc_task(cmd->t_task_cdb);
1432 	if (!task) {
1433 		pr_err("Unable to allocate struct se_task\n");
1434 		return NULL;
1435 	}
1436 
1437 	INIT_LIST_HEAD(&task->t_list);
1438 	INIT_LIST_HEAD(&task->t_execute_list);
1439 	INIT_LIST_HEAD(&task->t_state_list);
1440 	init_completion(&task->task_stop_comp);
1441 	task->task_se_cmd = cmd;
1442 	task->task_data_direction = data_direction;
1443 
1444 	return task;
1445 }
1446 
1447 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1448 
1449 /*
1450  * Used by fabric modules containing a local struct se_cmd within their
1451  * fabric dependent per I/O descriptor.
1452  */
1453 void transport_init_se_cmd(
1454 	struct se_cmd *cmd,
1455 	struct target_core_fabric_ops *tfo,
1456 	struct se_session *se_sess,
1457 	u32 data_length,
1458 	int data_direction,
1459 	int task_attr,
1460 	unsigned char *sense_buffer)
1461 {
1462 	INIT_LIST_HEAD(&cmd->se_lun_node);
1463 	INIT_LIST_HEAD(&cmd->se_delayed_node);
1464 	INIT_LIST_HEAD(&cmd->se_qf_node);
1465 	INIT_LIST_HEAD(&cmd->se_queue_node);
1466 	INIT_LIST_HEAD(&cmd->se_cmd_list);
1467 	INIT_LIST_HEAD(&cmd->t_task_list);
1468 	init_completion(&cmd->transport_lun_fe_stop_comp);
1469 	init_completion(&cmd->transport_lun_stop_comp);
1470 	init_completion(&cmd->t_transport_stop_comp);
1471 	init_completion(&cmd->cmd_wait_comp);
1472 	spin_lock_init(&cmd->t_state_lock);
1473 	cmd->transport_state = CMD_T_DEV_ACTIVE;
1474 
1475 	cmd->se_tfo = tfo;
1476 	cmd->se_sess = se_sess;
1477 	cmd->data_length = data_length;
1478 	cmd->data_direction = data_direction;
1479 	cmd->sam_task_attr = task_attr;
1480 	cmd->sense_buffer = sense_buffer;
1481 }
1482 EXPORT_SYMBOL(transport_init_se_cmd);
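
/*
 * Example (illustrative sketch): a fabric module usually embeds struct
 * se_cmd inside its own per I/O descriptor and initializes it here before
 * CDB submission.  struct demo_cmd and demo_fabric_ops are hypothetical.
 *
 *	struct demo_cmd {
 *		struct se_cmd se_cmd;
 *		unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
 *		u32 wire_tag;
 *	};
 *
 *	struct demo_cmd *dcmd = kzalloc(sizeof(*dcmd), GFP_KERNEL);
 *
 *	transport_init_se_cmd(&dcmd->se_cmd, &demo_fabric_ops, se_sess,
 *			      data_length, DMA_FROM_DEVICE, MSG_SIMPLE_TAG,
 *			      &dcmd->sense_buf[0]);
 */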
1483 
1484 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1485 {
1486 	/*
1487 	 * Check if SAM Task Attribute emulation is enabled for this
1488 	 * struct se_device storage object
1489 	 */
1490 	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1491 		return 0;
1492 
1493 	if (cmd->sam_task_attr == MSG_ACA_TAG) {
1494 		pr_debug("SAM Task Attribute ACA"
1495 			" emulation is not supported\n");
1496 		return -EINVAL;
1497 	}
1498 	/*
1499 	 * Used to determine when ORDERED commands should go from
1500 	 * Dormant to Active status.
1501 	 */
1502 	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1503 	smp_mb__after_atomic_inc();
1504 	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1505 			cmd->se_ordered_id, cmd->sam_task_attr,
1506 			cmd->se_dev->transport->name);
1507 	return 0;
1508 }
1509 
1510 /*	transport_generic_allocate_tasks():
1511  *
1512  *	Called from fabric RX Thread.
1513  */
1514 int transport_generic_allocate_tasks(
1515 	struct se_cmd *cmd,
1516 	unsigned char *cdb)
1517 {
1518 	int ret;
1519 
1520 	transport_generic_prepare_cdb(cdb);
1521 	/*
1522 	 * Ensure that the received CDB is less than the max (252 + 8) bytes
1523 	 * for VARIABLE_LENGTH_CMD
1524 	 */
1525 	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1526 		pr_err("Received SCSI CDB with command_size: %d that"
1527 			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1528 			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1529 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1530 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1531 		return -EINVAL;
1532 	}
1533 	/*
1534 	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1535 	 * allocate the additional extended CDB buffer now..  Otherwise
1536 	 * setup the pointer from __t_task_cdb to t_task_cdb.
1537 	 */
1538 	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1539 		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1540 						GFP_KERNEL);
1541 		if (!cmd->t_task_cdb) {
1542 			pr_err("Unable to allocate cmd->t_task_cdb"
1543 				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1544 				scsi_command_size(cdb),
1545 				(unsigned long)sizeof(cmd->__t_task_cdb));
1546 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1547 			cmd->scsi_sense_reason =
1548 					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1549 			return -ENOMEM;
1550 		}
1551 	} else
1552 		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1553 	/*
1554 	 * Copy the original CDB into cmd->
1555 	 */
1556 	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1557 	/*
1558 	 * Setup the received CDB based on SCSI defined opcodes and
1559 	 * perform unit attention, persistent reservations and ALUA
1560 	 * checks for virtual device backends.  The cmd->t_task_cdb
1561 	 * pointer is expected to be setup before we reach this point.
1562 	 */
1563 	ret = transport_generic_cmd_sequencer(cmd, cdb);
1564 	if (ret < 0)
1565 		return ret;
1566 	/*
1567 	 * Check for SAM Task Attribute Emulation
1568 	 */
1569 	if (transport_check_alloc_task_attr(cmd) < 0) {
1570 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1571 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1572 		return -EINVAL;
1573 	}
1574 	spin_lock(&cmd->se_lun->lun_sep_lock);
1575 	if (cmd->se_lun->lun_sep)
1576 		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1577 	spin_unlock(&cmd->se_lun->lun_sep_lock);
1578 	return 0;
1579 }
1580 EXPORT_SYMBOL(transport_generic_allocate_tasks);
1581 
1582 /*
1583  * Used by fabric module frontends to queue tasks directly.
1584  * May only be used from process context.
1585  */
1586 int transport_handle_cdb_direct(
1587 	struct se_cmd *cmd)
1588 {
1589 	int ret;
1590 
1591 	if (!cmd->se_lun) {
1592 		dump_stack();
1593 		pr_err("cmd->se_lun is NULL\n");
1594 		return -EINVAL;
1595 	}
1596 	if (in_interrupt()) {
1597 		dump_stack();
1598 		pr_err("transport_handle_cdb_direct cannot be called"
1599 				" from interrupt context\n");
1600 		return -EINVAL;
1601 	}
1602 	/*
1603 	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
1604 	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1605 	 * in existing usage to ensure that outstanding descriptors are handled
1606 	 * correctly during shutdown via transport_wait_for_tasks()
1607 	 *
1608 	 * Also, we don't take cmd->t_state_lock here as we only expect
1609 	 * this to be called for initial descriptor submission.
1610 	 */
1611 	cmd->t_state = TRANSPORT_NEW_CMD;
1612 	cmd->transport_state |= CMD_T_ACTIVE;
1613 
1614 	/*
1615 	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1616 	 * so follow TRANSPORT_NEW_CMD processing thread context usage
1617 	 * and call transport_generic_request_failure() if necessary..
1618 	 */
1619 	ret = transport_generic_new_cmd(cmd);
1620 	if (ret < 0)
1621 		transport_generic_request_failure(cmd);
1622 
1623 	return 0;
1624 }
1625 EXPORT_SYMBOL(transport_handle_cdb_direct);
1626 
1627 /**
1628  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1629  *
1630  * @se_cmd: command descriptor to submit
1631  * @se_sess: associated se_sess for endpoint
1632  * @cdb: pointer to SCSI CDB
1633  * @sense: pointer to SCSI sense buffer
1634  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1635  * @data_length: fabric expected data transfer length
1636  * @task_attr: SAM task attribute
1637  * @data_dir: DMA data direction
1638  * @flags: flags for command submission from target_sc_flags_table
1639  *
1640  * This may only be called from process context, and also currently
1641  * assumes internal allocation of fabric payload buffer by target-core.
1642  **/
1643 void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1644 		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1645 		u32 data_length, int task_attr, int data_dir, int flags)
1646 {
1647 	struct se_portal_group *se_tpg;
1648 	int rc;
1649 
1650 	se_tpg = se_sess->se_tpg;
1651 	BUG_ON(!se_tpg);
1652 	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1653 	BUG_ON(in_interrupt());
1654 	/*
1655 	 * Initialize se_cmd for target operation.  From this point
1656 	 * exceptions are handled by sending exception status via
1657 	 * target_core_fabric_ops->queue_status() callback
1658 	 */
1659 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1660 				data_length, data_dir, task_attr, sense);
1661 	/*
1662 	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1663 	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
1664 	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1665 	 * kref_put() to happen during fabric packet acknowledgement.
1666 	 */
1667 	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1668 	/*
1669 	 * Signal bidirectional data payloads to target-core
1670 	 */
1671 	if (flags & TARGET_SCF_BIDI_OP)
1672 		se_cmd->se_cmd_flags |= SCF_BIDI;
1673 	/*
1674 	 * Locate se_lun pointer and attach it to struct se_cmd
1675 	 */
1676 	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
1677 		transport_send_check_condition_and_sense(se_cmd,
1678 				se_cmd->scsi_sense_reason, 0);
1679 		target_put_sess_cmd(se_sess, se_cmd);
1680 		return;
1681 	}
1682 	/*
1683 	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
1684 	 * allocate the necessary tasks to complete the received CDB+data
1685 	 */
1686 	rc = transport_generic_allocate_tasks(se_cmd, cdb);
1687 	if (rc != 0) {
1688 		transport_generic_request_failure(se_cmd);
1689 		return;
1690 	}
1691 	/*
1692 	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
1693 	 * for immediate execution of READs, otherwise wait for
1694 	 * transport_generic_handle_data() to be called for WRITEs
1695 	 * when fabric has filled the incoming buffer.
1696 	 */
1697 	transport_handle_cdb_direct(se_cmd);
1698 	return;
1699 }
1700 EXPORT_SYMBOL(target_submit_cmd);
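
/*
 * Example (illustrative sketch): a fabric receive path could dispatch a
 * newly decoded SCSI command in one call.  The embedded se_cmd must still
 * be uninitialized (zeroed) at this point, since target_submit_cmd()
 * performs transport_init_se_cmd() itself.  The demo_* names are
 * hypothetical.
 *
 *	target_submit_cmd(&dcmd->se_cmd, demo_conn->se_sess, cdb,
 *			  &dcmd->sense_buf[0], unpacked_lun, data_length,
 *			  MSG_SIMPLE_TAG, DMA_TO_DEVICE, TARGET_SCF_ACK_KREF);
 */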
1701 
1702 /*
1703  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1704  * to  queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1705  * complete setup in TCM process context w/ TFO->new_cmd_map().
1706  */
1707 int transport_generic_handle_cdb_map(
1708 	struct se_cmd *cmd)
1709 {
1710 	if (!cmd->se_lun) {
1711 		dump_stack();
1712 		pr_err("cmd->se_lun is NULL\n");
1713 		return -EINVAL;
1714 	}
1715 
1716 	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
1717 	return 0;
1718 }
1719 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1720 
1721 /*	transport_generic_handle_data():
1722  *
1723  *
1724  */
1725 int transport_generic_handle_data(
1726 	struct se_cmd *cmd)
1727 {
1728 	/*
1729 	 * For the software fabric case, we assume the nexus is being
1730 	 * failed/shutdown when signals are pending from the kthread context
1731 	 * caller, so we return a failure.  For the HW target mode case running
1732 	 * in interrupt code, the signal_pending() check is skipped.
1733 	 */
1734 	if (!in_interrupt() && signal_pending(current))
1735 		return -EPERM;
1736 	/*
1737 	 * If the received CDB has already been ABORTED by the generic
1738 	 * target engine, we now call transport_check_aborted_status()
1739 	 * to queue any delayed TASK_ABORTED status for the received CDB to the
1740 	 * fabric module as we are expecting no further incoming DATA OUT
1741 	 * sequences at this point.
1742 	 */
1743 	if (transport_check_aborted_status(cmd, 1) != 0)
1744 		return 0;
1745 
1746 	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
1747 	return 0;
1748 }
1749 EXPORT_SYMBOL(transport_generic_handle_data);
1750 
1751 /*	transport_generic_handle_tmr():
1752  *
1753  *
1754  */
1755 int transport_generic_handle_tmr(
1756 	struct se_cmd *cmd)
1757 {
1758 	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
1759 	return 0;
1760 }
1761 EXPORT_SYMBOL(transport_generic_handle_tmr);
1762 
1763 /*
1764  * If the task is active, request it to be stopped and sleep until it
1765  * has completed.
1766  */
1767 bool target_stop_task(struct se_task *task, unsigned long *flags)
1768 {
1769 	struct se_cmd *cmd = task->task_se_cmd;
1770 	bool was_active = false;
1771 
1772 	if (task->task_flags & TF_ACTIVE) {
1773 		task->task_flags |= TF_REQUEST_STOP;
1774 		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1775 
1776 		pr_debug("Task %p waiting to complete\n", task);
1777 		wait_for_completion(&task->task_stop_comp);
1778 		pr_debug("Task %p stopped successfully\n", task);
1779 
1780 		spin_lock_irqsave(&cmd->t_state_lock, *flags);
1781 		atomic_dec(&cmd->t_task_cdbs_left);
1782 		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
1783 		was_active = true;
1784 	}
1785 
1786 	return was_active;
1787 }
1788 
1789 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1790 {
1791 	struct se_task *task, *task_tmp;
1792 	unsigned long flags;
1793 	int ret = 0;
1794 
1795 	pr_debug("ITT[0x%08x] - Stopping tasks\n",
1796 		cmd->se_tfo->get_task_tag(cmd));
1797 
1798 	/*
1799 	 * No tasks remain in the execution queue
1800 	 */
1801 	spin_lock_irqsave(&cmd->t_state_lock, flags);
1802 	list_for_each_entry_safe(task, task_tmp,
1803 				&cmd->t_task_list, t_list) {
1804 		pr_debug("Processing task %p\n", task);
1805 		/*
1806 		 * If the struct se_task has not been sent and is not active,
1807 		 * remove the struct se_task from the execution queue.
1808 		 */
1809 		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
1810 			spin_unlock_irqrestore(&cmd->t_state_lock,
1811 					flags);
1812 			transport_remove_task_from_execute_queue(task,
1813 					cmd->se_dev);
1814 
1815 			pr_debug("Task %p removed from execute queue\n", task);
1816 			spin_lock_irqsave(&cmd->t_state_lock, flags);
1817 			continue;
1818 		}
1819 
1820 		if (!target_stop_task(task, &flags)) {
1821 			pr_debug("Task %p - did nothing\n", task);
1822 			ret++;
1823 		}
1824 	}
1825 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1826 
1827 	return ret;
1828 }
1829 
1830 /*
1831  * Handle SAM-esque emulation for generic transport request failures.
1832  */
1833 static void transport_generic_request_failure(struct se_cmd *cmd)
1834 {
1835 	int ret = 0;
1836 
1837 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1838 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1839 		cmd->t_task_cdb[0]);
1840 	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1841 		cmd->se_tfo->get_cmd_state(cmd),
1842 		cmd->t_state, cmd->scsi_sense_reason);
1843 	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1844 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1845 		" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1846 		cmd->t_task_list_num,
1847 		atomic_read(&cmd->t_task_cdbs_left),
1848 		atomic_read(&cmd->t_task_cdbs_sent),
1849 		atomic_read(&cmd->t_task_cdbs_ex_left),
1850 		(cmd->transport_state & CMD_T_ACTIVE) != 0,
1851 		(cmd->transport_state & CMD_T_STOP) != 0,
1852 		(cmd->transport_state & CMD_T_SENT) != 0);
1853 
1854 	/*
1855 	 * For SAM Task Attribute emulation for failed struct se_cmd
1856 	 */
1857 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1858 		transport_complete_task_attr(cmd);
1859 
1860 	switch (cmd->scsi_sense_reason) {
1861 	case TCM_NON_EXISTENT_LUN:
1862 	case TCM_UNSUPPORTED_SCSI_OPCODE:
1863 	case TCM_INVALID_CDB_FIELD:
1864 	case TCM_INVALID_PARAMETER_LIST:
1865 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1866 	case TCM_UNKNOWN_MODE_PAGE:
1867 	case TCM_WRITE_PROTECTED:
1868 	case TCM_CHECK_CONDITION_ABORT_CMD:
1869 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1870 	case TCM_CHECK_CONDITION_NOT_READY:
1871 		break;
1872 	case TCM_RESERVATION_CONFLICT:
1873 		/*
1874 		 * No SENSE Data payload for this case, set SCSI Status
1875 		 * and queue the response to $FABRIC_MOD.
1876 		 *
1877 		 * Uses linux/include/scsi/scsi.h SAM status code definitions
1878 		 */
1879 		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1880 		/*
1881 		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1882 		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1883 		 * CONFLICT STATUS.
1884 		 *
1885 		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1886 		 */
1887 		if (cmd->se_sess &&
1888 		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1889 			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1890 				cmd->orig_fe_lun, 0x2C,
1891 				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1892 
1893 		ret = cmd->se_tfo->queue_status(cmd);
1894 		if (ret == -EAGAIN || ret == -ENOMEM)
1895 			goto queue_full;
1896 		goto check_stop;
1897 	default:
1898 		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1899 			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
1900 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1901 		break;
1902 	}
1903 	/*
1904 	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
1905 	 * make the call to transport_send_check_condition_and_sense()
1906 	 * directly.  Otherwise expect the fabric to make the call to
1907 	 * transport_send_check_condition_and_sense() after handling
1908 	 * possible unsolicited write data payloads.
1909 	 */
1910 	ret = transport_send_check_condition_and_sense(cmd,
1911 			cmd->scsi_sense_reason, 0);
1912 	if (ret == -EAGAIN || ret == -ENOMEM)
1913 		goto queue_full;
1914 
1915 check_stop:
1916 	transport_lun_remove_cmd(cmd);
1917 	transport_cmd_check_stop_to_fabric(cmd);
1919 	return;
1920 
1921 queue_full:
1922 	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1923 	transport_handle_queue_full(cmd, cmd->se_dev);
1924 }
1925 
1926 static inline u32 transport_lba_21(unsigned char *cdb)
1927 {
1928 	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1929 }
1930 
1931 static inline u32 transport_lba_32(unsigned char *cdb)
1932 {
1933 	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1934 }
1935 
1936 static inline unsigned long long transport_lba_64(unsigned char *cdb)
1937 {
1938 	unsigned int __v1, __v2;
1939 
1940 	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1941 	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1942 
1943 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1944 }
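
/*
 * Worked example: for a READ_16 CDB the 64-bit LBA occupies bytes 2..9
 * in big-endian order, so transport_lba_64() above is equivalent to
 * get_unaligned_be64(&cdb[2]).  E.g. cdb[2..9] = { 00 00 00 01 00 00 00 02 }
 * yields __v1 = 0x00000001, __v2 = 0x00000002 and an LBA of
 * 0x0000000100000002.
 */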
1945 
1946 /*
1947  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
1948  */
1949 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
1950 {
1951 	unsigned int __v1, __v2;
1952 
1953 	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1954 	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1955 
1956 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1957 }
1958 
1959 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1960 {
1961 	unsigned long flags;
1962 
1963 	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1964 	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1965 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1966 }
1967 
1968 /*
1969  * Called from Fabric Module context from transport_execute_tasks()
1970  *
1971  * The return of this function determines if the tasks from struct se_cmd
1972  * get added to the execution queue in transport_execute_tasks(),
1973  * or are added to the delayed or ordered lists here.
1974  */
1975 static inline int transport_execute_task_attr(struct se_cmd *cmd)
1976 {
1977 	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1978 		return 1;
1979 	/*
1980 	 * Check for HEAD_OF_QUEUE, and if set return 1 so that the tasks for
1981 	 * the passed struct se_cmd are added to the front of the execution queue.
1982 	 */
1983 	 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
1984 		pr_debug("Added HEAD_OF_QUEUE for CDB:"
1985 			" 0x%02x, se_ordered_id: %u\n",
1986 			cmd->t_task_cdb[0],
1987 			cmd->se_ordered_id);
1988 		return 1;
1989 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
1990 		atomic_inc(&cmd->se_dev->dev_ordered_sync);
1991 		smp_mb__after_atomic_inc();
1992 
1993 		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
1994 				" list, se_ordered_id: %u\n",
1995 				cmd->t_task_cdb[0],
1996 				cmd->se_ordered_id);
1997 		/*
1998 		 * Add ORDERED command to tail of execution queue if
1999 		 * no other older commands exist that need to be
2000 		 * completed first.
2001 		 */
2002 		if (!atomic_read(&cmd->se_dev->simple_cmds))
2003 			return 1;
2004 	} else {
2005 		/*
2006 		 * For SIMPLE and UNTAGGED Task Attribute commands
2007 		 */
2008 		atomic_inc(&cmd->se_dev->simple_cmds);
2009 		smp_mb__after_atomic_inc();
2010 	}
2011 	/*
2012 	 * Otherwise, if one or more outstanding ORDERED task attributes exist,
2013 	 * the dormant task(s) built for the passed struct se_cmd are held on
2014 	 * the delayed list for this struct se_device until ordering allows them.
2015 	 */
2016 	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
2017 		/*
2018 		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
2019 		 * will be drained upon completion of HEAD_OF_QUEUE task.
2020 		 */
2021 		spin_lock(&cmd->se_dev->delayed_cmd_lock);
2022 		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2023 		list_add_tail(&cmd->se_delayed_node,
2024 				&cmd->se_dev->delayed_cmd_list);
2025 		spin_unlock(&cmd->se_dev->delayed_cmd_lock);
2026 
2027 		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
2028 			" delayed CMD list, se_ordered_id: %u\n",
2029 			cmd->t_task_cdb[0], cmd->sam_task_attr,
2030 			cmd->se_ordered_id);
2031 		/*
2032 		 * Return zero to let transport_execute_tasks() know
2033 		 * not to add the delayed tasks to the execution list.
2034 		 */
2035 		return 0;
2036 	}
2037 	/*
2038 	 * Otherwise, no ORDERED task attributes exist..
2039 	 */
2040 	return 1;
2041 }
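
/*
 * Ordering walk-through (illustrative): with one ORDERED command in
 * flight (dev_ordered_sync == 1), a subsequent SIMPLE command bumps
 * simple_cmds but is parked on delayed_cmd_list (return 0 above); once
 * the ORDERED command completes, transport_complete_task_attr() drains
 * the delayed list and the parked command becomes Active.
 */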
2042 
2043 /*
2044  * Called from fabric module context in transport_generic_new_cmd() and
2045  * transport_generic_process_write()
2046  */
2047 static int transport_execute_tasks(struct se_cmd *cmd)
2048 {
2049 	int add_tasks;
2050 	struct se_device *se_dev = cmd->se_dev;
2051 	/*
2052 	 * Call transport_cmd_check_stop() to see if a fabric exception
2053 	 * has occurred that prevents execution.
2054 	 */
2055 	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
2056 		/*
2057 		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2058 		 * attribute for the tasks of the received struct se_cmd CDB
2059 		 */
2060 		add_tasks = transport_execute_task_attr(cmd);
2061 		if (!add_tasks)
2062 			goto execute_tasks;
2063 		/*
2064 		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
2065 		 * adds associated se_tasks while holding dev->execute_task_lock
2066 	 * before I/O dispatch to avoid a double spinlock access.
2067 		 */
2068 		__transport_execute_tasks(se_dev, cmd);
2069 		return 0;
2070 	}
2071 
2072 execute_tasks:
2073 	__transport_execute_tasks(se_dev, NULL);
2074 	return 0;
2075 }
2076 
2077 /*
2078  * Called to check the struct se_device tcq depth window, and once open,
2079  * pull struct se_task entries from execute_task_list for dispatch.
2080  *
2081  * Called from transport_processing_thread()
2082  */
2083 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
2084 {
2085 	int error;
2086 	struct se_cmd *cmd = NULL;
2087 	struct se_task *task = NULL;
2088 	unsigned long flags;
2089 
2090 check_depth:
2091 	spin_lock_irq(&dev->execute_task_lock);
2092 	if (new_cmd != NULL)
2093 		__transport_add_tasks_from_cmd(new_cmd);
2094 
2095 	if (list_empty(&dev->execute_task_list)) {
2096 		spin_unlock_irq(&dev->execute_task_lock);
2097 		return 0;
2098 	}
2099 	task = list_first_entry(&dev->execute_task_list,
2100 				struct se_task, t_execute_list);
2101 	__transport_remove_task_from_execute_queue(task, dev);
2102 	spin_unlock_irq(&dev->execute_task_lock);
2103 
2104 	cmd = task->task_se_cmd;
2105 	spin_lock_irqsave(&cmd->t_state_lock, flags);
2106 	task->task_flags |= (TF_ACTIVE | TF_SENT);
2107 	atomic_inc(&cmd->t_task_cdbs_sent);
2108 
2109 	if (atomic_read(&cmd->t_task_cdbs_sent) ==
2110 	    cmd->t_task_list_num)
2111 		cmd->transport_state |= CMD_T_SENT;
2112 
2113 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2114 
2115 	if (cmd->execute_task)
2116 		error = cmd->execute_task(task);
2117 	else
2118 		error = dev->transport->do_task(task);
2119 	if (error != 0) {
2120 		spin_lock_irqsave(&cmd->t_state_lock, flags);
2121 		task->task_flags &= ~TF_ACTIVE;
2122 		cmd->transport_state &= ~CMD_T_SENT;
2123 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2124 
2125 		transport_stop_tasks_for_cmd(cmd);
2126 		transport_generic_request_failure(cmd);
2127 	}
2128 
2129 	new_cmd = NULL;
2130 	goto check_depth;
2133 }
2134 
2135 static inline u32 transport_get_sectors_6(
2136 	unsigned char *cdb,
2137 	struct se_cmd *cmd,
2138 	int *ret)
2139 {
2140 	struct se_device *dev = cmd->se_dev;
2141 
2142 	/*
2143 	 * Assume TYPE_DISK when no struct se_device object is present.
2144 	 * Use 8-bit sector value.
2145 	 */
2146 	if (!dev)
2147 		goto type_disk;
2148 
2149 	/*
2150 	 * Use 24-bit allocation length for TYPE_TAPE.
2151 	 */
2152 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2153 		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2154 
2155 	/*
2156 	 * Everything else assume TYPE_DISK Sector CDB location.
2157 	 * Use 8-bit sector value.  SBC-3 says:
2158 	 *
2159 	 *   A TRANSFER LENGTH field set to zero specifies that 256
2160 	 *   logical blocks shall be written.  Any other value
2161 	 *   specifies the number of logical blocks that shall be
2162 	 *   written.
2163 	 */
2164 type_disk:
2165 	return cdb[4] ? : 256;
2166 }
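
/*
 * Example: per the SBC-3 rule quoted above, a READ_6/WRITE_6 TRANSFER
 * LENGTH of zero means 256 blocks, so cdb[4] == 0x00 yields 256 while
 * cdb[4] == 0x10 yields 16 (via the `cdb[4] ? : 256' GNU extension).
 */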
2167 
2168 static inline u32 transport_get_sectors_10(
2169 	unsigned char *cdb,
2170 	struct se_cmd *cmd,
2171 	int *ret)
2172 {
2173 	struct se_device *dev = cmd->se_dev;
2174 
2175 	/*
2176 	 * Assume TYPE_DISK when no struct se_device object is present.
2177 	 * Use 16-bit sector value.
2178 	 */
2179 	if (!dev)
2180 		goto type_disk;
2181 
2182 	/*
2183 	 * XXX_10 is not defined in SSC; return an error
2184 	 */
2185 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2186 		*ret = -EINVAL;
2187 		return 0;
2188 	}
2189 
2190 	/*
2191 	 * Everything else assume TYPE_DISK Sector CDB location.
2192 	 * Use 16-bit sector value.
2193 	 */
2194 type_disk:
2195 	return (u32)(cdb[7] << 8) + cdb[8];
2196 }
2197 
2198 static inline u32 transport_get_sectors_12(
2199 	unsigned char *cdb,
2200 	struct se_cmd *cmd,
2201 	int *ret)
2202 {
2203 	struct se_device *dev = cmd->se_dev;
2204 
2205 	/*
2206 	 * Assume TYPE_DISK when no struct se_device object is present.
2207 	 * Use 32-bit sector value.
2208 	 */
2209 	if (!dev)
2210 		goto type_disk;
2211 
2212 	/*
2213 	 * XXX_12 is not defined in SSC; return an error
2214 	 */
2215 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2216 		*ret = -EINVAL;
2217 		return 0;
2218 	}
2219 
2220 	/*
2221 	 * Everything else assume TYPE_DISK Sector CDB location.
2222 	 * Use 32-bit sector value.
2223 	 */
2224 type_disk:
2225 	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2226 }
2227 
2228 static inline u32 transport_get_sectors_16(
2229 	unsigned char *cdb,
2230 	struct se_cmd *cmd,
2231 	int *ret)
2232 {
2233 	struct se_device *dev = cmd->se_dev;
2234 
2235 	/*
2236 	 * Assume TYPE_DISK when no struct se_device object is present.
2237 	 * Use 32-bit sector value.
2238 	 */
2239 	if (!dev)
2240 		goto type_disk;
2241 
2242 	/*
2243 	 * Use 24-bit allocation length for TYPE_TAPE.
2244 	 */
2245 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2246 		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2247 
2248 type_disk:
2249 	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2250 		    (cdb[12] << 8) + cdb[13];
2251 }
2252 
2253 /*
2254  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2255  */
2256 static inline u32 transport_get_sectors_32(
2257 	unsigned char *cdb,
2258 	struct se_cmd *cmd,
2259 	int *ret)
2260 {
2261 	/*
2262 	 * Assume TYPE_DISK when no struct se_device object is present.
2263 	 * Use 32-bit sector value.
2264 	 */
2265 	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2266 		    (cdb[30] << 8) + cdb[31];
2268 }
2269 
2270 static inline u32 transport_get_size(
2271 	u32 sectors,
2272 	unsigned char *cdb,
2273 	struct se_cmd *cmd)
2274 {
2275 	struct se_device *dev = cmd->se_dev;
2276 
2277 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2278 		if (cdb[1] & 1) { /* sectors */
2279 			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2280 		} else /* bytes */
2281 			return sectors;
2282 	}
2283 #if 0
2284 	pr_debug("Returning block_size: %u, sectors: %u == %u for"
2285 			" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2286 			dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2287 			dev->transport->name);
2288 #endif
2289 	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2290 }
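
/*
 * Example: for a TYPE_DISK backend with a 512-byte block_size, a
 * transfer of 8 sectors maps to 8 * 512 = 4096 bytes.  For TYPE_TAPE
 * with the FIXED bit (cdb[1] & 1) clear, the transfer length is
 * already a byte count and is returned unscaled.
 */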
2291 
2292 static void transport_xor_callback(struct se_cmd *cmd)
2293 {
2294 	unsigned char *buf, *addr;
2295 	struct scatterlist *sg;
2296 	unsigned int offset;
2297 	int i;
2298 	int count;
2299 	/*
2300 	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2301 	 *
2302 	 * 1) read the specified logical block(s);
2303 	 * 2) transfer logical blocks from the data-out buffer;
2304 	 * 3) XOR the logical blocks transferred from the data-out buffer with
2305 	 *    the logical blocks read, storing the resulting XOR data in a buffer;
2306 	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2307 	 *    blocks transferred from the data-out buffer; and
2308 	 * 5) transfer the resulting XOR data to the data-in buffer.
2309 	 */
2310 	buf = kmalloc(cmd->data_length, GFP_KERNEL);
2311 	if (!buf) {
2312 		pr_err("Unable to allocate xor_callback buf\n");
2313 		return;
2314 	}
2315 	/*
2316 	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
2317 	 * into the locally allocated *buf
2318 	 */
2319 	sg_copy_to_buffer(cmd->t_data_sg,
2320 			  cmd->t_data_nents,
2321 			  buf,
2322 			  cmd->data_length);
2323 
2324 	/*
2325 	 * Now perform the XOR against the BIDI read memory located at
2326 	 * cmd->t_bidi_data_sg
2327 	 */
2328 
2329 	offset = 0;
2330 	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2331 		addr = kmap_atomic(sg_page(sg), KM_USER0);
2332 		if (!addr)
2333 			goto out;
2334 
2335 		for (i = 0; i < sg->length; i++)
2336 			*(addr + sg->offset + i) ^= *(buf + offset + i);
2337 
2338 		offset += sg->length;
2339 		kunmap_atomic(addr, KM_USER0);
2340 	}
2341 
2342 out:
2343 	kfree(buf);
2344 }
2345 
2346 /*
2347  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2348  */
2349 static int transport_get_sense_data(struct se_cmd *cmd)
2350 {
2351 	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2352 	struct se_device *dev = cmd->se_dev;
2353 	struct se_task *task = NULL, *task_tmp;
2354 	unsigned long flags;
2355 	u32 offset = 0;
2356 
2357 	WARN_ON(!cmd->se_lun);
2358 
2359 	if (!dev)
2360 		return 0;
2361 
2362 	spin_lock_irqsave(&cmd->t_state_lock, flags);
2363 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2364 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2365 		return 0;
2366 	}
2367 
2368 	list_for_each_entry_safe(task, task_tmp,
2369 				&cmd->t_task_list, t_list) {
2370 		if (!(task->task_flags & TF_HAS_SENSE))
2371 			continue;
2372 
2373 		if (!dev->transport->get_sense_buffer) {
2374 			pr_err("dev->transport->get_sense_buffer"
2375 					" is NULL\n");
2376 			continue;
2377 		}
2378 
2379 		sense_buffer = dev->transport->get_sense_buffer(task);
2380 		if (!sense_buffer) {
2381 			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
2382 				" sense buffer for task with sense\n",
2383 				cmd->se_tfo->get_task_tag(cmd), task);
2384 			continue;
2385 		}
2386 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2387 
2388 		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2389 				TRANSPORT_SENSE_BUFFER);
2390 
2391 		memcpy(&buffer[offset], sense_buffer,
2392 				TRANSPORT_SENSE_BUFFER);
2393 		cmd->scsi_status = task->task_scsi_status;
2394 		/* Automatically padded */
2395 		cmd->scsi_sense_length =
2396 				(TRANSPORT_SENSE_BUFFER + offset);
2397 
2398 		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2399 				" and sense\n",
2400 			dev->se_hba->hba_id, dev->transport->name,
2401 				cmd->scsi_status);
2402 		return 0;
2403 	}
2404 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2405 
2406 	return -1;
2407 }
2408 
2409 static inline long long transport_dev_end_lba(struct se_device *dev)
2410 {
2411 	return dev->transport->get_blocks(dev) + 1;
2412 }
2413 
2414 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2415 {
2416 	struct se_device *dev = cmd->se_dev;
2417 	u32 sectors;
2418 
2419 	if (dev->transport->get_device_type(dev) != TYPE_DISK)
2420 		return 0;
2421 
2422 	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2423 
2424 	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2425 		pr_err("LBA: %llu Sectors: %u exceeds"
2426 			" transport_dev_end_lba(): %llu\n",
2427 			cmd->t_task_lba, sectors,
2428 			transport_dev_end_lba(dev));
2429 		return -EINVAL;
2430 	}
2431 
2432 	return 0;
2433 }
2434 
2435 static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2436 {
2437 	/*
2438 	 * Determine if the received WRITE_SAME is used for direct
2439 	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
2440 	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2441 	 * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
2442 	 */
2443 	int passthrough = (dev->transport->transport_type ==
2444 				TRANSPORT_PLUGIN_PHBA_PDEV);
2445 
2446 	if (!passthrough) {
2447 		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2448 			pr_err("WRITE_SAME PBDATA and LBDATA"
2449 				" bits not supported for Block Discard"
2450 				" Emulation\n");
2451 			return -ENOSYS;
2452 		}
2453 		/*
2454 		 * Currently, for the emulated case we only accept
2455 		 * WRITE_SAME requests with the UNMAP=1 bit set.
2456 		 */
2457 		if (!(flags[0] & 0x08)) {
2458 			pr_err("WRITE_SAME w/o UNMAP bit not"
2459 				" supported for Block Discard Emulation\n");
2460 			return -ENOSYS;
2461 		}
2462 	}
2463 
2464 	return 0;
2465 }
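
/*
 * Flag layout reference for the checks above, assuming the standard
 * SBC WRITE_SAME byte 1 bit positions: 0x08 = UNMAP, 0x04 = PBDATA,
 * 0x02 = LBDATA.  The emulated (non-passthrough) path accepts only
 * UNMAP=1 with PBDATA=0 and LBDATA=0.
 */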
2466 
2467 /*	transport_generic_cmd_sequencer():
2468  *
2469  *	Generic Command Sequencer that should work for most DAS transport
2470  *	drivers.
2471  *
2472  *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2473  *	RX Thread.
2474  *
2475  *	FIXME: Need to support other SCSI OPCODES here as well.
2476  */
2477 static int transport_generic_cmd_sequencer(
2478 	struct se_cmd *cmd,
2479 	unsigned char *cdb)
2480 {
2481 	struct se_device *dev = cmd->se_dev;
2482 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2483 	int ret = 0, sector_ret = 0, passthrough;
2484 	u32 sectors = 0, size = 0, pr_reg_type = 0;
2485 	u16 service_action;
2486 	u8 alua_ascq = 0;
2487 	/*
2488 	 * Check for an existing UNIT ATTENTION condition
2489 	 */
2490 	if (core_scsi3_ua_check(cmd, cdb) < 0) {
2491 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2492 		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2493 		return -EINVAL;
2494 	}
2495 	/*
2496 	 * Check status of Asymmetric Logical Unit Assignment port
2497 	 */
2498 	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2499 	if (ret != 0) {
2500 		/*
2501 		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
2502 		 * The ALUA additional sense code qualifier (ASCQ) is determined
2503 		 * by the ALUA primary or secondary access state.
2504 		 */
2505 		if (ret > 0) {
2506 #if 0
2507 			pr_debug("[%s]: ALUA TG Port not available,"
2508 				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2509 				cmd->se_tfo->get_fabric_name(), alua_ascq);
2510 #endif
2511 			transport_set_sense_codes(cmd, 0x04, alua_ascq);
2512 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2513 			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2514 			return -EINVAL;
2515 		}
2516 		goto out_invalid_cdb_field;
2517 	}
2518 	/*
2519 	 * Check status for SPC-3 Persistent Reservations
2520 	 */
2521 	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2522 		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2523 					cmd, cdb, pr_reg_type) != 0) {
2524 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2525 			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2526 			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2527 			return -EBUSY;
2528 		}
2529 		/*
2530 		 * This means the CDB is allowed for the SCSI Initiator port
2531 		 * when said port is *NOT* holding the legacy SPC-2 or
2532 		 * SPC-3 Persistent Reservation.
2533 		 */
2534 	}
2535 
2536 	/*
2537 	 * If we operate in passthrough mode we skip most CDB emulation and
2538 	 * instead hand the commands down to the physical SCSI device.
2539 	 */
2540 	passthrough =
2541 		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2542 
2543 	switch (cdb[0]) {
2544 	case READ_6:
2545 		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2546 		if (sector_ret)
2547 			goto out_unsupported_cdb;
2548 		size = transport_get_size(sectors, cdb, cmd);
2549 		cmd->t_task_lba = transport_lba_21(cdb);
2550 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2551 		break;
2552 	case READ_10:
2553 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2554 		if (sector_ret)
2555 			goto out_unsupported_cdb;
2556 		size = transport_get_size(sectors, cdb, cmd);
2557 		cmd->t_task_lba = transport_lba_32(cdb);
2558 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2559 		break;
2560 	case READ_12:
2561 		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2562 		if (sector_ret)
2563 			goto out_unsupported_cdb;
2564 		size = transport_get_size(sectors, cdb, cmd);
2565 		cmd->t_task_lba = transport_lba_32(cdb);
2566 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2567 		break;
2568 	case READ_16:
2569 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2570 		if (sector_ret)
2571 			goto out_unsupported_cdb;
2572 		size = transport_get_size(sectors, cdb, cmd);
2573 		cmd->t_task_lba = transport_lba_64(cdb);
2574 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2575 		break;
2576 	case WRITE_6:
2577 		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2578 		if (sector_ret)
2579 			goto out_unsupported_cdb;
2580 		size = transport_get_size(sectors, cdb, cmd);
2581 		cmd->t_task_lba = transport_lba_21(cdb);
2582 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2583 		break;
2584 	case WRITE_10:
2585 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2586 		if (sector_ret)
2587 			goto out_unsupported_cdb;
2588 		size = transport_get_size(sectors, cdb, cmd);
2589 		cmd->t_task_lba = transport_lba_32(cdb);
2590 		if (cdb[1] & 0x8)
2591 			cmd->se_cmd_flags |= SCF_FUA;
2592 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2593 		break;
2594 	case WRITE_12:
2595 		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2596 		if (sector_ret)
2597 			goto out_unsupported_cdb;
2598 		size = transport_get_size(sectors, cdb, cmd);
2599 		cmd->t_task_lba = transport_lba_32(cdb);
2600 		if (cdb[1] & 0x8)
2601 			cmd->se_cmd_flags |= SCF_FUA;
2602 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2603 		break;
2604 	case WRITE_16:
2605 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2606 		if (sector_ret)
2607 			goto out_unsupported_cdb;
2608 		size = transport_get_size(sectors, cdb, cmd);
2609 		cmd->t_task_lba = transport_lba_64(cdb);
2610 		if (cdb[1] & 0x8)
2611 			cmd->se_cmd_flags |= SCF_FUA;
2612 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2613 		break;
2614 	case XDWRITEREAD_10:
2615 		if ((cmd->data_direction != DMA_TO_DEVICE) ||
2616 		    !(cmd->se_cmd_flags & SCF_BIDI))
2617 			goto out_invalid_cdb_field;
2618 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2619 		if (sector_ret)
2620 			goto out_unsupported_cdb;
2621 		size = transport_get_size(sectors, cdb, cmd);
2622 		cmd->t_task_lba = transport_lba_32(cdb);
2623 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2624 
2625 		/*
2626 		 * Do not allow BIDI commands for passthrough mode.
2627 		 */
2628 		if (passthrough)
2629 			goto out_unsupported_cdb;
2630 
2631 		/*
2632 		 * Setup BIDI XOR callback to be run after I/O completion.
2633 		 */
2634 		cmd->transport_complete_callback = &transport_xor_callback;
2635 		if (cdb[1] & 0x8)
2636 			cmd->se_cmd_flags |= SCF_FUA;
2637 		break;
2638 	case VARIABLE_LENGTH_CMD:
2639 		service_action = get_unaligned_be16(&cdb[8]);
2640 		switch (service_action) {
2641 		case XDWRITEREAD_32:
2642 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2643 			if (sector_ret)
2644 				goto out_unsupported_cdb;
2645 			size = transport_get_size(sectors, cdb, cmd);
2646 			/*
2647 			 * Use WRITE_32 and READ_32 opcodes for the emulated
2648 			 * XDWRITE_READ_32 logic.
2649 			 */
2650 			cmd->t_task_lba = transport_lba_64_ext(cdb);
2651 			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2652 
2653 			/*
2654 			 * Do not allow BIDI commands for passthrough mode.
2655 			 */
2656 			if (passthrough)
2657 				goto out_unsupported_cdb;
2658 
2659 			/*
2660 			 * Setup BIDI XOR callback to be run after I/O
2661 			 * completion.
2662 			 */
2663 			cmd->transport_complete_callback = &transport_xor_callback;
2664 			if (cdb[1] & 0x8)
2665 				cmd->se_cmd_flags |= SCF_FUA;
2666 			break;
2667 		case WRITE_SAME_32:
2668 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2669 			if (sector_ret)
2670 				goto out_unsupported_cdb;
2671 
2672 			if (sectors)
2673 				size = transport_get_size(1, cdb, cmd);
2674 			else {
2675 				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2676 				       " supported\n");
2677 				goto out_invalid_cdb_field;
2678 			}
2679 
2680 			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2681 			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2682 
2683 			if (target_check_write_same_discard(&cdb[10], dev) < 0)
2684 				goto out_unsupported_cdb;
2685 			if (!passthrough)
2686 				cmd->execute_task = target_emulate_write_same;
2687 			break;
2688 		default:
2689 			pr_err("VARIABLE_LENGTH_CMD service action"
2690 				" 0x%04x not supported\n", service_action);
2691 			goto out_unsupported_cdb;
2692 		}
2693 		break;
2694 	case MAINTENANCE_IN:
2695 		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2696 			/* MAINTENANCE_IN from SCC-2 */
2697 			/*
2698 			 * Check for emulated MI_REPORT_TARGET_PGS.
2699 			 */
2700 			if (cdb[1] == MI_REPORT_TARGET_PGS &&
2701 			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2702 				cmd->execute_task =
2703 					target_emulate_report_target_port_groups;
2704 			}
2705 			size = (cdb[6] << 24) | (cdb[7] << 16) |
2706 			       (cdb[8] << 8) | cdb[9];
2707 		} else {
2708 			/* GPCMD_SEND_KEY from multi media commands */
2709 			size = (cdb[8] << 8) + cdb[9];
2710 		}
2711 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2712 		break;
2713 	case MODE_SELECT:
2714 		size = cdb[4];
2715 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2716 		break;
2717 	case MODE_SELECT_10:
2718 		size = (cdb[7] << 8) + cdb[8];
2719 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2720 		break;
2721 	case MODE_SENSE:
2722 		size = cdb[4];
2723 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2724 		if (!passthrough)
2725 			cmd->execute_task = target_emulate_modesense;
2726 		break;
2727 	case MODE_SENSE_10:
2728 		size = (cdb[7] << 8) + cdb[8];
2729 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2730 		if (!passthrough)
2731 			cmd->execute_task = target_emulate_modesense;
2732 		break;
2733 	case GPCMD_READ_BUFFER_CAPACITY:
2734 	case GPCMD_SEND_OPC:
2735 	case LOG_SELECT:
2736 	case LOG_SENSE:
2737 		size = (cdb[7] << 8) + cdb[8];
2738 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2739 		break;
2740 	case READ_BLOCK_LIMITS:
2741 		size = READ_BLOCK_LEN;
2742 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2743 		break;
2744 	case GPCMD_GET_CONFIGURATION:
2745 	case GPCMD_READ_FORMAT_CAPACITIES:
2746 	case GPCMD_READ_DISC_INFO:
2747 	case GPCMD_READ_TRACK_RZONE_INFO:
2748 		size = (cdb[7] << 8) + cdb[8];
2749 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2750 		break;
2751 	case PERSISTENT_RESERVE_IN:
2752 		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2753 			cmd->execute_task = target_scsi3_emulate_pr_in;
2754 		size = (cdb[7] << 8) + cdb[8];
2755 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2756 		break;
2757 	case PERSISTENT_RESERVE_OUT:
2758 		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2759 			cmd->execute_task = target_scsi3_emulate_pr_out;
2760 		size = (cdb[7] << 8) + cdb[8];
2761 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2762 		break;
2763 	case GPCMD_MECHANISM_STATUS:
2764 	case GPCMD_READ_DVD_STRUCTURE:
2765 		size = (cdb[8] << 8) + cdb[9];
2766 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2767 		break;
2768 	case READ_POSITION:
2769 		size = READ_POSITION_LEN;
2770 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2771 		break;
2772 	case MAINTENANCE_OUT:
2773 		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2774 			/* MAINTENANCE_OUT from SCC-2
2775 			 *
2776 			 * Check for emulated MO_SET_TARGET_PGS.
2777 			 */
2778 			if (cdb[1] == MO_SET_TARGET_PGS &&
2779 			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2780 				cmd->execute_task =
2781 					target_emulate_set_target_port_groups;
2782 			}
2783 
2784 			size = (cdb[6] << 24) | (cdb[7] << 16) |
2785 			       (cdb[8] << 8) | cdb[9];
2786 		} else  {
2787 			/* GPCMD_REPORT_KEY from multi media commands */
2788 			size = (cdb[8] << 8) + cdb[9];
2789 		}
2790 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2791 		break;
2792 	case INQUIRY:
2793 		size = (cdb[3] << 8) + cdb[4];
2794 		/*
2795 		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
2796 		 * See spc4r17 section 5.3
2797 		 */
2798 		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2799 			cmd->sam_task_attr = MSG_HEAD_TAG;
2800 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2801 		if (!passthrough)
2802 			cmd->execute_task = target_emulate_inquiry;
2803 		break;
2804 	case READ_BUFFER:
2805 		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2806 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2807 		break;
2808 	case READ_CAPACITY:
2809 		size = READ_CAP_LEN;
2810 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2811 		if (!passthrough)
2812 			cmd->execute_task = target_emulate_readcapacity;
2813 		break;
2814 	case READ_MEDIA_SERIAL_NUMBER:
2815 	case SECURITY_PROTOCOL_IN:
2816 	case SECURITY_PROTOCOL_OUT:
2817 		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2818 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2819 		break;
2820 	case SERVICE_ACTION_IN:
2821 		switch (cmd->t_task_cdb[1] & 0x1f) {
2822 		case SAI_READ_CAPACITY_16:
2823 			if (!passthrough)
2824 				cmd->execute_task =
2825 					target_emulate_readcapacity_16;
2826 			break;
2827 		default:
2828 			if (passthrough)
2829 				break;
2830 
2831 			pr_err("Unsupported SA: 0x%02x\n",
2832 				cmd->t_task_cdb[1] & 0x1f);
2833 			goto out_unsupported_cdb;
2834 		}
2835 		/*FALLTHROUGH*/
2836 	case ACCESS_CONTROL_IN:
2837 	case ACCESS_CONTROL_OUT:
2838 	case EXTENDED_COPY:
2839 	case READ_ATTRIBUTE:
2840 	case RECEIVE_COPY_RESULTS:
2841 	case WRITE_ATTRIBUTE:
2842 		size = (cdb[10] << 24) | (cdb[11] << 16) |
2843 		       (cdb[12] << 8) | cdb[13];
2844 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2845 		break;
2846 	case RECEIVE_DIAGNOSTIC:
2847 	case SEND_DIAGNOSTIC:
2848 		size = (cdb[3] << 8) | cdb[4];
2849 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2850 		break;
2851 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2852 #if 0
2853 	case GPCMD_READ_CD:
2854 		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2855 		size = (2336 * sectors);
2856 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2857 		break;
2858 #endif
2859 	case READ_TOC:
2860 		size = cdb[8];
2861 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2862 		break;
2863 	case REQUEST_SENSE:
2864 		size = cdb[4];
2865 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2866 		if (!passthrough)
2867 			cmd->execute_task = target_emulate_request_sense;
2868 		break;
2869 	case READ_ELEMENT_STATUS:
2870 		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
2871 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2872 		break;
2873 	case WRITE_BUFFER:
2874 		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2875 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2876 		break;
2877 	case RESERVE:
2878 	case RESERVE_10:
2879 		/*
2880 		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
2881 		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2882 		 */
2883 		if (cdb[0] == RESERVE_10)
2884 			size = (cdb[7] << 8) | cdb[8];
2885 		else
2886 			size = cmd->data_length;
2887 
2888 		/*
2889 		 * Setup the legacy emulated handler for SPC-2 and
2890 		 * >= SPC-3 compatible reservation handling (CRH=1).
2891 		 * Otherwise, we assume the underlying SCSI logic is
2892 		 * running in SPC_PASSTHROUGH, and wants reservation
2893 		 * emulation disabled.
2894 		 */
2895 		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2896 			cmd->execute_task = target_scsi2_reservation_reserve;
2897 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2898 		break;
2899 	case RELEASE:
2900 	case RELEASE_10:
2901 		/*
2902 		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
2903 		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2904 		 */
2905 		if (cdb[0] == RELEASE_10)
2906 			size = (cdb[7] << 8) | cdb[8];
2907 		else
2908 			size = cmd->data_length;
2909 
2910 		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2911 			cmd->execute_task = target_scsi2_reservation_release;
2912 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2913 		break;
2914 	case SYNCHRONIZE_CACHE:
2915 	case SYNCHRONIZE_CACHE_16:
2916 		/*
2917 		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
2918 		 */
2919 		if (cdb[0] == SYNCHRONIZE_CACHE) {
2920 			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2921 			cmd->t_task_lba = transport_lba_32(cdb);
2922 		} else {
2923 			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2924 			cmd->t_task_lba = transport_lba_64(cdb);
2925 		}
2926 		if (sector_ret)
2927 			goto out_unsupported_cdb;
2928 
2929 		size = transport_get_size(sectors, cdb, cmd);
2930 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2931 
2932 		if (passthrough)
2933 			break;
2934 
2935 		/*
2936 		 * Check to ensure that LBA + Range does not extend past the end
2937 		 * of the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
2938 		 */
2939 		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
2940 			if (transport_cmd_get_valid_sectors(cmd) < 0)
2941 				goto out_invalid_cdb_field;
2942 		}
2943 		cmd->execute_task = target_emulate_synchronize_cache;
2944 		break;
2945 	case UNMAP:
2946 		size = get_unaligned_be16(&cdb[7]);
2947 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2948 		if (!passthrough)
2949 			cmd->execute_task = target_emulate_unmap;
2950 		break;
2951 	case WRITE_SAME_16:
2952 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2953 		if (sector_ret)
2954 			goto out_unsupported_cdb;
2955 
2956 		if (sectors)
2957 			size = transport_get_size(1, cdb, cmd);
2958 		else {
2959 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2960 			goto out_invalid_cdb_field;
2961 		}
2962 
2963 		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
2964 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2965 
2966 		if (target_check_write_same_discard(&cdb[1], dev) < 0)
2967 			goto out_unsupported_cdb;
2968 		if (!passthrough)
2969 			cmd->execute_task = target_emulate_write_same;
2970 		break;
2971 	case WRITE_SAME:
2972 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2973 		if (sector_ret)
2974 			goto out_unsupported_cdb;
2975 
2976 		if (sectors)
2977 			size = transport_get_size(1, cdb, cmd);
2978 		else {
2979 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2980 			goto out_invalid_cdb_field;
2981 		}
2982 
2983 		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
2984 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2985 		/*
2986 		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
2987 		 * of byte 1 bit 3 UNMAP instead of original reserved field
2988 		 */
2989 		if (target_check_write_same_discard(&cdb[1], dev) < 0)
2990 			goto out_unsupported_cdb;
2991 		if (!passthrough)
2992 			cmd->execute_task = target_emulate_write_same;
2993 		break;
2994 	case ALLOW_MEDIUM_REMOVAL:
2995 	case ERASE:
2996 	case REZERO_UNIT:
2997 	case SEEK_10:
2998 	case SPACE:
2999 	case START_STOP:
3000 	case TEST_UNIT_READY:
3001 	case VERIFY:
3002 	case WRITE_FILEMARKS:
3003 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3004 		if (!passthrough)
3005 			cmd->execute_task = target_emulate_noop;
3006 		break;
3007 	case GPCMD_CLOSE_TRACK:
3008 	case INITIALIZE_ELEMENT_STATUS:
3009 	case GPCMD_LOAD_UNLOAD:
3010 	case GPCMD_SET_SPEED:
3011 	case MOVE_MEDIUM:
3012 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3013 		break;
3014 	case REPORT_LUNS:
3015 		cmd->execute_task = target_report_luns;
3016 		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3017 		/*
3018 		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3019 		 * See spc4r17 section 5.3
3020 		 */
3021 		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3022 			cmd->sam_task_attr = MSG_HEAD_TAG;
3023 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3024 		break;
3025 	default:
3026 		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3027 			" 0x%02x, sending CHECK_CONDITION.\n",
3028 			cmd->se_tfo->get_fabric_name(), cdb[0]);
3029 		goto out_unsupported_cdb;
3030 	}
3031 
3032 	if (size != cmd->data_length) {
3033 		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3034 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
3035 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3036 				cmd->data_length, size, cdb[0]);
3037 
3038 		cmd->cmd_spdtl = size;
3039 
3040 		if (cmd->data_direction == DMA_TO_DEVICE) {
3041 			pr_err("Rejecting underflow/overflow"
3042 					" WRITE data\n");
3043 			goto out_invalid_cdb_field;
3044 		}
3045 		/*
3046 		 * Reject READ_* or WRITE_* with overflow/underflow on
3047 		 * subsystem plugins whose block size is not 512 bytes.
3048 		 */
3049 		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
3050 			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3051 				" CDB on non 512-byte sector setup subsystem"
3052 				" plugin: %s\n", dev->transport->name);
3053 			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3054 			goto out_invalid_cdb_field;
3055 		}
3056 
3057 		if (size > cmd->data_length) {
3058 			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3059 			cmd->residual_count = (size - cmd->data_length);
3060 		} else {
3061 			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3062 			cmd->residual_count = (cmd->data_length - size);
3063 		}
3064 		cmd->data_length = size;
3065 	}
3066 
3067 	/* reject any command that we don't have a handler for */
3068 	if (!(passthrough || cmd->execute_task ||
3069 	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3070 		goto out_unsupported_cdb;
3071 
3072 	transport_set_supported_SAM_opcode(cmd);
3073 	return ret;
3074 
3075 out_unsupported_cdb:
3076 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3077 	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3078 	return -EINVAL;
3079 out_invalid_cdb_field:
3080 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3081 	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3082 	return -EINVAL;
3083 }
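
/*
 * Residual handling example for the size/data_length mismatch logic at
 * the end of transport_generic_cmd_sequencer(): if the fabric reports
 * an expected transfer length of 8192 bytes but the CDB only describes
 * 4096, SCF_UNDERFLOW_BIT is set with residual_count = 4096 and
 * cmd->data_length is shrunk to 4096; the opposite case sets
 * SCF_OVERFLOW_BIT with the corresponding residual.
 */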
3084 
3085 /*
3086  * Called from I/O completion to determine which dormant/delayed
3087  * and ordered cmds need to have their tasks added to the execution queue.
3088  */
3089 static void transport_complete_task_attr(struct se_cmd *cmd)
3090 {
3091 	struct se_device *dev = cmd->se_dev;
3092 	struct se_cmd *cmd_p, *cmd_tmp;
3093 	int new_active_tasks = 0;
3094 
3095 	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3096 		atomic_dec(&dev->simple_cmds);
3097 		smp_mb__after_atomic_dec();
3098 		dev->dev_cur_ordered_id++;
3099 		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3100 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
3101 			cmd->se_ordered_id);
3102 	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3103 		dev->dev_cur_ordered_id++;
3104 		pr_debug("Incremented dev_cur_ordered_id: %u for"
3105 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3106 			cmd->se_ordered_id);
3107 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3108 		atomic_dec(&dev->dev_ordered_sync);
3109 		smp_mb__after_atomic_dec();
3110 
3111 		dev->dev_cur_ordered_id++;
3112 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3113 			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3114 	}
3115 	/*
3116 	 * Process all commands up to the last received
3117 	 * ORDERED task attribute, which requires another blocking
3118 	 * boundary.
3119 	 */
3120 	spin_lock(&dev->delayed_cmd_lock);
3121 	list_for_each_entry_safe(cmd_p, cmd_tmp,
3122 			&dev->delayed_cmd_list, se_delayed_node) {
3123 
3124 		list_del(&cmd_p->se_delayed_node);
3125 		spin_unlock(&dev->delayed_cmd_lock);
3126 
3127 		pr_debug("Calling add_tasks() for"
3128 			" cmd_p: 0x%02x Task Attr: 0x%02x"
3129 			" Dormant -> Active, se_ordered_id: %u\n",
3130 			cmd_p->t_task_cdb[0],
3131 			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3132 
3133 		transport_add_tasks_from_cmd(cmd_p);
3134 		new_active_tasks++;
3135 
3136 		spin_lock(&dev->delayed_cmd_lock);
3137 		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3138 			break;
3139 	}
3140 	spin_unlock(&dev->delayed_cmd_lock);
3141 	/*
3142 	 * If new tasks have become active, wake up the transport thread
3143 	 * to do the processing of the Active tasks.
3144 	 */
3145 	if (new_active_tasks != 0)
3146 		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3147 }
3148 
3149 static void transport_complete_qf(struct se_cmd *cmd)
3150 {
3151 	int ret = 0;
3152 
3153 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3154 		transport_complete_task_attr(cmd);
3155 
3156 	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3157 		ret = cmd->se_tfo->queue_status(cmd);
3158 		if (ret)
3159 			goto out;
3160 	}
3161 
3162 	switch (cmd->data_direction) {
3163 	case DMA_FROM_DEVICE:
3164 		ret = cmd->se_tfo->queue_data_in(cmd);
3165 		break;
3166 	case DMA_TO_DEVICE:
3167 		if (cmd->t_bidi_data_sg) {
3168 			ret = cmd->se_tfo->queue_data_in(cmd);
3169 			if (ret < 0)
3170 				break;
3171 		}
3172 		/* Fall through for DMA_TO_DEVICE */
3173 	case DMA_NONE:
3174 		ret = cmd->se_tfo->queue_status(cmd);
3175 		break;
3176 	default:
3177 		break;
3178 	}
3179 
3180 out:
3181 	if (ret < 0) {
3182 		transport_handle_queue_full(cmd, cmd->se_dev);
3183 		return;
3184 	}
3185 	transport_lun_remove_cmd(cmd);
3186 	transport_cmd_check_stop_to_fabric(cmd);
3187 }
3188 
3189 static void transport_handle_queue_full(
3190 	struct se_cmd *cmd,
3191 	struct se_device *dev)
3192 {
3193 	spin_lock_irq(&dev->qf_cmd_lock);
3194 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3195 	atomic_inc(&dev->dev_qf_count);
3196 	smp_mb__after_atomic_inc();
3197 	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3198 
3199 	schedule_work(&cmd->se_dev->qf_work_queue);
3200 }
3201 
3202 static void target_complete_ok_work(struct work_struct *work)
3203 {
3204 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3205 	int reason = 0, ret;
3206 
3207 	/*
3208 	 * Check if we need to move delayed/dormant tasks from cmds on the
3209 	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3210 	 * Attribute.
3211 	 */
3212 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3213 		transport_complete_task_attr(cmd);
3214 	/*
3215 	 * Check if we need to schedule QUEUE_FULL work to retry any
3216 	 * commands pending on this device's qf_cmd_list.
3217 	 */
3218 	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3219 		schedule_work(&cmd->se_dev->qf_work_queue);
3220 
3221 	/*
3222 	 * Check if we need to retrieve a sense buffer from
3223 	 * the struct se_cmd in question.
3224 	 */
3225 	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3226 		if (transport_get_sense_data(cmd) < 0)
3227 			reason = TCM_NON_EXISTENT_LUN;
3228 
3229 		/*
3230 		 * Only set when a struct se_task->task_scsi_status returned
3231 		 * a non-GOOD status.
3232 		 */
3233 		if (cmd->scsi_status) {
3234 			ret = transport_send_check_condition_and_sense(
3235 					cmd, reason, 1);
3236 			if (ret == -EAGAIN || ret == -ENOMEM)
3237 				goto queue_full;
3238 
3239 			transport_lun_remove_cmd(cmd);
3240 			transport_cmd_check_stop_to_fabric(cmd);
3241 			return;
3242 		}
3243 	}
3244 	/*
3245 	 * Check for a callback, used by, amongst other things,
3246 	 * XDWRITE_READ_10 emulation.
3247 	 */
3248 	if (cmd->transport_complete_callback)
3249 		cmd->transport_complete_callback(cmd);
3250 
3251 	switch (cmd->data_direction) {
3252 	case DMA_FROM_DEVICE:
3253 		spin_lock(&cmd->se_lun->lun_sep_lock);
3254 		if (cmd->se_lun->lun_sep) {
3255 			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3256 					cmd->data_length;
3257 		}
3258 		spin_unlock(&cmd->se_lun->lun_sep_lock);
3259 
3260 		ret = cmd->se_tfo->queue_data_in(cmd);
3261 		if (ret == -EAGAIN || ret == -ENOMEM)
3262 			goto queue_full;
3263 		break;
3264 	case DMA_TO_DEVICE:
3265 		spin_lock(&cmd->se_lun->lun_sep_lock);
3266 		if (cmd->se_lun->lun_sep) {
3267 			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3268 				cmd->data_length;
3269 		}
3270 		spin_unlock(&cmd->se_lun->lun_sep_lock);
3271 		/*
3272 		 * Check if we need to send READ payload for BIDI-COMMAND
3273 		 */
3274 		if (cmd->t_bidi_data_sg) {
3275 			spin_lock(&cmd->se_lun->lun_sep_lock);
3276 			if (cmd->se_lun->lun_sep) {
3277 				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3278 					cmd->data_length;
3279 			}
3280 			spin_unlock(&cmd->se_lun->lun_sep_lock);
3281 			ret = cmd->se_tfo->queue_data_in(cmd);
3282 			if (ret == -EAGAIN || ret == -ENOMEM)
3283 				goto queue_full;
3284 			break;
3285 		}
3286 		/* Fall through for DMA_TO_DEVICE */
3287 	case DMA_NONE:
3288 		ret = cmd->se_tfo->queue_status(cmd);
3289 		if (ret == -EAGAIN || ret == -ENOMEM)
3290 			goto queue_full;
3291 		break;
3292 	default:
3293 		break;
3294 	}
3295 
3296 	transport_lun_remove_cmd(cmd);
3297 	transport_cmd_check_stop_to_fabric(cmd);
3298 	return;
3299 
3300 queue_full:
3301 	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3302 		" data_direction: %d\n", cmd, cmd->data_direction);
3303 	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3304 	transport_handle_queue_full(cmd, cmd->se_dev);
3305 }
3306 
3307 static void transport_free_dev_tasks(struct se_cmd *cmd)
3308 {
3309 	struct se_task *task, *task_tmp;
3310 	unsigned long flags;
3311 	LIST_HEAD(dispose_list);
3312 
3313 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3314 	list_for_each_entry_safe(task, task_tmp,
3315 				&cmd->t_task_list, t_list) {
3316 		if (!(task->task_flags & TF_ACTIVE))
3317 			list_move_tail(&task->t_list, &dispose_list);
3318 	}
3319 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3320 
3321 	while (!list_empty(&dispose_list)) {
3322 		task = list_first_entry(&dispose_list, struct se_task, t_list);
3323 
3324 		if (task->task_sg != cmd->t_data_sg &&
3325 		    task->task_sg != cmd->t_bidi_data_sg)
3326 			kfree(task->task_sg);
3327 
3328 		list_del(&task->t_list);
3329 
3330 		cmd->se_dev->transport->free_task(task);
3331 	}
3332 }
3333 
3334 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3335 {
3336 	struct scatterlist *sg;
3337 	int count;
3338 
3339 	for_each_sg(sgl, sg, nents, count)
3340 		__free_page(sg_page(sg));
3341 
3342 	kfree(sgl);
3343 }
3344 
3345 static inline void transport_free_pages(struct se_cmd *cmd)
3346 {
3347 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3348 		return;
3349 
3350 	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3351 	cmd->t_data_sg = NULL;
3352 	cmd->t_data_nents = 0;
3353 
3354 	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3355 	cmd->t_bidi_data_sg = NULL;
3356 	cmd->t_bidi_data_nents = 0;
3357 }
3358 
3359 /**
3360  * transport_release_cmd - free a command
3361  * @cmd:       command to free
3362  *
3363  * This routine unconditionally frees a command, and reference counting
3364  * or list removal must be done in the caller.
3365  */
3366 static void transport_release_cmd(struct se_cmd *cmd)
3367 {
3368 	BUG_ON(!cmd->se_tfo);
3369 
3370 	if (cmd->se_tmr_req)
3371 		core_tmr_release_req(cmd->se_tmr_req);
3372 	if (cmd->t_task_cdb != cmd->__t_task_cdb)
3373 		kfree(cmd->t_task_cdb);
3374 	/*
3375 	 * If this cmd has been setup with target_get_sess_cmd(), drop
3376 	 * the kref and call ->release_cmd() in kref callback.
3377 	 */
3378 	 if (cmd->check_release != 0) {
3379 		target_put_sess_cmd(cmd->se_sess, cmd);
3380 		return;
3381 	}
3382 	cmd->se_tfo->release_cmd(cmd);
3383 }
3384 
3385 /**
3386  * transport_put_cmd - release a reference to a command
3387  * @cmd:       command to release
3388  *
3389  * This routine releases our reference to the command and frees it if possible.
3390  */
3391 static void transport_put_cmd(struct se_cmd *cmd)
3392 {
3393 	unsigned long flags;
3394 	int free_tasks = 0;
3395 
3396 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3397 	if (atomic_read(&cmd->t_fe_count)) {
3398 		if (!atomic_dec_and_test(&cmd->t_fe_count))
3399 			goto out_busy;
3400 	}
3401 
3402 	if (atomic_read(&cmd->t_se_count)) {
3403 		if (!atomic_dec_and_test(&cmd->t_se_count))
3404 			goto out_busy;
3405 	}
3406 
3407 	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
3408 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
3409 		transport_all_task_dev_remove_state(cmd);
3410 		free_tasks = 1;
3411 	}
3412 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3413 
3414 	if (free_tasks != 0)
3415 		transport_free_dev_tasks(cmd);
3416 
3417 	transport_free_pages(cmd);
3418 	transport_release_cmd(cmd);
3419 	return;
3420 out_busy:
3421 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3422 }
3423 
3424 /*
3425  * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of
3426  * allocating in the core.
3427  * @cmd:  Associated se_cmd descriptor
3428  * @sgl:  SGL style memory for TCM WRITE / READ
3429  * @sgl_count: Number of SGL elements
3430  * @sgl_bidi: SGL style memory for TCM BIDI READ
3431  * @sgl_bidi_count: Number of BIDI READ SGL elements
3432  *
3433  * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
3434  * of parameters.
3435  */
3436 int transport_generic_map_mem_to_cmd(
3437 	struct se_cmd *cmd,
3438 	struct scatterlist *sgl,
3439 	u32 sgl_count,
3440 	struct scatterlist *sgl_bidi,
3441 	u32 sgl_bidi_count)
3442 {
3443 	if (!sgl || !sgl_count)
3444 		return 0;
3445 
3446 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3447 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3448 		/*
3449 		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
3450 		 * scatterlists already have been set to follow what the fabric
3451 		 * passes for the original expected data transfer length.
3452 		 */
3453 		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3454 			pr_warn("Rejecting SCSI DATA overflow for fabric using"
3455 				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3456 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3457 			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3458 			return -EINVAL;
3459 		}
3460 
3461 		cmd->t_data_sg = sgl;
3462 		cmd->t_data_nents = sgl_count;
3463 
3464 		if (sgl_bidi && sgl_bidi_count) {
3465 			cmd->t_bidi_data_sg = sgl_bidi;
3466 			cmd->t_bidi_data_nents = sgl_bidi_count;
3467 		}
3468 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3469 	}
3470 
3471 	return 0;
3472 }
3473 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
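
/*
 * Example (illustrative sketch, disabled): a fabric that already owns
 * scatterlists, e.g. from the SCSI midlayer, can hand them to the core
 * instead of having transport_generic_get_mem() allocate pages.  The
 * my_cmd container below is hypothetical.
 */
#if 0
static int my_fabric_map_data(struct my_cmd *mc)
{
	/* No BIDI READ payload in this example, so pass NULL/0 */
	return transport_generic_map_mem_to_cmd(&mc->se_cmd,
						mc->sgl, mc->sgl_nents,
						NULL, 0);
}
#endif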
3474 
3475 void *transport_kmap_data_sg(struct se_cmd *cmd)
3476 {
3477 	struct scatterlist *sg = cmd->t_data_sg;
3478 	struct page **pages;
3479 	int i;
3480 
3481 	BUG_ON(!sg);
3482 	/*
3483 	 * We need to take into account a possible offset here for fabrics like
3484 	 * tcm_loop that may be using a contiguous buffer from the SCSI midlayer for
3485 	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3486 	 */
3487 	if (!cmd->t_data_nents)
3488 		return NULL;
3489 	else if (cmd->t_data_nents == 1)
3490 		return kmap(sg_page(sg)) + sg->offset;
3491 
3492 	/* >1 page. use vmap */
3493 	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
3494 	if (!pages)
3495 		return NULL;
3496 
3497 	/* convert sg[] to pages[] */
3498 	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
3499 		pages[i] = sg_page(sg);
3500 	}
3501 
3502 	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
3503 	kfree(pages);
3504 	if (!cmd->t_data_vmap)
3505 		return NULL;
3506 
3507 	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
3508 }
3509 EXPORT_SYMBOL(transport_kmap_data_sg);
3510 
3511 void transport_kunmap_data_sg(struct se_cmd *cmd)
3512 {
3513 	if (!cmd->t_data_nents) {
3514 		return;
3515 	} else if (cmd->t_data_nents == 1) {
3516 		/* Single SG entry was kmap()ed by transport_kmap_data_sg() */
3517 		kunmap(sg_page(cmd->t_data_sg));
3518 		return;
3519 	}
3520 
3521 	vunmap(cmd->t_data_vmap);
3522 	cmd->t_data_vmap = NULL;
3520 }
3521 EXPORT_SYMBOL(transport_kunmap_data_sg);
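
/*
 * Example usage (hedged sketch): emulation code that needs a linear view
 * of the payload brackets its access with the kmap/kunmap pair.  Callers
 * must not assume whether the mapping came from kmap() (single SG entry)
 * or vmap() (multiple entries):
 *
 *	unsigned char *buf;
 *
 *	buf = transport_kmap_data_sg(cmd);
 *	if (!buf)
 *		return -ENOMEM;
 *	...read or fill buf[0 .. cmd->data_length - 1]...
 *	transport_kunmap_data_sg(cmd);
 */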
3522 
3523 static int
3524 transport_generic_get_mem(struct se_cmd *cmd)
3525 {
3526 	u32 length = cmd->data_length;
3527 	unsigned int nents;
3528 	struct page *page;
3529 	gfp_t zero_flag;
3530 	int i = 0;
3531 
3532 	nents = DIV_ROUND_UP(length, PAGE_SIZE);
3533 	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3534 	if (!cmd->t_data_sg)
3535 		return -ENOMEM;
3536 
3537 	cmd->t_data_nents = nents;
3538 	sg_init_table(cmd->t_data_sg, nents);
3539 
3540 	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
3541 
3542 	while (length) {
3543 		u32 page_len = min_t(u32, length, PAGE_SIZE);
3544 		page = alloc_page(GFP_KERNEL | zero_flag);
3545 		if (!page)
3546 			goto out;
3547 
3548 		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3549 		length -= page_len;
3550 		i++;
3551 	}
3552 	return 0;
3553 
3554 out:
3555 	/*
3556 	 * Entry i never got a page assigned, so only free entries [0, i)
3557 	 * to avoid passing a NULL page to __free_page().
3558 	 */
3559 	while (i > 0) {
3560 		i--;
3561 		__free_page(sg_page(&cmd->t_data_sg[i]));
3562 	}
3559 	kfree(cmd->t_data_sg);
3560 	cmd->t_data_sg = NULL;
3561 	return -ENOMEM;
3562 }
3563 
3564 /* Reduce the sector count if it exceeds what the device supports */
3565 static inline sector_t transport_limit_task_sectors(
3566 	struct se_device *dev,
3567 	unsigned long long lba,
3568 	sector_t sectors)
3569 {
3570 	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
3571 
3572 	if (dev->transport->get_device_type(dev) == TYPE_DISK)
3573 		if ((lba + sectors) > transport_dev_end_lba(dev))
3574 			sectors = ((transport_dev_end_lba(dev) - lba) + 1);
3575 
3576 	return sectors;
3577 }
3578 
3579 
3580 /*
3581  * This function can be used by HW target mode drivers to create a linked
3582  * scatterlist from all contiguously allocated struct se_task->task_sg[].
3583  * This is intended to be called during the completion path by TCM Core
3584  * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
3585  */
3586 void transport_do_task_sg_chain(struct se_cmd *cmd)
3587 {
3588 	struct scatterlist *sg_first = NULL;
3589 	struct scatterlist *sg_prev = NULL;
3590 	int sg_prev_nents = 0;
3591 	struct scatterlist *sg;
3592 	struct se_task *task;
3593 	u32 chained_nents = 0;
3594 	int i;
3595 
3596 	BUG_ON(!cmd->se_tfo->task_sg_chaining);
3597 
3598 	/*
3599 	 * Walk the struct se_task list and setup scatterlist chains
3600 	 * for each contiguously allocated struct se_task->task_sg[].
3601 	 */
3602 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
3603 		if (!task->task_sg)
3604 			continue;
3605 
3606 		if (!sg_first) {
3607 			sg_first = task->task_sg;
3608 			chained_nents = task->task_sg_nents;
3609 		} else {
3610 			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
3611 			chained_nents += task->task_sg_nents;
3612 		}
3613 		/*
3614 		 * For the padded tasks, use the extra SGL vector allocated
3615 		 * in transport_allocate_data_tasks() for the sg_prev_nents
3616 		 * offset into sg_chain() above.
3617 		 *
3618 		 * We do not need the padding for the last task (or a single
3619 		 * task), but in that case we will never use the sg_prev_nents
3620 		 * value below which would be incorrect.
3621 		 */
3622 		sg_prev_nents = (task->task_sg_nents + 1);
3623 		sg_prev = task->task_sg;
3624 	}
3625 	/*
3626 	 * Setup the starting pointer and total t_tasks_sg_chained_no including
3627 	 * padding SGs for linking and to mark the end.
3628 	 */
3629 	cmd->t_tasks_sg_chained = sg_first;
3630 	cmd->t_tasks_sg_chained_no = chained_nents;
3631 
3632 	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
3633 		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
3634 		cmd->t_tasks_sg_chained_no);
3635 
3636 	for_each_sg(cmd->t_tasks_sg_chained, sg,
3637 			cmd->t_tasks_sg_chained_no, i) {
3638 
3639 		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
3640 			i, sg, sg_page(sg), sg->length, sg->offset);
3641 		if (sg_is_chain(sg))
3642 			pr_debug("SG: %p sg_is_chain=1\n", sg);
3643 		if (sg_is_last(sg))
3644 			pr_debug("SG: %p sg_is_last=1\n", sg);
3645 	}
3646 }
3647 EXPORT_SYMBOL(transport_do_task_sg_chain);
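
/*
 * Example usage (hypothetical HW fabric sketch): once the per-task SGLs
 * have been chained, a driver can walk the single combined list with the
 * standard iterator, which transparently follows the chain entries.
 * program_hw_sgl_entry() below is a made-up placeholder for the driver's
 * descriptor setup, and dma_map_sg() is assumed to have run already so
 * that the sg_dma_*() accessors are valid:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(cmd->t_tasks_sg_chained, sg,
 *		    cmd->t_tasks_sg_chained_no, i)
 *		program_hw_sgl_entry(sg_dma_address(sg), sg_dma_len(sg));
 */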
3648 
3649 /*
3650  * Break up cmd into chunks transport can handle
3651  */
3652 static int
3653 transport_allocate_data_tasks(struct se_cmd *cmd,
3654 	enum dma_data_direction data_direction,
3655 	struct scatterlist *cmd_sg, unsigned int sgl_nents)
3656 {
3657 	struct se_device *dev = cmd->se_dev;
3658 	int task_count, i;
3659 	unsigned long long lba;
3660 	sector_t sectors, dev_max_sectors;
3661 	u32 sector_size;
3662 
3663 	if (transport_cmd_get_valid_sectors(cmd) < 0)
3664 		return -EINVAL;
3665 
3666 	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
3667 	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
3668 
3669 	WARN_ON(cmd->data_length % sector_size);
3670 
3671 	lba = cmd->t_task_lba;
3672 	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
3673 	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
3674 
3675 	/*
3676 	 * If we need just a single task reuse the SG list in the command
3677 	 * and avoid a lot of work.
3678 	 */
3679 	if (task_count == 1) {
3680 		struct se_task *task;
3681 		unsigned long flags;
3682 
3683 		task = transport_generic_get_task(cmd, data_direction);
3684 		if (!task)
3685 			return -ENOMEM;
3686 
3687 		task->task_sg = cmd_sg;
3688 		task->task_sg_nents = sgl_nents;
3689 
3690 		task->task_lba = lba;
3691 		task->task_sectors = sectors;
3692 		task->task_size = task->task_sectors * sector_size;
3693 
3694 		spin_lock_irqsave(&cmd->t_state_lock, flags);
3695 		list_add_tail(&task->t_list, &cmd->t_task_list);
3696 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3697 
3698 		return task_count;
3699 	}
3700 
3701 	for (i = 0; i < task_count; i++) {
3702 		struct se_task *task;
3703 		unsigned int task_size, task_sg_nents_padded;
3704 		struct scatterlist *sg;
3705 		unsigned long flags;
3706 		int count;
3707 
3708 		task = transport_generic_get_task(cmd, data_direction);
3709 		if (!task)
3710 			return -ENOMEM;
3711 
3712 		task->task_lba = lba;
3713 		task->task_sectors = min(sectors, dev_max_sectors);
3714 		task->task_size = task->task_sectors * sector_size;
3715 
3716 		/*
3717 		 * This now assumes that the passed SG entries are in PAGE_SIZE
3718 		 * chunks in order to calculate the number of per-task SGL entries.
3719 		 */
3720 		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
3721 		/*
3722 		 * Check if the fabric module driver is requesting that all
3723 		 * struct se_task->task_sg[] be chained together.  If so,
3724 		 * allocate an extra padding SG entry for linking and for
3725 		 * marking the end of the chained SGL on every task except
3726 		 * the last one; the final task does not need the padding.
3728 		 */
3729 		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1)))
3730 			task_sg_nents_padded = task->task_sg_nents + 1;
3731 		else
3732 			task_sg_nents_padded = task->task_sg_nents;
3733 
3734 		task->task_sg = kmalloc(sizeof(struct scatterlist) *
3735 					task_sg_nents_padded, GFP_KERNEL);
3736 		if (!task->task_sg) {
3737 			cmd->se_dev->transport->free_task(task);
3738 			return -ENOMEM;
3739 		}
3740 
3741 		sg_init_table(task->task_sg, task_sg_nents_padded);
3742 
3743 		task_size = task->task_size;
3744 
3745 		/* Build new sgl, only up to task_size */
3746 		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
3747 			if (cmd_sg->length > task_size)
3748 				break;
3749 
3750 			*sg = *cmd_sg;
3751 			task_size -= cmd_sg->length;
3752 			cmd_sg = sg_next(cmd_sg);
3753 		}
3754 
3755 		lba += task->task_sectors;
3756 		sectors -= task->task_sectors;
3757 
3758 		spin_lock_irqsave(&cmd->t_state_lock, flags);
3759 		list_add_tail(&task->t_list, &cmd->t_task_list);
3760 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3761 	}
3762 
3763 	return task_count;
3764 }
3765 
3766 static int
3767 transport_allocate_control_task(struct se_cmd *cmd)
3768 {
3769 	struct se_task *task;
3770 	unsigned long flags;
3771 
3772 	/* Workaround for handling zero-length control CDBs */
3773 	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3774 	    !cmd->data_length)
3775 		return 0;
3776 
3777 	task = transport_generic_get_task(cmd, cmd->data_direction);
3778 	if (!task)
3779 		return -ENOMEM;
3780 
3781 	task->task_sg = cmd->t_data_sg;
3782 	task->task_size = cmd->data_length;
3783 	task->task_sg_nents = cmd->t_data_nents;
3784 
3785 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3786 	list_add_tail(&task->t_list, &cmd->t_task_list);
3787 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3788 
3789 	/* Success! Return number of tasks allocated */
3790 	return 1;
3791 }
3792 
3793 /*
3794  * Allocate any required resources to execute the command and, if possible,
3795  * place it on the execution queue.  For writes we might not have the
3796  * payload yet, so we notify the fabric via a call to ->write_pending instead.
3797  */
3798 int transport_generic_new_cmd(struct se_cmd *cmd)
3799 {
3800 	struct se_device *dev = cmd->se_dev;
3801 	int task_cdbs, task_cdbs_bidi = 0;
3802 	int set_counts = 1;
3803 	int ret = 0;
3804 
3805 	/*
3806 	 * Determine if the TCM fabric module has already allocated physical
3807 	 * memory and has called transport_generic_map_mem_to_cmd()
3808 	 * directly beforehand.
3809 	 */
3810 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3811 	    cmd->data_length) {
3812 		ret = transport_generic_get_mem(cmd);
3813 		if (ret < 0)
3814 			goto out_fail;
3815 	}
3816 
3817 	/*
3818 	 * For BIDI command set up the read tasks first.
3819 	 */
3820 	if (cmd->t_bidi_data_sg &&
3821 	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3822 		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3823 
3824 		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3825 				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3826 				cmd->t_bidi_data_nents);
3827 		if (task_cdbs_bidi <= 0)
3828 			goto out_fail;
3829 
3830 		atomic_inc(&cmd->t_fe_count);
3831 		atomic_inc(&cmd->t_se_count);
3832 		set_counts = 0;
3833 	}
3834 
3835 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3836 		task_cdbs = transport_allocate_data_tasks(cmd,
3837 					cmd->data_direction, cmd->t_data_sg,
3838 					cmd->t_data_nents);
3839 	} else {
3840 		task_cdbs = transport_allocate_control_task(cmd);
3841 	}
3842 
3843 	if (task_cdbs < 0)
3844 		goto out_fail;
3845 	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3846 		spin_lock_irq(&cmd->t_state_lock);
3847 		cmd->t_state = TRANSPORT_COMPLETE;
3848 		cmd->transport_state |= CMD_T_ACTIVE;
3849 		spin_unlock_irq(&cmd->t_state_lock);
3850 
3851 		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
3852 			u8 ua_asc = 0, ua_ascq = 0;
3853 
3854 			core_scsi3_ua_clear_for_request_sense(cmd,
3855 					&ua_asc, &ua_ascq);
3856 		}
3857 
3858 		INIT_WORK(&cmd->work, target_complete_ok_work);
3859 		queue_work(target_completion_wq, &cmd->work);
3860 		return 0;
3861 	}
3862 
3863 	if (set_counts) {
3864 		atomic_inc(&cmd->t_fe_count);
3865 		atomic_inc(&cmd->t_se_count);
3866 	}
3867 
3868 	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
3869 	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3870 	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
3871 
3872 	/*
3873 	 * For WRITEs, let the fabric know its buffer is ready.
3874 	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
3875 	 * will be added to the struct se_device execution queue after its WRITE
3876 	 * data has arrived (i.e. it gets handled by the transport processing
3877 	 * thread a second time).
3878 	 */
3879 	if (cmd->data_direction == DMA_TO_DEVICE) {
3880 		transport_add_tasks_to_state_queue(cmd);
3881 		return transport_generic_write_pending(cmd);
3882 	}
3883 	/*
3884 	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
3885 	 * to the execution queue.
3886 	 */
3887 	transport_execute_tasks(cmd);
3888 	return 0;
3889 
3890 out_fail:
3891 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3892 	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3893 	return -EINVAL;
3894 }
3895 EXPORT_SYMBOL(transport_generic_new_cmd);
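
/*
 * Example usage (sketch, mirroring the dispatch done by
 * transport_processing_thread() later in this file): callers treat a
 * negative return as a request failure and a zero return as "in flight":
 *
 *	ret = transport_generic_new_cmd(cmd);
 *	if (ret < 0) {
 *		transport_generic_request_failure(cmd);
 *		return;
 *	}
 *
 * For DMA_TO_DEVICE a zero return only means ->write_pending() has been
 * issued; execution resumes once the WRITE payload arrives.
 */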
3896 
3897 /*	transport_generic_process_write():
3898  *
3899  *	Kick off execution of a WRITE once its payload has been received
3900  *	from the fabric.
3901  */
3901 void transport_generic_process_write(struct se_cmd *cmd)
3902 {
3903 	transport_execute_tasks(cmd);
3904 }
3905 EXPORT_SYMBOL(transport_generic_process_write);
3906 
3907 static void transport_write_pending_qf(struct se_cmd *cmd)
3908 {
3909 	int ret;
3910 
3911 	ret = cmd->se_tfo->write_pending(cmd);
3912 	if (ret == -EAGAIN || ret == -ENOMEM) {
3913 		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
3914 			 cmd);
3915 		transport_handle_queue_full(cmd, cmd->se_dev);
3916 	}
3917 }
3918 
3919 static int transport_generic_write_pending(struct se_cmd *cmd)
3920 {
3921 	unsigned long flags;
3922 	int ret;
3923 
3924 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3925 	cmd->t_state = TRANSPORT_WRITE_PENDING;
3926 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3927 
3928 	/*
3929 	 * Clear the se_cmd for WRITE_PENDING status in order to set
3930 	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
3931 	 * from HW target mode interrupt code.  Calling this with
3932 	 * transport_off = 1 before cmd->se_tfo->write_pending() is safe
3933 	 * because the se_cmd->se_lun pointer is not cleared.
3934 	 */
3935 	transport_cmd_check_stop(cmd, 1, 0);
3936 
3937 	/*
3938 	 * Call the fabric write_pending function here to let the
3939 	 * frontend know that WRITE buffers are ready.
3940 	 */
3941 	ret = cmd->se_tfo->write_pending(cmd);
3942 	if (ret == -EAGAIN || ret == -ENOMEM)
3943 		goto queue_full;
3944 	else if (ret < 0)
3945 		return ret;
3946 
3947 	return 1;
3948 
3949 queue_full:
3950 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
3951 	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3952 	transport_handle_queue_full(cmd, cmd->se_dev);
3953 	return 0;
3954 }
3955 
3956 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3957 {
3958 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3959 		if (wait_for_tasks && cmd->se_tmr_req)
3960 			transport_wait_for_tasks(cmd);
3961 
3962 		transport_release_cmd(cmd);
3963 	} else {
3964 		if (wait_for_tasks)
3965 			transport_wait_for_tasks(cmd);
3966 
3967 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3968 
3969 		if (cmd->se_lun)
3970 			transport_lun_remove_cmd(cmd);
3971 
3972 		transport_free_dev_tasks(cmd);
3973 
3974 		transport_put_cmd(cmd);
3975 	}
3976 }
3977 EXPORT_SYMBOL(transport_generic_free_cmd);
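
/*
 * Example usage (hedged sketch): a fabric's normal response-completion
 * callback typically ends with:
 *
 *	transport_generic_free_cmd(se_cmd, 0);
 *
 * passing wait_for_tasks = 1 instead when the caller must synchronously
 * quiesce an in-flight descriptor, e.g. during connection teardown.
 */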
3978 
3979 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
3980  * @se_sess:	session to reference
3981  * @se_cmd:	command descriptor to add
3982  * @ack_kref:	Signal that the fabric will perform an extra
3983  *		target_put_sess_cmd() as its acknowledgement
3983  */
3984 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
3985 			bool ack_kref)
3986 {
3987 	unsigned long flags;
3988 
3989 	kref_init(&se_cmd->cmd_kref);
3990 	/*
3991 	 * Add a second kref if the fabric caller is expecting to handle
3992 	 * fabric acknowledgement that requires two target_put_sess_cmd()
3993 	 * invocations before se_cmd descriptor release.
3994 	 */
3995 	if (ack_kref)
3996 		kref_get(&se_cmd->cmd_kref);
3997 
3998 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3999 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
4000 	se_cmd->check_release = 1;
4001 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4002 }
4003 EXPORT_SYMBOL(target_get_sess_cmd);
4004 
4005 static void target_release_cmd_kref(struct kref *kref)
4006 {
4007 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
4008 	struct se_session *se_sess = se_cmd->se_sess;
4009 	unsigned long flags;
4010 
4011 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4012 	if (list_empty(&se_cmd->se_cmd_list)) {
4013 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4014 		WARN_ON(1);
4015 		return;
4016 	}
4017 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
4018 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4019 		complete(&se_cmd->cmd_wait_comp);
4020 		return;
4021 	}
4022 	list_del(&se_cmd->se_cmd_list);
4023 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4024 
4025 	se_cmd->se_tfo->release_cmd(se_cmd);
4026 }
4027 
4028 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
4029  * @se_sess:	session to reference
4030  * @se_cmd:	command descriptor to drop
4031  */
4032 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
4033 {
4034 	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
4035 }
4036 EXPORT_SYMBOL(target_put_sess_cmd);
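
/*
 * Example lifetime (hedged sketch): a fabric that expects a hardware or
 * protocol-level acknowledgement takes the extra kref up front and then
 * drops it twice:
 *
 *	target_get_sess_cmd(se_sess, se_cmd, true);
 *	...submit, complete, queue the response...
 *	target_put_sess_cmd(se_sess, se_cmd);	(response sent)
 *	target_put_sess_cmd(se_sess, se_cmd);	(ack received)
 *
 * Only the final put runs target_release_cmd_kref() and thus
 * ->release_cmd().
 */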
4037 
4038 /* target_splice_sess_cmd_list - Splice active cmds onto sess_wait_list
4039  * @se_sess:	session to split
4040  */
4041 void target_splice_sess_cmd_list(struct se_session *se_sess)
4042 {
4043 	struct se_cmd *se_cmd;
4044 	unsigned long flags;
4045 
4046 	WARN_ON(!list_empty(&se_sess->sess_wait_list));
4047 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
4048 
4049 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4050 	se_sess->sess_tearing_down = 1;
4051 
4052 	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
4053 
4054 	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
4055 		se_cmd->cmd_wait_set = 1;
4056 
4057 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4058 }
4059 EXPORT_SYMBOL(target_splice_sess_cmd_list);
4060 
4061 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
4062  * @se_sess:    session to wait for active I/O
4063  * @wait_for_tasks:	Make extra transport_wait_for_tasks call
4064  */
4065 void target_wait_for_sess_cmds(
4066 	struct se_session *se_sess,
4067 	int wait_for_tasks)
4068 {
4069 	struct se_cmd *se_cmd, *tmp_cmd;
4070 	bool rc = false;
4071 
4072 	list_for_each_entry_safe(se_cmd, tmp_cmd,
4073 				&se_sess->sess_wait_list, se_cmd_list) {
4074 		list_del(&se_cmd->se_cmd_list);
4075 
4076 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
4077 			" %d\n", se_cmd, se_cmd->t_state,
4078 			se_cmd->se_tfo->get_cmd_state(se_cmd));
4079 
4080 		if (wait_for_tasks) {
4081 			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
4082 				" fabric state: %d\n", se_cmd, se_cmd->t_state,
4083 				se_cmd->se_tfo->get_cmd_state(se_cmd));
4084 
4085 			rc = transport_wait_for_tasks(se_cmd);
4086 
4087 			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
4088 				" fabric state: %d\n", se_cmd, se_cmd->t_state,
4089 				se_cmd->se_tfo->get_cmd_state(se_cmd));
4090 		}
4091 
4092 		if (!rc) {
4093 			wait_for_completion(&se_cmd->cmd_wait_comp);
4094 			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
4095 				" fabric state: %d\n", se_cmd, se_cmd->t_state,
4096 				se_cmd->se_tfo->get_cmd_state(se_cmd));
4097 		}
4098 
4099 		se_cmd->se_tfo->release_cmd(se_cmd);
4100 	}
4101 }
4102 EXPORT_SYMBOL(target_wait_for_sess_cmds);
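
/*
 * Example shutdown sequence (illustrative sketch): a fabric tearing down
 * a session splices the active list first, stops accepting new commands
 * from the wire, and only then waits:
 *
 *	target_splice_sess_cmd_list(se_sess);
 *	...quiesce the transport connection...
 *	target_wait_for_sess_cmds(se_sess, 0);
 *	transport_deregister_session(se_sess);
 *
 * transport_deregister_session() lives elsewhere in the core; the
 * ordering is the point of this sketch.
 */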
4103 
4104 /*	transport_lun_wait_for_tasks():
4105  *
4106  *	Called from ConfigFS context to stop the passed struct se_cmd to allow
4107  *	a struct se_lun to be shut down successfully.
4108  */
4109 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4110 {
4111 	unsigned long flags;
4112 	int ret;
4113 	/*
4114 	 * If the frontend has already requested this struct se_cmd to
4115 	 * be stopped, we can safely ignore this struct se_cmd.
4116 	 */
4117 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4118 	if (cmd->transport_state & CMD_T_STOP) {
4119 		cmd->transport_state &= ~CMD_T_LUN_STOP;
4120 
4121 		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
4122 			 cmd->se_tfo->get_task_tag(cmd));
4123 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4124 		transport_cmd_check_stop(cmd, 1, 0);
4125 		return -EPERM;
4126 	}
4127 	cmd->transport_state |= CMD_T_LUN_FE_STOP;
4128 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4129 
4130 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4131 
4132 	ret = transport_stop_tasks_for_cmd(cmd);
4133 
4134 	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4135 			" %d\n", cmd, cmd->t_task_list_num, ret);
4136 	if (!ret) {
4137 		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4138 				cmd->se_tfo->get_task_tag(cmd));
4139 		wait_for_completion(&cmd->transport_lun_stop_comp);
4140 		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4141 				cmd->se_tfo->get_task_tag(cmd));
4142 	}
4143 	transport_remove_cmd_from_queue(cmd);
4144 
4145 	return 0;
4146 }
4147 
4148 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4149 {
4150 	struct se_cmd *cmd = NULL;
4151 	unsigned long lun_flags, cmd_flags;
4152 	/*
4153 	 * Do exception processing and return CHECK_CONDITION status to the
4154 	 * Initiator Port.
4155 	 */
4156 	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4157 	while (!list_empty(&lun->lun_cmd_list)) {
4158 		cmd = list_first_entry(&lun->lun_cmd_list,
4159 		       struct se_cmd, se_lun_node);
4160 		list_del_init(&cmd->se_lun_node);
4161 
4162 		/*
4163 		 * This will notify iscsi_target_transport.c:
4164 		 * transport_cmd_check_stop() that a LUN shutdown is in
4165 		 * progress for the iscsi_cmd_t.
4166 		 */
4167 		spin_lock(&cmd->t_state_lock);
4168 		pr_debug("SE_LUN[%d] - Setting cmd->transport"
4169 			"_lun_stop for  ITT: 0x%08x\n",
4170 			cmd->se_lun->unpacked_lun,
4171 			cmd->se_tfo->get_task_tag(cmd));
4172 		cmd->transport_state |= CMD_T_LUN_STOP;
4173 		spin_unlock(&cmd->t_state_lock);
4174 
4175 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4176 
4177 		if (!cmd->se_lun) {
4178 			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4179 				cmd->se_tfo->get_task_tag(cmd),
4180 				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4181 			BUG();
4182 		}
4183 		/*
4184 		 * If the Storage engine still owns the iscsi_cmd_t, determine
4185 		 * and/or stop its context.
4186 		 */
4187 		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4188 			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4189 			cmd->se_tfo->get_task_tag(cmd));
4190 
4191 		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
4192 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4193 			continue;
4194 		}
4195 
4196 		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4197 			"_wait_for_tasks(): SUCCESS\n",
4198 			cmd->se_lun->unpacked_lun,
4199 			cmd->se_tfo->get_task_tag(cmd));
4200 
4201 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4202 		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
4203 			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4204 			goto check_cond;
4205 		}
4206 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
4207 		transport_all_task_dev_remove_state(cmd);
4208 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4209 
4210 		transport_free_dev_tasks(cmd);
4211 		/*
4212 		 * The Storage engine stopped this struct se_cmd before it was
4213 		 * sent to the fabric frontend for delivery back to the
4214 		 * Initiator Node.  Return this SCSI CDB back with a
4215 		 * CHECK_CONDITION status.
4216 		 */
4217 check_cond:
4218 		transport_send_check_condition_and_sense(cmd,
4219 				TCM_NON_EXISTENT_LUN, 0);
4220 		/*
4221 		 * If the fabric frontend is waiting for this iscsi_cmd_t to
4222 		 * be released, notify the waiting thread now that the LU has
4223 		 * finished accessing it.
4224 		 */
4225 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4226 		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
4227 			pr_debug("SE_LUN[%d] - Detected FE stop for"
4228 				" struct se_cmd: %p ITT: 0x%08x\n",
4229 				lun->unpacked_lun,
4230 				cmd, cmd->se_tfo->get_task_tag(cmd));
4231 
4232 			spin_unlock_irqrestore(&cmd->t_state_lock,
4233 					cmd_flags);
4234 			transport_cmd_check_stop(cmd, 1, 0);
4235 			complete(&cmd->transport_lun_fe_stop_comp);
4236 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4237 			continue;
4238 		}
4239 		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4240 			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4241 
4242 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4243 		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4244 	}
4245 	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4246 }
4247 
4248 static int transport_clear_lun_thread(void *p)
4249 {
4250 	struct se_lun *lun = p;
4251 
4252 	__transport_clear_lun_from_sessions(lun);
4253 	complete(&lun->lun_shutdown_comp);
4254 
4255 	return 0;
4256 }
4257 
4258 int transport_clear_lun_from_sessions(struct se_lun *lun)
4259 {
4260 	struct task_struct *kt;
4261 
4262 	kt = kthread_run(transport_clear_lun_thread, lun,
4263 			"tcm_cl_%u", lun->unpacked_lun);
4264 	if (IS_ERR(kt)) {
4265 		pr_err("Unable to start clear_lun thread\n");
4266 		return PTR_ERR(kt);
4267 	}
4268 	wait_for_completion(&lun->lun_shutdown_comp);
4269 
4270 	return 0;
4271 }
4272 
4273 /**
4274  * transport_wait_for_tasks - wait for completion to occur
4275  * @cmd:	command to wait
4276  *
4277  * Called from frontend fabric context to wait for storage engine
4278  * to pause and/or release frontend generated struct se_cmd.
4279  */
4280 bool transport_wait_for_tasks(struct se_cmd *cmd)
4281 {
4282 	unsigned long flags;
4283 
4284 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4285 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
4286 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4287 		return false;
4288 	}
4289 	/*
4290 	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
4291 	 * has been set in transport_set_supported_SAM_opcode().
4292 	 */
4293 	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
4294 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4295 		return false;
4296 	}
4297 	/*
4298 	 * If we are already stopped due to an external event (i.e. LUN shutdown),
4299 	 * sleep until the connection can have the passed struct se_cmd back.
4300 	 * The cmd->transport_lun_fe_stop_comp will be completed by
4301 	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
4302 	 * has completed its operation on the struct se_cmd.
4303 	 */
4304 	if (cmd->transport_state & CMD_T_LUN_STOP) {
4305 		pr_debug("wait_for_tasks: Stopping"
4306 			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
4307 			"_stop_comp); for ITT: 0x%08x\n",
4308 			cmd->se_tfo->get_task_tag(cmd));
4309 		/*
4310 		 * There is a special case for WRITES where a FE exception +
4311 		 * LUN shutdown means ConfigFS context is still sleeping on
4312 		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4313 		 * We go ahead and complete transport_lun_stop_comp just to
4314 		 * be sure here.
4315 		 */
4316 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4317 		complete(&cmd->transport_lun_stop_comp);
4318 		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4319 		spin_lock_irqsave(&cmd->t_state_lock, flags);
4320 
4321 		transport_all_task_dev_remove_state(cmd);
4322 		/*
4323 		 * At this point, the frontend who was the originator of this
4324 		 * struct se_cmd, now owns the structure and can be released through
4325 		 * normal means below.
4326 		 */
4327 		pr_debug("wait_for_tasks: Stopped"
4328 			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
4329 			"stop_comp); for ITT: 0x%08x\n",
4330 			cmd->se_tfo->get_task_tag(cmd));
4331 
4332 		cmd->transport_state &= ~CMD_T_LUN_STOP;
4333 	}
4334 
4335 	if (!(cmd->transport_state & CMD_T_ACTIVE) ||
4336 	     (cmd->transport_state & CMD_T_ABORTED)) {
4337 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4338 		return false;
4339 	}
4340 
4341 	cmd->transport_state |= CMD_T_STOP;
4342 
4343 	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4344 		" i_state: %d, t_state: %d, CMD_T_STOP\n",
4345 		cmd, cmd->se_tfo->get_task_tag(cmd),
4346 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4347 
4348 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4349 
4350 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4351 
4352 	wait_for_completion(&cmd->t_transport_stop_comp);
4353 
4354 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4355 	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
4356 
4357 	pr_debug("wait_for_tasks: Stopped wait_for_compltion("
4358 		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4359 		cmd->se_tfo->get_task_tag(cmd));
4360 
4361 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4362 
4363 	return true;
4364 }
4365 EXPORT_SYMBOL(transport_wait_for_tasks);
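
/*
 * Example usage (sketch): fabric error paths may call this directly and
 * use the return value to learn whether the command was ever active:
 *
 *	if (transport_wait_for_tasks(se_cmd))
 *		pr_debug("cmd %p quiesced before release\n", se_cmd);
 *
 * A false return means there was nothing to wait for (never activated,
 * already aborted, or not a SCF_SE_LUN_CMD / TMR descriptor).
 */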
4366 
4367 static int transport_get_sense_codes(
4368 	struct se_cmd *cmd,
4369 	u8 *asc,
4370 	u8 *ascq)
4371 {
4372 	*asc = cmd->scsi_asc;
4373 	*ascq = cmd->scsi_ascq;
4374 
4375 	return 0;
4376 }
4377 
4378 static int transport_set_sense_codes(
4379 	struct se_cmd *cmd,
4380 	u8 asc,
4381 	u8 ascq)
4382 {
4383 	cmd->scsi_asc = asc;
4384 	cmd->scsi_ascq = ascq;
4385 
4386 	return 0;
4387 }
4388 
4389 int transport_send_check_condition_and_sense(
4390 	struct se_cmd *cmd,
4391 	u8 reason,
4392 	int from_transport)
4393 {
4394 	unsigned char *buffer = cmd->sense_buffer;
4395 	unsigned long flags;
4396 	int offset;
4397 	u8 asc = 0, ascq = 0;
4398 
4399 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4400 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4401 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4402 		return 0;
4403 	}
4404 	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
4405 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4406 
4407 	if (!reason && from_transport)
4408 		goto after_reason;
4409 
4410 	if (!from_transport)
4411 		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4412 	/*
4413 	 * Data Segment and SenseLength of the fabric response PDU.
4414 	 *
4415 	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4416 	 * from include/scsi/scsi_cmnd.h
4417 	 */
4418 	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
4419 				TRANSPORT_SENSE_BUFFER);
4420 	/*
4421 	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
4422 	 * SENSE KEY values from include/scsi/scsi.h
4423 	 */
4424 	switch (reason) {
4425 	case TCM_NON_EXISTENT_LUN:
4426 		/* CURRENT ERROR */
4427 		buffer[offset] = 0x70;
4428 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4429 		/* ILLEGAL REQUEST */
4430 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4431 		/* LOGICAL UNIT NOT SUPPORTED */
4432 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4433 		break;
4434 	case TCM_UNSUPPORTED_SCSI_OPCODE:
4435 	case TCM_SECTOR_COUNT_TOO_MANY:
4436 		/* CURRENT ERROR */
4437 		buffer[offset] = 0x70;
4438 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4439 		/* ILLEGAL REQUEST */
4440 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4441 		/* INVALID COMMAND OPERATION CODE */
4442 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4443 		break;
4444 	case TCM_UNKNOWN_MODE_PAGE:
4445 		/* CURRENT ERROR */
4446 		buffer[offset] = 0x70;
4447 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4448 		/* ILLEGAL REQUEST */
4449 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4450 		/* INVALID FIELD IN CDB */
4451 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4452 		break;
4453 	case TCM_CHECK_CONDITION_ABORT_CMD:
4454 		/* CURRENT ERROR */
4455 		buffer[offset] = 0x70;
4456 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4457 		/* ABORTED COMMAND */
4458 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4459 		/* BUS DEVICE RESET FUNCTION OCCURRED */
4460 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4461 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4462 		break;
4463 	case TCM_INCORRECT_AMOUNT_OF_DATA:
4464 		/* CURRENT ERROR */
4465 		buffer[offset] = 0x70;
4466 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4467 		/* ABORTED COMMAND */
4468 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4469 		/* WRITE ERROR */
4470 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4471 		/* NOT ENOUGH UNSOLICITED DATA */
4472 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4473 		break;
4474 	case TCM_INVALID_CDB_FIELD:
4475 		/* CURRENT ERROR */
4476 		buffer[offset] = 0x70;
4477 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4478 		/* ILLEGAL REQUEST */
4479 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4480 		/* INVALID FIELD IN CDB */
4481 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4482 		break;
4483 	case TCM_INVALID_PARAMETER_LIST:
4484 		/* CURRENT ERROR */
4485 		buffer[offset] = 0x70;
4486 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4487 		/* ILLEGAL REQUEST */
4488 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4489 		/* INVALID FIELD IN PARAMETER LIST */
4490 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4491 		break;
4492 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
4493 		/* CURRENT ERROR */
4494 		buffer[offset] = 0x70;
4495 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4496 		/* ABORTED COMMAND */
4497 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4498 		/* WRITE ERROR */
4499 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4500 		/* UNEXPECTED_UNSOLICITED_DATA */
4501 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4502 		break;
4503 	case TCM_SERVICE_CRC_ERROR:
4504 		/* CURRENT ERROR */
4505 		buffer[offset] = 0x70;
4506 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4507 		/* ABORTED COMMAND */
4508 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4509 		/* PROTOCOL SERVICE CRC ERROR */
4510 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4511 		/* N/A */
4512 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4513 		break;
4514 	case TCM_SNACK_REJECTED:
4515 		/* CURRENT ERROR */
4516 		buffer[offset] = 0x70;
4517 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4518 		/* ABORTED COMMAND */
4519 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4520 		/* READ ERROR */
4521 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4522 		/* FAILED RETRANSMISSION REQUEST */
4523 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4524 		break;
4525 	case TCM_WRITE_PROTECTED:
4526 		/* CURRENT ERROR */
4527 		buffer[offset] = 0x70;
4528 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4529 		/* DATA PROTECT */
4530 		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4531 		/* WRITE PROTECTED */
4532 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4533 		break;
4534 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4535 		/* CURRENT ERROR */
4536 		buffer[offset] = 0x70;
4537 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4538 		/* UNIT ATTENTION */
4539 		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4540 		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4541 		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4542 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4543 		break;
4544 	case TCM_CHECK_CONDITION_NOT_READY:
4545 		/* CURRENT ERROR */
4546 		buffer[offset] = 0x70;
4547 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4548 		/* Not Ready */
4549 		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4550 		transport_get_sense_codes(cmd, &asc, &ascq);
4551 		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4552 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4553 		break;
4554 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4555 	default:
4556 		/* CURRENT ERROR */
4557 		buffer[offset] = 0x70;
4558 		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4559 		/* ILLEGAL REQUEST */
4560 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4561 		/* LOGICAL UNIT COMMUNICATION FAILURE */
4562 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
4563 		break;
4564 	}
4565 	/*
4566 	 * This code uses linux/include/scsi/scsi.h SAM status codes!
4567 	 */
4568 	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4569 	/*
4570 	 * Automatically padded, this value is encoded in the fabric's
4571 	 * data_length response PDU containing the SCSI defined sense data.
4572 	 */
4573 	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
4574 
4575 after_reason:
4576 	return cmd->se_tfo->queue_status(cmd);
4577 }
4578 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
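
/*
 * Example usage (sketch; "supported" is a stand-in condition): setup code
 * that detects an unsupported opcode can short-circuit straight to a
 * CHECK CONDITION response instead of executing the command:
 *
 *	if (!supported)
 *		return transport_send_check_condition_and_sense(cmd,
 *				TCM_UNSUPPORTED_SCSI_OPCODE, 0);
 *
 * With from_transport = 0 the sense payload is built here (the
 * SCF_EMULATED_TASK_SENSE path) before ->queue_status() is called.
 */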
4579 
4580 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4581 {
4582 	int ret = 0;
4583 
4584 	if (cmd->transport_state & CMD_T_ABORTED) {
4585 		if (!send_status ||
4586 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4587 			return 1;
4588 #if 0
4589 		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4590 			" status for CDB: 0x%02x ITT: 0x%08x\n",
4591 			cmd->t_task_cdb[0],
4592 			cmd->se_tfo->get_task_tag(cmd));
4593 #endif
4594 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4595 		cmd->se_tfo->queue_status(cmd);
4596 		ret = 1;
4597 	}
4598 	return ret;
4599 }
4600 EXPORT_SYMBOL(transport_check_aborted_status);
4601 
4602 void transport_send_task_abort(struct se_cmd *cmd)
4603 {
4604 	unsigned long flags;
4605 
4606 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4607 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4608 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4609 		return;
4610 	}
4611 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4612 
4613 	/*
4614 	 * If there are still expected incoming fabric WRITEs, we wait
4615 	 * until they have completed before sending a TASK_ABORTED
4616 	 * response.  This response with TASK_ABORTED status will be
4617 	 * queued back to the fabric module by transport_check_aborted_status().
4618 	 */
4619 	if (cmd->data_direction == DMA_TO_DEVICE) {
4620 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4621 			cmd->transport_state |= CMD_T_ABORTED;
4622 			smp_mb__after_atomic_inc();
4623 		}
4624 	}
4625 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4626 #if 0
4627 	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4628 		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
4629 		cmd->se_tfo->get_task_tag(cmd));
4630 #endif
4631 	cmd->se_tfo->queue_status(cmd);
4632 }
4633 
4634 static int transport_generic_do_tmr(struct se_cmd *cmd)
4635 {
4636 	struct se_device *dev = cmd->se_dev;
4637 	struct se_tmr_req *tmr = cmd->se_tmr_req;
4638 	int ret;
4639 
4640 	switch (tmr->function) {
4641 	case TMR_ABORT_TASK:
4642 		tmr->response = TMR_FUNCTION_REJECTED;
4643 		break;
4644 	case TMR_ABORT_TASK_SET:
4645 	case TMR_CLEAR_ACA:
4646 	case TMR_CLEAR_TASK_SET:
4647 		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4648 		break;
4649 	case TMR_LUN_RESET:
4650 		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4651 		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4652 					 TMR_FUNCTION_REJECTED;
4653 		break;
4654 	case TMR_TARGET_WARM_RESET:
4655 		tmr->response = TMR_FUNCTION_REJECTED;
4656 		break;
4657 	case TMR_TARGET_COLD_RESET:
4658 		tmr->response = TMR_FUNCTION_REJECTED;
4659 		break;
4660 	default:
4661 		pr_err("Uknown TMR function: 0x%02x.\n",
4662 				tmr->function);
4663 		tmr->response = TMR_FUNCTION_REJECTED;
4664 		break;
4665 	}
4666 
4667 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
4668 	cmd->se_tfo->queue_tm_rsp(cmd);
4669 
4670 	transport_cmd_check_stop_to_fabric(cmd);
4671 	return 0;
4672 }
4673 
4674 /*	transport_processing_thread():
4675  *
4676  *	Per-device kernel thread: dequeues struct se_cmd from the device
4677  *	queue object and dispatches each one based on cmd->t_state.
4678  */
4678 static int transport_processing_thread(void *param)
4679 {
4680 	int ret;
4681 	struct se_cmd *cmd;
4682 	struct se_device *dev = param;
4683 
4684 	while (!kthread_should_stop()) {
4685 		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4686 				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
4687 				kthread_should_stop());
4688 		if (ret < 0)
4689 			goto out;
4690 
4691 get_cmd:
4692 		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4693 		if (!cmd)
4694 			continue;
4695 
4696 		switch (cmd->t_state) {
4697 		case TRANSPORT_NEW_CMD:
4698 			BUG();
4699 			break;
4700 		case TRANSPORT_NEW_CMD_MAP:
4701 			if (!cmd->se_tfo->new_cmd_map) {
4702 				pr_err("cmd->se_tfo->new_cmd_map is"
4703 					" NULL for TRANSPORT_NEW_CMD_MAP\n");
4704 				BUG();
4705 			}
4706 			ret = cmd->se_tfo->new_cmd_map(cmd);
4707 			if (ret < 0) {
4708 				transport_generic_request_failure(cmd);
4709 				break;
4710 			}
4711 			ret = transport_generic_new_cmd(cmd);
4712 			if (ret < 0) {
4713 				transport_generic_request_failure(cmd);
4714 				break;
4715 			}
4716 			break;
4717 		case TRANSPORT_PROCESS_WRITE:
4718 			transport_generic_process_write(cmd);
4719 			break;
4720 		case TRANSPORT_PROCESS_TMR:
4721 			transport_generic_do_tmr(cmd);
4722 			break;
4723 		case TRANSPORT_COMPLETE_QF_WP:
4724 			transport_write_pending_qf(cmd);
4725 			break;
4726 		case TRANSPORT_COMPLETE_QF_OK:
4727 			transport_complete_qf(cmd);
4728 			break;
4729 		default:
4730 			pr_err("Unknown t_state: %d  for ITT: 0x%08x "
4731 				"i_state: %d on SE LUN: %u\n",
4732 				cmd->t_state,
4733 				cmd->se_tfo->get_task_tag(cmd),
4734 				cmd->se_tfo->get_cmd_state(cmd),
4735 				cmd->se_lun->unpacked_lun);
4736 			BUG();
4737 		}
4738 
4739 		goto get_cmd;
4740 	}
4741 
4742 out:
4743 	WARN_ON(!list_empty(&dev->state_task_list));
4744 	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
4745 	dev->process_thread = NULL;
4746 	return 0;
4747 }
4748