xref: /linux/drivers/target/target_core_transport.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*******************************************************************************
2  * Filename:  target_core_transport.c
3  *
4  * This file contains the Generic Target Engine Core.
5  *
6  * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7  * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8  * Copyright (c) 2007-2010 Rising Tide Systems
9  * Copyright (c) 2008-2010 Linux-iSCSI.org
10  *
11  * Nicholas A. Bellinger <nab@kernel.org>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26  *
27  ******************************************************************************/
28 
29 #include <linux/net.h>
30 #include <linux/delay.h>
31 #include <linux/string.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/blkdev.h>
35 #include <linux/spinlock.h>
36 #include <linux/kthread.h>
37 #include <linux/in.h>
38 #include <linux/cdrom.h>
39 #include <linux/module.h>
40 #include <asm/unaligned.h>
41 #include <net/sock.h>
42 #include <net/tcp.h>
43 #include <scsi/scsi.h>
44 #include <scsi/scsi_cmnd.h>
45 #include <scsi/scsi_tcq.h>
46 
47 #include <target/target_core_base.h>
48 #include <target/target_core_device.h>
49 #include <target/target_core_tmr.h>
50 #include <target/target_core_tpg.h>
51 #include <target/target_core_transport.h>
52 #include <target/target_core_fabric_ops.h>
53 #include <target/target_core_configfs.h>
54 
55 #include "target_core_alua.h"
56 #include "target_core_cdb.h"
57 #include "target_core_hba.h"
58 #include "target_core_pr.h"
59 #include "target_core_ua.h"
60 
61 static int sub_api_initialized;
62 
63 static struct workqueue_struct *target_completion_wq;
64 static struct kmem_cache *se_cmd_cache;
65 static struct kmem_cache *se_sess_cache;
66 struct kmem_cache *se_tmr_req_cache;
67 struct kmem_cache *se_ua_cache;
68 struct kmem_cache *t10_pr_reg_cache;
69 struct kmem_cache *t10_alua_lu_gp_cache;
70 struct kmem_cache *t10_alua_lu_gp_mem_cache;
71 struct kmem_cache *t10_alua_tg_pt_gp_cache;
72 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
73 
74 static int transport_generic_write_pending(struct se_cmd *);
75 static int transport_processing_thread(void *param);
76 static int __transport_execute_tasks(struct se_device *dev);
77 static void transport_complete_task_attr(struct se_cmd *cmd);
78 static void transport_handle_queue_full(struct se_cmd *cmd,
79 		struct se_device *dev);
80 static void transport_free_dev_tasks(struct se_cmd *cmd);
81 static int transport_generic_get_mem(struct se_cmd *cmd);
82 static void transport_put_cmd(struct se_cmd *cmd);
83 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
84 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
85 static void transport_generic_request_failure(struct se_cmd *, int, int);
86 static void target_complete_ok_work(struct work_struct *work);
87 
88 int init_se_kmem_caches(void)
89 {
90 	se_cmd_cache = kmem_cache_create("se_cmd_cache",
91 			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
92 	if (!se_cmd_cache) {
93 		pr_err("kmem_cache_create for struct se_cmd failed\n");
94 		goto out;
95 	}
96 	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
97 			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
98 			0, NULL);
99 	if (!se_tmr_req_cache) {
100 		pr_err("kmem_cache_create() for struct se_tmr_req"
101 				" failed\n");
102 		goto out_free_cmd_cache;
103 	}
104 	se_sess_cache = kmem_cache_create("se_sess_cache",
105 			sizeof(struct se_session), __alignof__(struct se_session),
106 			0, NULL);
107 	if (!se_sess_cache) {
108 		pr_err("kmem_cache_create() for struct se_session"
109 				" failed\n");
110 		goto out_free_tmr_req_cache;
111 	}
112 	se_ua_cache = kmem_cache_create("se_ua_cache",
113 			sizeof(struct se_ua), __alignof__(struct se_ua),
114 			0, NULL);
115 	if (!se_ua_cache) {
116 		pr_err("kmem_cache_create() for struct se_ua failed\n");
117 		goto out_free_sess_cache;
118 	}
119 	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
120 			sizeof(struct t10_pr_registration),
121 			__alignof__(struct t10_pr_registration), 0, NULL);
122 	if (!t10_pr_reg_cache) {
123 		pr_err("kmem_cache_create() for struct t10_pr_registration"
124 				" failed\n");
125 		goto out_free_ua_cache;
126 	}
127 	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
128 			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
129 			0, NULL);
130 	if (!t10_alua_lu_gp_cache) {
131 		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
132 				" failed\n");
133 		goto out_free_pr_reg_cache;
134 	}
135 	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
136 			sizeof(struct t10_alua_lu_gp_member),
137 			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
138 	if (!t10_alua_lu_gp_mem_cache) {
139 		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
140 				"cache failed\n");
141 		goto out_free_lu_gp_cache;
142 	}
143 	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
144 			sizeof(struct t10_alua_tg_pt_gp),
145 			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
146 	if (!t10_alua_tg_pt_gp_cache) {
147 		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
148 				"cache failed\n");
149 		goto out_free_lu_gp_mem_cache;
150 	}
151 	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
152 			"t10_alua_tg_pt_gp_mem_cache",
153 			sizeof(struct t10_alua_tg_pt_gp_member),
154 			__alignof__(struct t10_alua_tg_pt_gp_member),
155 			0, NULL);
156 	if (!t10_alua_tg_pt_gp_mem_cache) {
157 		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
158 				"mem_cache failed\n");
159 		goto out_free_tg_pt_gp_cache;
160 	}
161 
162 	target_completion_wq = alloc_workqueue("target_completion",
163 					       WQ_MEM_RECLAIM, 0);
164 	if (!target_completion_wq)
165 		goto out_free_tg_pt_gp_mem_cache;
166 
167 	return 0;
168 
169 out_free_tg_pt_gp_mem_cache:
170 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
171 out_free_tg_pt_gp_cache:
172 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
173 out_free_lu_gp_mem_cache:
174 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
175 out_free_lu_gp_cache:
176 	kmem_cache_destroy(t10_alua_lu_gp_cache);
177 out_free_pr_reg_cache:
178 	kmem_cache_destroy(t10_pr_reg_cache);
179 out_free_ua_cache:
180 	kmem_cache_destroy(se_ua_cache);
181 out_free_sess_cache:
182 	kmem_cache_destroy(se_sess_cache);
183 out_free_tmr_req_cache:
184 	kmem_cache_destroy(se_tmr_req_cache);
185 out_free_cmd_cache:
186 	kmem_cache_destroy(se_cmd_cache);
187 out:
188 	return -ENOMEM;
189 }
190 
191 void release_se_kmem_caches(void)
192 {
193 	destroy_workqueue(target_completion_wq);
194 	kmem_cache_destroy(se_cmd_cache);
195 	kmem_cache_destroy(se_tmr_req_cache);
196 	kmem_cache_destroy(se_sess_cache);
197 	kmem_cache_destroy(se_ua_cache);
198 	kmem_cache_destroy(t10_pr_reg_cache);
199 	kmem_cache_destroy(t10_alua_lu_gp_cache);
200 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
201 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
202 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
203 }
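
/*
 * Illustrative sketch (not part of this file): init_se_kmem_caches() and
 * release_se_kmem_caches() are expected to be paired by the core's module
 * init/exit path, e.g. in a hypothetical caller:
 *
 *	static int __init example_target_core_init(void)
 *	{
 *		int ret;
 *
 *		ret = init_se_kmem_caches();
 *		if (ret < 0)
 *			return ret;
 *		return 0;
 *	}
 *
 *	static void __exit example_target_core_exit(void)
 *	{
 *		release_se_kmem_caches();
 *	}
 *
 * The error path in init_se_kmem_caches() already unwinds partially created
 * caches, so a caller only needs to check the single return value.
 */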
204 
205 /* This code ensures unique mib indexes are handed out. */
206 static DEFINE_SPINLOCK(scsi_mib_index_lock);
207 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
208 
209 /*
210  * Allocate a new row index for the entry type specified
211  */
212 u32 scsi_get_new_index(scsi_index_t type)
213 {
214 	u32 new_index;
215 
216 	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
217 
218 	spin_lock(&scsi_mib_index_lock);
219 	new_index = ++scsi_mib_index[type];
220 	spin_unlock(&scsi_mib_index_lock);
221 
222 	return new_index;
223 }
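
/*
 * Usage sketch: callers pass one of the scsi_index_t enumerators and store
 * the returned value as a statistics/MIB row index, e.g. as done for new
 * devices later in this file:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */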
224 
225 void transport_init_queue_obj(struct se_queue_obj *qobj)
226 {
227 	atomic_set(&qobj->queue_cnt, 0);
228 	INIT_LIST_HEAD(&qobj->qobj_list);
229 	init_waitqueue_head(&qobj->thread_wq);
230 	spin_lock_init(&qobj->cmd_queue_lock);
231 }
232 EXPORT_SYMBOL(transport_init_queue_obj);
233 
234 void transport_subsystem_check_init(void)
235 {
236 	int ret;
237 
238 	if (sub_api_initialized)
239 		return;
240 
241 	ret = request_module("target_core_iblock");
242 	if (ret != 0)
243 		pr_err("Unable to load target_core_iblock\n");
244 
245 	ret = request_module("target_core_file");
246 	if (ret != 0)
247 		pr_err("Unable to load target_core_file\n");
248 
249 	ret = request_module("target_core_pscsi");
250 	if (ret != 0)
251 		pr_err("Unable to load target_core_pscsi\n");
252 
253 	ret = request_module("target_core_stgt");
254 	if (ret != 0)
255 		pr_err("Unable to load target_core_stgt\n");
256 
257 	sub_api_initialized = 1;
258 	return;
259 }
260 
261 struct se_session *transport_init_session(void)
262 {
263 	struct se_session *se_sess;
264 
265 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
266 	if (!se_sess) {
267 		pr_err("Unable to allocate struct se_session from"
268 				" se_sess_cache\n");
269 		return ERR_PTR(-ENOMEM);
270 	}
271 	INIT_LIST_HEAD(&se_sess->sess_list);
272 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
273 	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
274 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
275 	spin_lock_init(&se_sess->sess_cmd_lock);
276 
277 	return se_sess;
278 }
279 EXPORT_SYMBOL(transport_init_session);
280 
281 /*
282  * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
283  */
284 void __transport_register_session(
285 	struct se_portal_group *se_tpg,
286 	struct se_node_acl *se_nacl,
287 	struct se_session *se_sess,
288 	void *fabric_sess_ptr)
289 {
290 	unsigned char buf[PR_REG_ISID_LEN];
291 
292 	se_sess->se_tpg = se_tpg;
293 	se_sess->fabric_sess_ptr = fabric_sess_ptr;
294 	/*
295 	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
296 	 *
297 	 * Only set for struct se_session's that will actually be moving I/O.
298 	 * e.g.: *NOT* discovery sessions.
299 	 */
300 	if (se_nacl) {
301 		/*
302 		 * If the fabric module supports an ISID based TransportID,
303 		 * save this value in binary from the fabric I_T Nexus now.
304 		 */
305 		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
306 			memset(&buf[0], 0, PR_REG_ISID_LEN);
307 			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
308 					&buf[0], PR_REG_ISID_LEN);
309 			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
310 		}
311 		spin_lock_irq(&se_nacl->nacl_sess_lock);
312 		/*
313 		 * The se_nacl->nacl_sess pointer will be set to the
314 		 * last active I_T Nexus for each struct se_node_acl.
315 		 */
316 		se_nacl->nacl_sess = se_sess;
317 
318 		list_add_tail(&se_sess->sess_acl_list,
319 			      &se_nacl->acl_sess_list);
320 		spin_unlock_irq(&se_nacl->nacl_sess_lock);
321 	}
322 	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
323 
324 	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
325 		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
326 }
327 EXPORT_SYMBOL(__transport_register_session);
328 
329 void transport_register_session(
330 	struct se_portal_group *se_tpg,
331 	struct se_node_acl *se_nacl,
332 	struct se_session *se_sess,
333 	void *fabric_sess_ptr)
334 {
335 	spin_lock_bh(&se_tpg->session_lock);
336 	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
337 	spin_unlock_bh(&se_tpg->session_lock);
338 }
339 EXPORT_SYMBOL(transport_register_session);
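
/*
 * Usage sketch for fabric modules (hypothetical caller, error handling
 * abbreviated): allocate the session, then register it against the TPG once
 * the fabric I_T Nexus has been established:
 *
 *	struct se_session *se_sess;
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 *
 * Fabrics that already hold se_tpg->session_lock call
 * __transport_register_session() directly instead.
 */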
340 
341 void transport_deregister_session_configfs(struct se_session *se_sess)
342 {
343 	struct se_node_acl *se_nacl;
344 	unsigned long flags;
345 	/*
346 	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
347 	 */
348 	se_nacl = se_sess->se_node_acl;
349 	if (se_nacl) {
350 		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
351 		list_del(&se_sess->sess_acl_list);
352 		/*
353 		 * If the session list is empty, then clear the pointer.
354 		 * Otherwise, set the struct se_session pointer from the tail
355 		 * element of the per struct se_node_acl active session list.
356 		 */
357 		if (list_empty(&se_nacl->acl_sess_list))
358 			se_nacl->nacl_sess = NULL;
359 		else {
360 			se_nacl->nacl_sess = container_of(
361 					se_nacl->acl_sess_list.prev,
362 					struct se_session, sess_acl_list);
363 		}
364 		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
365 	}
366 }
367 EXPORT_SYMBOL(transport_deregister_session_configfs);
368 
369 void transport_free_session(struct se_session *se_sess)
370 {
371 	kmem_cache_free(se_sess_cache, se_sess);
372 }
373 EXPORT_SYMBOL(transport_free_session);
374 
375 void transport_deregister_session(struct se_session *se_sess)
376 {
377 	struct se_portal_group *se_tpg = se_sess->se_tpg;
378 	struct se_node_acl *se_nacl;
379 	unsigned long flags;
380 
381 	if (!se_tpg) {
382 		transport_free_session(se_sess);
383 		return;
384 	}
385 
386 	spin_lock_irqsave(&se_tpg->session_lock, flags);
387 	list_del(&se_sess->sess_list);
388 	se_sess->se_tpg = NULL;
389 	se_sess->fabric_sess_ptr = NULL;
390 	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
391 
392 	/*
393 	 * Determine if we need to do extra work for this initiator node's
394 	 * struct se_node_acl if it had been previously dynamically generated.
395 	 */
396 	se_nacl = se_sess->se_node_acl;
397 	if (se_nacl) {
398 		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
399 		if (se_nacl->dynamic_node_acl) {
400 			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
401 					se_tpg)) {
402 				list_del(&se_nacl->acl_list);
403 				se_tpg->num_node_acls--;
404 				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
405 
406 				core_tpg_wait_for_nacl_pr_ref(se_nacl);
407 				core_free_device_list_for_node(se_nacl, se_tpg);
408 				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
409 						se_nacl);
410 				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
411 			}
412 		}
413 		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
414 	}
415 
416 	transport_free_session(se_sess);
417 
418 	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
419 		se_tpg->se_tpg_tfo->get_fabric_name());
420 }
421 EXPORT_SYMBOL(transport_deregister_session);
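
/*
 * Teardown sketch (hypothetical fabric shutdown path): drop the ConfigFS
 * visible se_nacl->nacl_sess link first, then deregister and free the
 * session itself:
 *
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 *
 * transport_deregister_session() calls transport_free_session() internally,
 * so the fabric must not reference se_sess afterwards.
 */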
422 
423 /*
424  * Called with cmd->t_state_lock held.
425  */
426 static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
427 {
428 	struct se_device *dev = cmd->se_dev;
429 	struct se_task *task;
430 	unsigned long flags;
431 
432 	if (!dev)
433 		return;
434 
435 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
436 		if (task->task_flags & TF_ACTIVE)
437 			continue;
438 
439 		if (!atomic_read(&task->task_state_active))
440 			continue;
441 
442 		spin_lock_irqsave(&dev->execute_task_lock, flags);
443 		list_del(&task->t_state_list);
444 		pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
445 			cmd->se_tfo->get_task_tag(cmd), dev, task);
446 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
447 
448 		atomic_set(&task->task_state_active, 0);
449 		atomic_dec(&cmd->t_task_cdbs_ex_left);
450 	}
451 }
452 
453 /*	transport_cmd_check_stop():
454  *
455  *	'transport_off = 1' determines if t_transport_active should be cleared.
456  *	'transport_off = 2' determines if task_dev_state should be removed.
457  *
458  *	A non-zero u8 t_state sets cmd->t_state.
459  *	Returns 1 when command is stopped, else 0.
460  */
461 static int transport_cmd_check_stop(
462 	struct se_cmd *cmd,
463 	int transport_off,
464 	u8 t_state)
465 {
466 	unsigned long flags;
467 
468 	spin_lock_irqsave(&cmd->t_state_lock, flags);
469 	/*
470 	 * Determine if IOCTL context caller is requesting the stopping of this
471 	 * command for LUN shutdown purposes.
472 	 */
473 	if (atomic_read(&cmd->transport_lun_stop)) {
474 		pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
475 			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
476 			cmd->se_tfo->get_task_tag(cmd));
477 
478 		atomic_set(&cmd->t_transport_active, 0);
479 		if (transport_off == 2)
480 			transport_all_task_dev_remove_state(cmd);
481 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
482 
483 		complete(&cmd->transport_lun_stop_comp);
484 		return 1;
485 	}
486 	/*
487 	 * Determine if frontend context caller is requesting the stopping of
488 	 * this command for frontend exceptions.
489 	 */
490 	if (atomic_read(&cmd->t_transport_stop)) {
491 		pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
492 			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
493 			cmd->se_tfo->get_task_tag(cmd));
494 
495 		if (transport_off == 2)
496 			transport_all_task_dev_remove_state(cmd);
497 
498 		/*
499 		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
500 		 * to FE.
501 		 */
502 		if (transport_off == 2)
503 			cmd->se_lun = NULL;
504 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
505 
506 		complete(&cmd->t_transport_stop_comp);
507 		return 1;
508 	}
509 	if (transport_off) {
510 		atomic_set(&cmd->t_transport_active, 0);
511 		if (transport_off == 2) {
512 			transport_all_task_dev_remove_state(cmd);
513 			/*
514 			 * Clear struct se_cmd->se_lun before the transport_off == 2
515 			 * handoff to fabric module.
516 			 */
517 			cmd->se_lun = NULL;
518 			/*
519 			 * Some fabric modules like tcm_loop can release
520 			 * their internally allocated I/O reference now and
521 			 * struct se_cmd now.
522 			 *
523 			 * Fabric modules are expected to return '1' here if the
524 			 * se_cmd being passed is released at this point,
525 			 * or zero if not being released.
526 			 */
527 			if (cmd->se_tfo->check_stop_free != NULL) {
528 				spin_unlock_irqrestore(
529 					&cmd->t_state_lock, flags);
530 
531 				return cmd->se_tfo->check_stop_free(cmd);
532 			}
533 		}
534 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
535 
536 		return 0;
537 	} else if (t_state)
538 		cmd->t_state = t_state;
539 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
540 
541 	return 0;
542 }
543 
544 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
545 {
546 	return transport_cmd_check_stop(cmd, 2, 0);
547 }
548 
549 static void transport_lun_remove_cmd(struct se_cmd *cmd)
550 {
551 	struct se_lun *lun = cmd->se_lun;
552 	unsigned long flags;
553 
554 	if (!lun)
555 		return;
556 
557 	spin_lock_irqsave(&cmd->t_state_lock, flags);
558 	if (!atomic_read(&cmd->transport_dev_active)) {
559 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
560 		goto check_lun;
561 	}
562 	atomic_set(&cmd->transport_dev_active, 0);
563 	transport_all_task_dev_remove_state(cmd);
564 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
565 
566 
567 check_lun:
568 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
569 	if (atomic_read(&cmd->transport_lun_active)) {
570 		list_del(&cmd->se_lun_node);
571 		atomic_set(&cmd->transport_lun_active, 0);
572 #if 0
573 		pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
574 			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
575 #endif
576 	}
577 	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
578 }
579 
580 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
581 {
582 	if (!cmd->se_tmr_req)
583 		transport_lun_remove_cmd(cmd);
584 
585 	if (transport_cmd_check_stop_to_fabric(cmd))
586 		return;
587 	if (remove) {
588 		transport_remove_cmd_from_queue(cmd);
589 		transport_put_cmd(cmd);
590 	}
591 }
592 
593 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
594 		bool at_head)
595 {
596 	struct se_device *dev = cmd->se_dev;
597 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
598 	unsigned long flags;
599 
600 	if (t_state) {
601 		spin_lock_irqsave(&cmd->t_state_lock, flags);
602 		cmd->t_state = t_state;
603 		atomic_set(&cmd->t_transport_active, 1);
604 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
605 	}
606 
607 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
608 
609 	/* If the cmd is already on the list, remove it before we add it */
610 	if (!list_empty(&cmd->se_queue_node))
611 		list_del(&cmd->se_queue_node);
612 	else
613 		atomic_inc(&qobj->queue_cnt);
614 
615 	if (at_head)
616 		list_add(&cmd->se_queue_node, &qobj->qobj_list);
617 	else
618 		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
619 	atomic_set(&cmd->t_transport_queue_active, 1);
620 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
621 
622 	wake_up_interruptible(&qobj->thread_wq);
623 }
624 
625 static struct se_cmd *
626 transport_get_cmd_from_queue(struct se_queue_obj *qobj)
627 {
628 	struct se_cmd *cmd;
629 	unsigned long flags;
630 
631 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
632 	if (list_empty(&qobj->qobj_list)) {
633 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
634 		return NULL;
635 	}
636 	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
637 
638 	atomic_set(&cmd->t_transport_queue_active, 0);
639 
640 	list_del_init(&cmd->se_queue_node);
641 	atomic_dec(&qobj->queue_cnt);
642 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
643 
644 	return cmd;
645 }
646 
647 static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
648 {
649 	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
650 	unsigned long flags;
651 
652 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
653 	if (!atomic_read(&cmd->t_transport_queue_active)) {
654 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
655 		return;
656 	}
657 	atomic_set(&cmd->t_transport_queue_active, 0);
658 	atomic_dec(&qobj->queue_cnt);
659 	list_del_init(&cmd->se_queue_node);
660 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
661 
662 	if (atomic_read(&cmd->t_transport_queue_active)) {
663 		pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
664 			cmd->se_tfo->get_task_tag(cmd),
665 			atomic_read(&cmd->t_transport_queue_active));
666 	}
667 }
668 
669 /*
670  * Completion function used by TCM subsystem plugins (such as FILEIO)
671  * for queueing up a response from struct se_subsystem_api->do_task()
672  */
673 void transport_complete_sync_cache(struct se_cmd *cmd, int good)
674 {
675 	struct se_task *task = list_entry(cmd->t_task_list.next,
676 				struct se_task, t_list);
677 
678 	if (good) {
679 		cmd->scsi_status = SAM_STAT_GOOD;
680 		task->task_scsi_status = GOOD;
681 	} else {
682 		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
683 		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
684 		task->task_se_cmd->transport_error_status =
685 					PYX_TRANSPORT_ILLEGAL_REQUEST;
686 	}
687 
688 	transport_complete_task(task, good);
689 }
690 EXPORT_SYMBOL(transport_complete_sync_cache);
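
/*
 * Usage sketch: a subsystem plugin handling SYNCHRONIZE_CACHE in its
 * do_task() callback (hypothetical 'ret' from the backend flush) would
 * report the outcome back with:
 *
 *	transport_complete_sync_cache(cmd, ret == 0);
 *
 * A non-zero 'good' value sets GOOD status; zero raises CHECK_CONDITION
 * with ILLEGAL_REQUEST, as implemented above.
 */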
691 
692 static void target_complete_failure_work(struct work_struct *work)
693 {
694 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
695 
696 	transport_generic_request_failure(cmd, 1, 1);
697 }
698 
699 /*	transport_complete_task():
700  *
701  *	Called from interrupt and non-interrupt context depending
702  *	on the transport plugin.
703  */
704 void transport_complete_task(struct se_task *task, int success)
705 {
706 	struct se_cmd *cmd = task->task_se_cmd;
707 	struct se_device *dev = cmd->se_dev;
708 	unsigned long flags;
709 #if 0
710 	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
711 			cmd->t_task_cdb[0], dev);
712 #endif
713 	if (dev)
714 		atomic_inc(&dev->depth_left);
715 
716 	spin_lock_irqsave(&cmd->t_state_lock, flags);
717 	task->task_flags &= ~TF_ACTIVE;
718 
719 	/*
720 	 * See if any sense data exists, if so set the TASK_SENSE flag.
721 	 * Also check for any other post completion work that needs to be
722 	 * done by the plugins.
723 	 */
724 	if (dev && dev->transport->transport_complete) {
725 		if (dev->transport->transport_complete(task) != 0) {
726 			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
727 			task->task_sense = 1;
728 			success = 1;
729 		}
730 	}
731 
732 	/*
733 	 * See if we are waiting for outstanding struct se_task
734 	 * to complete for an exception condition
735 	 */
736 	if (task->task_flags & TF_REQUEST_STOP) {
737 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
738 		complete(&task->task_stop_comp);
739 		return;
740 	}
741 
742 	if (!success)
743 		cmd->t_tasks_failed = 1;
744 
745 	/*
746 	 * Decrement the outstanding t_task_cdbs_left count.  The last
747 	 * struct se_task from struct se_cmd will complete itself into the
748 	 * device queue depending upon int success.
749 	 */
750 	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
751 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
752 		return;
753 	}
754 
755 	if (cmd->t_tasks_failed) {
756 		if (!task->task_error_status) {
757 			task->task_error_status =
758 				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
759 			cmd->transport_error_status =
760 				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
761 		}
762 		INIT_WORK(&cmd->work, target_complete_failure_work);
763 	} else {
764 		atomic_set(&cmd->t_transport_complete, 1);
765 		INIT_WORK(&cmd->work, target_complete_ok_work);
766 	}
767 
768 	cmd->t_state = TRANSPORT_COMPLETE;
769 	atomic_set(&cmd->t_transport_active, 1);
770 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
771 
772 	queue_work(target_completion_wq, &cmd->work);
773 }
774 EXPORT_SYMBOL(transport_complete_task);
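
/*
 * Usage sketch: backend plugins call this from their per-task completion
 * path, which may run in interrupt or process context, e.g. a hypothetical
 * completion handler:
 *
 *	static void example_end_io(struct se_task *task, int error)
 *	{
 *		transport_complete_task(task, !error);
 *	}
 *
 * The last completing task of a command queues either target_complete_ok_work
 * or target_complete_failure_work onto target_completion_wq, as seen above.
 */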
775 
776 /*
777  * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
778  * struct se_task list is ready to be added to the active execution list
779  * of the struct se_device.
780  *
781  * Called with se_dev_t->execute_task_lock held.
782  */
783 static inline int transport_add_task_check_sam_attr(
784 	struct se_task *task,
785 	struct se_task *task_prev,
786 	struct se_device *dev)
787 {
788 	/*
789 	 * No SAM Task attribute emulation enabled, add to tail of
790 	 * execution queue
791 	 */
792 	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
793 		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
794 		return 0;
795 	}
796 	/*
797 	 * HEAD_OF_QUEUE attribute for received CDB, which means
798 	 * the first task that is associated with a struct se_cmd goes to
799 	 * head of the struct se_device->execute_task_list, and task_prev
800 	 * after that for each subsequent task
801 	 */
802 	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
803 		list_add(&task->t_execute_list,
804 				(task_prev != NULL) ?
805 				&task_prev->t_execute_list :
806 				&dev->execute_task_list);
807 
808 		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
809 				" in execution queue\n",
810 				task->task_se_cmd->t_task_cdb[0]);
811 		return 1;
812 	}
813 	/*
814 	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
815 	 * transitioned from Dormant -> Active state, and are added to the end
816 	 * of the struct se_device->execute_task_list
817 	 */
818 	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
819 	return 0;
820 }
821 
822 /*	__transport_add_task_to_execute_queue():
823  *
824  *	Called with se_dev_t->execute_task_lock held.
825  */
826 static void __transport_add_task_to_execute_queue(
827 	struct se_task *task,
828 	struct se_task *task_prev,
829 	struct se_device *dev)
830 {
831 	int head_of_queue;
832 
833 	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
834 	atomic_inc(&dev->execute_tasks);
835 
836 	if (atomic_read(&task->task_state_active))
837 		return;
838 	/*
839 	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
840 	 * state list as well.  Running with SAM Task Attribute emulation
841 	 * will always return head_of_queue == 0 here
842 	 */
843 	if (head_of_queue)
844 		list_add(&task->t_state_list, (task_prev) ?
845 				&task_prev->t_state_list :
846 				&dev->state_task_list);
847 	else
848 		list_add_tail(&task->t_state_list, &dev->state_task_list);
849 
850 	atomic_set(&task->task_state_active, 1);
851 
852 	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
853 		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
854 		task, dev);
855 }
856 
857 static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
858 {
859 	struct se_device *dev = cmd->se_dev;
860 	struct se_task *task;
861 	unsigned long flags;
862 
863 	spin_lock_irqsave(&cmd->t_state_lock, flags);
864 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
865 		if (atomic_read(&task->task_state_active))
866 			continue;
867 
868 		spin_lock(&dev->execute_task_lock);
869 		list_add_tail(&task->t_state_list, &dev->state_task_list);
870 		atomic_set(&task->task_state_active, 1);
871 
872 		pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
873 			task->task_se_cmd->se_tfo->get_task_tag(
874 			task->task_se_cmd), task, dev);
875 
876 		spin_unlock(&dev->execute_task_lock);
877 	}
878 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
879 }
880 
881 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
882 {
883 	struct se_device *dev = cmd->se_dev;
884 	struct se_task *task, *task_prev = NULL;
885 	unsigned long flags;
886 
887 	spin_lock_irqsave(&dev->execute_task_lock, flags);
888 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
889 		if (!list_empty(&task->t_execute_list))
890 			continue;
891 		/*
892 		 * __transport_add_task_to_execute_queue() handles the
893 		 * SAM Task Attribute emulation if enabled
894 		 */
895 		__transport_add_task_to_execute_queue(task, task_prev, dev);
896 		task_prev = task;
897 	}
898 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
899 }
900 
901 void __transport_remove_task_from_execute_queue(struct se_task *task,
902 		struct se_device *dev)
903 {
904 	list_del_init(&task->t_execute_list);
905 	atomic_dec(&dev->execute_tasks);
906 }
907 
908 void transport_remove_task_from_execute_queue(
909 	struct se_task *task,
910 	struct se_device *dev)
911 {
912 	unsigned long flags;
913 
914 	if (WARN_ON(list_empty(&task->t_execute_list)))
915 		return;
916 
917 	spin_lock_irqsave(&dev->execute_task_lock, flags);
918 	__transport_remove_task_from_execute_queue(task, dev);
919 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
920 }
921 
922 /*
923  * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
924  */
925 
926 static void target_qf_do_work(struct work_struct *work)
927 {
928 	struct se_device *dev = container_of(work, struct se_device,
929 					qf_work_queue);
930 	LIST_HEAD(qf_cmd_list);
931 	struct se_cmd *cmd, *cmd_tmp;
932 
933 	spin_lock_irq(&dev->qf_cmd_lock);
934 	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
935 	spin_unlock_irq(&dev->qf_cmd_lock);
936 
937 	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
938 		list_del(&cmd->se_qf_node);
939 		atomic_dec(&dev->dev_qf_count);
940 		smp_mb__after_atomic_dec();
941 
942 		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
943 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
944 			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
945 			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
946 			: "UNKNOWN");
947 
948 		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
949 	}
950 }
951 
952 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
953 {
954 	switch (cmd->data_direction) {
955 	case DMA_NONE:
956 		return "NONE";
957 	case DMA_FROM_DEVICE:
958 		return "READ";
959 	case DMA_TO_DEVICE:
960 		return "WRITE";
961 	case DMA_BIDIRECTIONAL:
962 		return "BIDI";
963 	default:
964 		break;
965 	}
966 
967 	return "UNKNOWN";
968 }
969 
970 void transport_dump_dev_state(
971 	struct se_device *dev,
972 	char *b,
973 	int *bl)
974 {
975 	*bl += sprintf(b + *bl, "Status: ");
976 	switch (dev->dev_status) {
977 	case TRANSPORT_DEVICE_ACTIVATED:
978 		*bl += sprintf(b + *bl, "ACTIVATED");
979 		break;
980 	case TRANSPORT_DEVICE_DEACTIVATED:
981 		*bl += sprintf(b + *bl, "DEACTIVATED");
982 		break;
983 	case TRANSPORT_DEVICE_SHUTDOWN:
984 		*bl += sprintf(b + *bl, "SHUTDOWN");
985 		break;
986 	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
987 	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
988 		*bl += sprintf(b + *bl, "OFFLINE");
989 		break;
990 	default:
991 		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
992 		break;
993 	}
994 
995 	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
996 		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
997 		dev->queue_depth);
998 	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
999 		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
1000 	*bl += sprintf(b + *bl, "        ");
1001 }
1002 
1003 void transport_dump_vpd_proto_id(
1004 	struct t10_vpd *vpd,
1005 	unsigned char *p_buf,
1006 	int p_buf_len)
1007 {
1008 	unsigned char buf[VPD_TMP_BUF_SIZE];
1009 	int len;
1010 
1011 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1012 	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1013 
1014 	switch (vpd->protocol_identifier) {
1015 	case 0x00:
1016 		sprintf(buf+len, "Fibre Channel\n");
1017 		break;
1018 	case 0x10:
1019 		sprintf(buf+len, "Parallel SCSI\n");
1020 		break;
1021 	case 0x20:
1022 		sprintf(buf+len, "SSA\n");
1023 		break;
1024 	case 0x30:
1025 		sprintf(buf+len, "IEEE 1394\n");
1026 		break;
1027 	case 0x40:
1028 		sprintf(buf+len, "SCSI Remote Direct Memory Access"
1029 				" Protocol\n");
1030 		break;
1031 	case 0x50:
1032 		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1033 		break;
1034 	case 0x60:
1035 		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1036 		break;
1037 	case 0x70:
1038 		sprintf(buf+len, "Automation/Drive Interface Transport"
1039 				" Protocol\n");
1040 		break;
1041 	case 0x80:
1042 		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1043 		break;
1044 	default:
1045 		sprintf(buf+len, "Unknown 0x%02x\n",
1046 				vpd->protocol_identifier);
1047 		break;
1048 	}
1049 
1050 	if (p_buf)
1051 		strncpy(p_buf, buf, p_buf_len);
1052 	else
1053 		pr_debug("%s", buf);
1054 }
1055 
1056 void
1057 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1058 {
1059 	/*
1060 	 * Check if the Protocol Identifier Valid (PIV) bit is set..
1061 	 *
1062 	 * from spc3r23.pdf section 7.5.1
1063 	 */
1064 	 if (page_83[1] & 0x80) {
1065 		vpd->protocol_identifier = (page_83[0] & 0xf0);
1066 		vpd->protocol_identifier_set = 1;
1067 		transport_dump_vpd_proto_id(vpd, NULL, 0);
1068 	}
1069 }
1070 EXPORT_SYMBOL(transport_set_vpd_proto_id);
1071 
1072 int transport_dump_vpd_assoc(
1073 	struct t10_vpd *vpd,
1074 	unsigned char *p_buf,
1075 	int p_buf_len)
1076 {
1077 	unsigned char buf[VPD_TMP_BUF_SIZE];
1078 	int ret = 0;
1079 	int len;
1080 
1081 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1082 	len = sprintf(buf, "T10 VPD Identifier Association: ");
1083 
1084 	switch (vpd->association) {
1085 	case 0x00:
1086 		sprintf(buf+len, "addressed logical unit\n");
1087 		break;
1088 	case 0x10:
1089 		sprintf(buf+len, "target port\n");
1090 		break;
1091 	case 0x20:
1092 		sprintf(buf+len, "SCSI target device\n");
1093 		break;
1094 	default:
1095 		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1096 		ret = -EINVAL;
1097 		break;
1098 	}
1099 
1100 	if (p_buf)
1101 		strncpy(p_buf, buf, p_buf_len);
1102 	else
1103 		pr_debug("%s", buf);
1104 
1105 	return ret;
1106 }
1107 
1108 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1109 {
1110 	/*
1111 	 * The VPD identification association..
1112 	 *
1113 	 * from spc3r23.pdf Section 7.6.3.1 Table 297
1114 	 */
1115 	vpd->association = (page_83[1] & 0x30);
1116 	return transport_dump_vpd_assoc(vpd, NULL, 0);
1117 }
1118 EXPORT_SYMBOL(transport_set_vpd_assoc);
1119 
1120 int transport_dump_vpd_ident_type(
1121 	struct t10_vpd *vpd,
1122 	unsigned char *p_buf,
1123 	int p_buf_len)
1124 {
1125 	unsigned char buf[VPD_TMP_BUF_SIZE];
1126 	int ret = 0;
1127 	int len;
1128 
1129 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1130 	len = sprintf(buf, "T10 VPD Identifier Type: ");
1131 
1132 	switch (vpd->device_identifier_type) {
1133 	case 0x00:
1134 		sprintf(buf+len, "Vendor specific\n");
1135 		break;
1136 	case 0x01:
1137 		sprintf(buf+len, "T10 Vendor ID based\n");
1138 		break;
1139 	case 0x02:
1140 		sprintf(buf+len, "EUI-64 based\n");
1141 		break;
1142 	case 0x03:
1143 		sprintf(buf+len, "NAA\n");
1144 		break;
1145 	case 0x04:
1146 		sprintf(buf+len, "Relative target port identifier\n");
1147 		break;
1148 	case 0x08:
1149 		sprintf(buf+len, "SCSI name string\n");
1150 		break;
1151 	default:
1152 		sprintf(buf+len, "Unsupported: 0x%02x\n",
1153 				vpd->device_identifier_type);
1154 		ret = -EINVAL;
1155 		break;
1156 	}
1157 
1158 	if (p_buf) {
1159 		if (p_buf_len < strlen(buf)+1)
1160 			return -EINVAL;
1161 		strncpy(p_buf, buf, p_buf_len);
1162 	} else {
1163 		pr_debug("%s", buf);
1164 	}
1165 
1166 	return ret;
1167 }
1168 
1169 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1170 {
1171 	/*
1172 	 * The VPD identifier type..
1173 	 *
1174 	 * from spc3r23.pdf Section 7.6.3.1 Table 298
1175 	 */
1176 	vpd->device_identifier_type = (page_83[1] & 0x0f);
1177 	return transport_dump_vpd_ident_type(vpd, NULL, 0);
1178 }
1179 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1180 
1181 int transport_dump_vpd_ident(
1182 	struct t10_vpd *vpd,
1183 	unsigned char *p_buf,
1184 	int p_buf_len)
1185 {
1186 	unsigned char buf[VPD_TMP_BUF_SIZE];
1187 	int ret = 0;
1188 
1189 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1190 
1191 	switch (vpd->device_identifier_code_set) {
1192 	case 0x01: /* Binary */
1193 		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1194 			&vpd->device_identifier[0]);
1195 		break;
1196 	case 0x02: /* ASCII */
1197 		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1198 			&vpd->device_identifier[0]);
1199 		break;
1200 	case 0x03: /* UTF-8 */
1201 		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1202 			&vpd->device_identifier[0]);
1203 		break;
1204 	default:
1205 		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1206 			" 0x%02x", vpd->device_identifier_code_set);
1207 		ret = -EINVAL;
1208 		break;
1209 	}
1210 
1211 	if (p_buf)
1212 		strncpy(p_buf, buf, p_buf_len);
1213 	else
1214 		pr_debug("%s", buf);
1215 
1216 	return ret;
1217 }
1218 
1219 int
1220 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1221 {
1222 	static const char hex_str[] = "0123456789abcdef";
1223 	int j = 0, i = 4; /* offset to start of the identifier */
1224 
1225 	/*
1226 	 * The VPD Code Set (encoding)
1227 	 *
1228 	 * from spc3r23.pdf Section 7.6.3.1 Table 296
1229 	 */
1230 	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1231 	switch (vpd->device_identifier_code_set) {
1232 	case 0x01: /* Binary */
1233 		vpd->device_identifier[j++] =
1234 				hex_str[vpd->device_identifier_type];
1235 		while (i < (4 + page_83[3])) {
1236 			vpd->device_identifier[j++] =
1237 				hex_str[(page_83[i] & 0xf0) >> 4];
1238 			vpd->device_identifier[j++] =
1239 				hex_str[page_83[i] & 0x0f];
1240 			i++;
1241 		}
1242 		break;
1243 	case 0x02: /* ASCII */
1244 	case 0x03: /* UTF-8 */
1245 		while (i < (4 + page_83[3]))
1246 			vpd->device_identifier[j++] = page_83[i++];
1247 		break;
1248 	default:
1249 		break;
1250 	}
1251 
1252 	return transport_dump_vpd_ident(vpd, NULL, 0);
1253 }
1254 EXPORT_SYMBOL(transport_set_vpd_ident);
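
/*
 * Usage sketch: a caller walking INQUIRY EVPD 0x83 designation descriptors
 * ('page_83' pointing at one descriptor, a hypothetical local variable)
 * populates a struct t10_vpd with the helpers above:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 *
 * Each transport_set_vpd_*() also dumps the decoded field through the
 * matching transport_dump_vpd_*() routine.
 */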
1255 
1256 static void core_setup_task_attr_emulation(struct se_device *dev)
1257 {
1258 	/*
1259 	 * If this device is from Target_Core_Mod/pSCSI, disable the
1260 	 * SAM Task Attribute emulation.
1261 	 *
1262 	 * This is currently not available in upstream Linux/SCSI Target
1263 	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1264 	 */
1265 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1266 		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1267 		return;
1268 	}
1269 
1270 	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1271 	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1272 		" device\n", dev->transport->name,
1273 		dev->transport->get_device_rev(dev));
1274 }
1275 
1276 static void scsi_dump_inquiry(struct se_device *dev)
1277 {
1278 	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1279 	int i, device_type;
1280 	/*
1281 	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1282 	 */
1283 	pr_debug("  Vendor: ");
1284 	for (i = 0; i < 8; i++)
1285 		if (wwn->vendor[i] >= 0x20)
1286 			pr_debug("%c", wwn->vendor[i]);
1287 		else
1288 			pr_debug(" ");
1289 
1290 	pr_debug("  Model: ");
1291 	for (i = 0; i < 16; i++)
1292 		if (wwn->model[i] >= 0x20)
1293 			pr_debug("%c", wwn->model[i]);
1294 		else
1295 			pr_debug(" ");
1296 
1297 	pr_debug("  Revision: ");
1298 	for (i = 0; i < 4; i++)
1299 		if (wwn->revision[i] >= 0x20)
1300 			pr_debug("%c", wwn->revision[i]);
1301 		else
1302 			pr_debug(" ");
1303 
1304 	pr_debug("\n");
1305 
1306 	device_type = dev->transport->get_device_type(dev);
1307 	pr_debug("  Type:   %s ", scsi_device_type(device_type));
1308 	pr_debug("                 ANSI SCSI revision: %02x\n",
1309 				dev->transport->get_device_rev(dev));
1310 }
1311 
1312 struct se_device *transport_add_device_to_core_hba(
1313 	struct se_hba *hba,
1314 	struct se_subsystem_api *transport,
1315 	struct se_subsystem_dev *se_dev,
1316 	u32 device_flags,
1317 	void *transport_dev,
1318 	struct se_dev_limits *dev_limits,
1319 	const char *inquiry_prod,
1320 	const char *inquiry_rev)
1321 {
1322 	int force_pt;
1323 	struct se_device  *dev;
1324 
1325 	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1326 	if (!dev) {
1327 		pr_err("Unable to allocate memory for se_dev_t\n");
1328 		return NULL;
1329 	}
1330 
1331 	transport_init_queue_obj(&dev->dev_queue_obj);
1332 	dev->dev_flags		= device_flags;
1333 	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
1334 	dev->dev_ptr		= transport_dev;
1335 	dev->se_hba		= hba;
1336 	dev->se_sub_dev		= se_dev;
1337 	dev->transport		= transport;
1338 	atomic_set(&dev->active_cmds, 0);
1339 	INIT_LIST_HEAD(&dev->dev_list);
1340 	INIT_LIST_HEAD(&dev->dev_sep_list);
1341 	INIT_LIST_HEAD(&dev->dev_tmr_list);
1342 	INIT_LIST_HEAD(&dev->execute_task_list);
1343 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
1344 	INIT_LIST_HEAD(&dev->ordered_cmd_list);
1345 	INIT_LIST_HEAD(&dev->state_task_list);
1346 	INIT_LIST_HEAD(&dev->qf_cmd_list);
1347 	spin_lock_init(&dev->execute_task_lock);
1348 	spin_lock_init(&dev->delayed_cmd_lock);
1349 	spin_lock_init(&dev->ordered_cmd_lock);
1350 	spin_lock_init(&dev->state_task_lock);
1351 	spin_lock_init(&dev->dev_alua_lock);
1352 	spin_lock_init(&dev->dev_reservation_lock);
1353 	spin_lock_init(&dev->dev_status_lock);
1354 	spin_lock_init(&dev->dev_status_thr_lock);
1355 	spin_lock_init(&dev->se_port_lock);
1356 	spin_lock_init(&dev->se_tmr_lock);
1357 	spin_lock_init(&dev->qf_cmd_lock);
1358 
1359 	dev->queue_depth	= dev_limits->queue_depth;
1360 	atomic_set(&dev->depth_left, dev->queue_depth);
1361 	atomic_set(&dev->dev_ordered_id, 0);
1362 
1363 	se_dev_set_default_attribs(dev, dev_limits);
1364 
1365 	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1366 	dev->creation_time = get_jiffies_64();
1367 	spin_lock_init(&dev->stats_lock);
1368 
1369 	spin_lock(&hba->device_lock);
1370 	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1371 	hba->dev_count++;
1372 	spin_unlock(&hba->device_lock);
1373 	/*
1374 	 * Setup the SAM Task Attribute emulation for struct se_device
1375 	 */
1376 	core_setup_task_attr_emulation(dev);
1377 	/*
1378 	 * Force PR and ALUA passthrough emulation with internal object use.
1379 	 */
1380 	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1381 	/*
1382 	 * Setup the Reservations infrastructure for struct se_device
1383 	 */
1384 	core_setup_reservations(dev, force_pt);
1385 	/*
1386 	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1387 	 */
1388 	if (core_setup_alua(dev, force_pt) < 0)
1389 		goto out;
1390 
1391 	/*
1392 	 * Startup the struct se_device processing thread
1393 	 */
1394 	dev->process_thread = kthread_run(transport_processing_thread, dev,
1395 					  "LIO_%s", dev->transport->name);
1396 	if (IS_ERR(dev->process_thread)) {
1397 		pr_err("Unable to create kthread: LIO_%s\n",
1398 			dev->transport->name);
1399 		goto out;
1400 	}
1401 	/*
1402 	 * Setup work_queue for QUEUE_FULL
1403 	 */
1404 	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1405 	/*
1406 	 * Preload the initial INQUIRY const values if we are doing
1407 	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1408 	 * passthrough because this is being provided by the backend LLD.
1409 	 * This is required so that transport_get_inquiry() copies these
1410 	 * originals once back into DEV_T10_WWN(dev) for the virtual device
1411 	 * setup.
1412 	 */
1413 	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1414 		if (!inquiry_prod || !inquiry_rev) {
1415 			pr_err("All non TCM/pSCSI plugins require"
1416 				" INQUIRY consts\n");
1417 			goto out;
1418 		}
1419 
1420 		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1421 		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1422 		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1423 	}
1424 	scsi_dump_inquiry(dev);
1425 
1426 	return dev;
1427 out:
1428 	kthread_stop(dev->process_thread);
1429 
1430 	spin_lock(&hba->device_lock);
1431 	list_del(&dev->dev_list);
1432 	hba->dev_count--;
1433 	spin_unlock(&hba->device_lock);
1434 
1435 	se_release_vpd_for_dev(dev);
1436 
1437 	kfree(dev);
1438 
1439 	return NULL;
1440 }
1441 EXPORT_SYMBOL(transport_add_device_to_core_hba);
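
/*
 * Usage sketch: a backend subsystem plugin (hypothetical 'example' backend)
 * hands its new device to the core roughly as follows:
 *
 *	dev = transport_add_device_to_core_hba(hba, &example_template,
 *			se_dev, dev_flags, example_dev_ptr,
 *			&dev_limits, "EXAMPLE", "1.0");
 *	if (!dev)
 *		goto fail;
 *
 * The inquiry_prod/inquiry_rev strings are mandatory for every non TCM/pSCSI
 * plugin, as enforced above.
 */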
1442 
1443 /*	transport_generic_prepare_cdb():
1444  *
1445  *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1446  *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1447  *	The point of this is that since we are mapping iSCSI LUNs to
1448  *	SCSI Target IDs, a non-zero LUN in the CDB will throw the
1449  *	devices and HBAs for a loop.
1450  */
1451 static inline void transport_generic_prepare_cdb(
1452 	unsigned char *cdb)
1453 {
1454 	switch (cdb[0]) {
1455 	case READ_10: /* SBC - RDProtect */
1456 	case READ_12: /* SBC - RDProtect */
1457 	case READ_16: /* SBC - RDProtect */
1458 	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1459 	case VERIFY: /* SBC - VRProtect */
1460 	case VERIFY_16: /* SBC - VRProtect */
1461 	case WRITE_VERIFY: /* SBC - VRProtect */
1462 	case WRITE_VERIFY_12: /* SBC - VRProtect */
1463 		break;
1464 	default:
1465 		cdb[1] &= 0x1f; /* clear logical unit number */
1466 		break;
1467 	}
1468 }
1469 
1470 static struct se_task *
1471 transport_generic_get_task(struct se_cmd *cmd,
1472 		enum dma_data_direction data_direction)
1473 {
1474 	struct se_task *task;
1475 	struct se_device *dev = cmd->se_dev;
1476 
1477 	task = dev->transport->alloc_task(cmd->t_task_cdb);
1478 	if (!task) {
1479 		pr_err("Unable to allocate struct se_task\n");
1480 		return NULL;
1481 	}
1482 
1483 	INIT_LIST_HEAD(&task->t_list);
1484 	INIT_LIST_HEAD(&task->t_execute_list);
1485 	INIT_LIST_HEAD(&task->t_state_list);
1486 	init_completion(&task->task_stop_comp);
1487 	task->task_se_cmd = cmd;
1488 	task->task_data_direction = data_direction;
1489 
1490 	return task;
1491 }
1492 
1493 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1494 
1495 /*
1496  * Used by fabric modules containing a local struct se_cmd within their
1497  * fabric dependent per I/O descriptor.
1498  */
1499 void transport_init_se_cmd(
1500 	struct se_cmd *cmd,
1501 	struct target_core_fabric_ops *tfo,
1502 	struct se_session *se_sess,
1503 	u32 data_length,
1504 	int data_direction,
1505 	int task_attr,
1506 	unsigned char *sense_buffer)
1507 {
1508 	INIT_LIST_HEAD(&cmd->se_lun_node);
1509 	INIT_LIST_HEAD(&cmd->se_delayed_node);
1510 	INIT_LIST_HEAD(&cmd->se_ordered_node);
1511 	INIT_LIST_HEAD(&cmd->se_qf_node);
1512 	INIT_LIST_HEAD(&cmd->se_queue_node);
1513 	INIT_LIST_HEAD(&cmd->se_cmd_list);
1514 	INIT_LIST_HEAD(&cmd->t_task_list);
1515 	init_completion(&cmd->transport_lun_fe_stop_comp);
1516 	init_completion(&cmd->transport_lun_stop_comp);
1517 	init_completion(&cmd->t_transport_stop_comp);
1518 	init_completion(&cmd->cmd_wait_comp);
1519 	spin_lock_init(&cmd->t_state_lock);
1520 	atomic_set(&cmd->transport_dev_active, 1);
1521 
1522 	cmd->se_tfo = tfo;
1523 	cmd->se_sess = se_sess;
1524 	cmd->data_length = data_length;
1525 	cmd->data_direction = data_direction;
1526 	cmd->sam_task_attr = task_attr;
1527 	cmd->sense_buffer = sense_buffer;
1528 }
1529 EXPORT_SYMBOL(transport_init_se_cmd);
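
/*
 * Usage sketch: a fabric module embedding a struct se_cmd in its per-I/O
 * descriptor (hypothetical 'example_cmd') initializes it before CDB setup:
 *
 *	transport_init_se_cmd(&example_cmd->se_cmd, &example_fabric_ops,
 *			se_sess, data_length, data_direction,
 *			task_attr, example_cmd->sense_buf);
 *
 * Only after this may transport_generic_allocate_tasks() be called on the
 * descriptor.
 */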
1530 
1531 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1532 {
1533 	/*
1534 	 * Check if SAM Task Attribute emulation is enabled for this
1535 	 * struct se_device storage object
1536 	 */
1537 	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1538 		return 0;
1539 
1540 	if (cmd->sam_task_attr == MSG_ACA_TAG) {
1541 		pr_debug("SAM Task Attribute ACA"
1542 			" emulation is not supported\n");
1543 		return -EINVAL;
1544 	}
1545 	/*
1546 	 * Used to determine when ORDERED commands should go from
1547 	 * Dormant to Active status.
1548 	 */
1549 	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1550 	smp_mb__after_atomic_inc();
1551 	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1552 			cmd->se_ordered_id, cmd->sam_task_attr,
1553 			cmd->se_dev->transport->name);
1554 	return 0;
1555 }
1556 
1557 /*	transport_generic_allocate_tasks():
1558  *
1559  *	Called from fabric RX Thread.
1560  */
1561 int transport_generic_allocate_tasks(
1562 	struct se_cmd *cmd,
1563 	unsigned char *cdb)
1564 {
1565 	int ret;
1566 
1567 	transport_generic_prepare_cdb(cdb);
1568 	/*
1569 	 * Ensure that the received CDB is less than the max (252 + 8) bytes
1570 	 * for VARIABLE_LENGTH_CMD
1571 	 */
1572 	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1573 		pr_err("Received SCSI CDB with command_size: %d that"
1574 			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1575 			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1576 		return -EINVAL;
1577 	}
1578 	/*
1579 	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1580 	 * allocate the additional extended CDB buffer now..  Otherwise
1581 	 * setup the pointer from __t_task_cdb to t_task_cdb.
1582 	 */
1583 	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1584 		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1585 						GFP_KERNEL);
1586 		if (!cmd->t_task_cdb) {
1587 			pr_err("Unable to allocate cmd->t_task_cdb"
1588 				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1589 				scsi_command_size(cdb),
1590 				(unsigned long)sizeof(cmd->__t_task_cdb));
1591 			return -ENOMEM;
1592 		}
1593 	} else
1594 		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1595 	/*
1596 	 * Copy the original CDB into cmd->
1597 	 */
1598 	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1599 	/*
1600 	 * Setup the received CDB based on SCSI defined opcodes and
1601 	 * perform unit attention, persistent reservations and ALUA
1602 	 * checks for virtual device backends.  The cmd->t_task_cdb
1603 	 * pointer is expected to be setup before we reach this point.
1604 	 */
1605 	ret = transport_generic_cmd_sequencer(cmd, cdb);
1606 	if (ret < 0)
1607 		return ret;
1608 	/*
1609 	 * Check for SAM Task Attribute Emulation
1610 	 */
1611 	if (transport_check_alloc_task_attr(cmd) < 0) {
1612 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1613 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1614 		return -EINVAL;
1615 	}
1616 	spin_lock(&cmd->se_lun->lun_sep_lock);
1617 	if (cmd->se_lun->lun_sep)
1618 		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1619 	spin_unlock(&cmd->se_lun->lun_sep_lock);
1620 	return 0;
1621 }
1622 EXPORT_SYMBOL(transport_generic_allocate_tasks);
1623 
1624 /*
1625  * Used by fabric module frontends to queue tasks directly.
1626  * May only be used from process context.
1627  */
1628 int transport_handle_cdb_direct(
1629 	struct se_cmd *cmd)
1630 {
1631 	int ret;
1632 
1633 	if (!cmd->se_lun) {
1634 		dump_stack();
1635 		pr_err("cmd->se_lun is NULL\n");
1636 		return -EINVAL;
1637 	}
1638 	if (in_interrupt()) {
1639 		dump_stack();
1640 		pr_err("transport_generic_handle_cdb cannot be called"
1641 				" from interrupt context\n");
1642 		return -EINVAL;
1643 	}
1644 	/*
1645 	 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
1646 	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1647 	 * in existing usage to ensure that outstanding descriptors are handled
1648 	 * correctly during shutdown via transport_wait_for_tasks()
1649 	 *
1650 	 * Also, we don't take cmd->t_state_lock here as we only expect
1651 	 * this to be called for initial descriptor submission.
1652 	 */
1653 	cmd->t_state = TRANSPORT_NEW_CMD;
1654 	atomic_set(&cmd->t_transport_active, 1);
1655 	/*
1656 	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1657 	 * so follow TRANSPORT_NEW_CMD processing thread context usage
1658 	 * and call transport_generic_request_failure() if necessary..
1659 	 */
1660 	ret = transport_generic_new_cmd(cmd);
1661 	if (ret < 0) {
1662 		cmd->transport_error_status = ret;
1663 		transport_generic_request_failure(cmd, 0,
1664 				(cmd->data_direction != DMA_TO_DEVICE));
1665 	}
1666 	return 0;
1667 }
1668 EXPORT_SYMBOL(transport_handle_cdb_direct);
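
/*
 * Submission sketch (hypothetical fabric RX path, error handling
 * abbreviated):
 *
 *	ret = transport_generic_allocate_tasks(se_cmd, cdb);
 *	if (ret < 0)
 *		return ret;	/* fabric-specific exception handling */
 *	return transport_handle_cdb_direct(se_cmd);
 *
 * transport_handle_cdb_direct() must be called from process context, as
 * checked above; fabrics that need deferred ->new_cmd_map() processing use
 * transport_generic_handle_cdb_map() instead.
 */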
1669 
1670 /*
1671  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1672  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1673  * complete setup in TCM process context w/ TFO->new_cmd_map().
1674  */
1675 int transport_generic_handle_cdb_map(
1676 	struct se_cmd *cmd)
1677 {
1678 	if (!cmd->se_lun) {
1679 		dump_stack();
1680 		pr_err("cmd->se_lun is NULL\n");
1681 		return -EINVAL;
1682 	}
1683 
1684 	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
1685 	return 0;
1686 }
1687 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1688 
1689 /*	transport_generic_handle_data():
1690  *
1691  *
1692  */
1693 int transport_generic_handle_data(
1694 	struct se_cmd *cmd)
1695 {
1696 	/*
1697 	 * For the software fabric case, we assume the nexus is being
1698 	 * failed/shutdown when signals are pending from the kthread context
1699 	 * caller, so we return a failure.  For the HW target mode case running
1700 	 * in interrupt code, the signal_pending() check is skipped.
1701 	 */
1702 	if (!in_interrupt() && signal_pending(current))
1703 		return -EPERM;
1704 	/*
1705 	 * If the received CDB has already been ABORTED by the generic
1706 	 * target engine, we now call transport_check_aborted_status()
1707 	 * to queue any delayed TASK_ABORTED status for the received CDB to the
1708 	 * fabric module as we are expecting no further incoming DATA OUT
1709 	 * sequences at this point.
1710 	 */
1711 	if (transport_check_aborted_status(cmd, 1) != 0)
1712 		return 0;
1713 
1714 	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
1715 	return 0;
1716 }
1717 EXPORT_SYMBOL(transport_generic_handle_data);
1718 
1719 /*	transport_generic_handle_tmr():
1720  *
1721  *
1722  */
1723 int transport_generic_handle_tmr(
1724 	struct se_cmd *cmd)
1725 {
1726 	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
1727 	return 0;
1728 }
1729 EXPORT_SYMBOL(transport_generic_handle_tmr);
1730 
1731 /*
1732  * If the task is active, request it to be stopped and sleep until it
1733  * has completed.
1734  */
1735 bool target_stop_task(struct se_task *task, unsigned long *flags)
1736 {
1737 	struct se_cmd *cmd = task->task_se_cmd;
1738 	bool was_active = false;
1739 
1740 	if (task->task_flags & TF_ACTIVE) {
1741 		task->task_flags |= TF_REQUEST_STOP;
1742 		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1743 
1744 		pr_debug("Task %p waiting to complete\n", task);
1745 		wait_for_completion(&task->task_stop_comp);
1746 		pr_debug("Task %p stopped successfully\n", task);
1747 
1748 		spin_lock_irqsave(&cmd->t_state_lock, *flags);
1749 		atomic_dec(&cmd->t_task_cdbs_left);
1750 		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
1751 		was_active = true;
1752 	}
1753 
1754 	return was_active;
1755 }
1756 
1757 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1758 {
1759 	struct se_task *task, *task_tmp;
1760 	unsigned long flags;
1761 	int ret = 0;
1762 
1763 	pr_debug("ITT[0x%08x] - Stopping tasks\n",
1764 		cmd->se_tfo->get_task_tag(cmd));
1765 
1766 	/*
1767 	 * No tasks remain in the execution queue
1768 	 */
1769 	spin_lock_irqsave(&cmd->t_state_lock, flags);
1770 	list_for_each_entry_safe(task, task_tmp,
1771 				&cmd->t_task_list, t_list) {
1772 		pr_debug("Processing task %p\n", task);
1773 		/*
1774 		 * If the struct se_task has not been sent and is not active,
1775 		 * remove the struct se_task from the execution queue.
1776 		 */
1777 		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
1778 			spin_unlock_irqrestore(&cmd->t_state_lock,
1779 					flags);
1780 			transport_remove_task_from_execute_queue(task,
1781 					cmd->se_dev);
1782 
1783 			pr_debug("Task %p removed from execute queue\n", task);
1784 			spin_lock_irqsave(&cmd->t_state_lock, flags);
1785 			continue;
1786 		}
1787 
1788 		if (!target_stop_task(task, &flags)) {
1789 			pr_debug("Task %p - did nothing\n", task);
1790 			ret++;
1791 		}
1792 	}
1793 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1794 
1795 	return ret;
1796 }
1797 
1798 /*
1799  * Handle SAM-esque emulation for generic transport request failures.
1800  */
1801 static void transport_generic_request_failure(
1802 	struct se_cmd *cmd,
1803 	int complete,
1804 	int sc)
1805 {
1806 	int ret = 0;
1807 
1808 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1809 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1810 		cmd->t_task_cdb[0]);
1811 	pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
1812 		cmd->se_tfo->get_cmd_state(cmd),
1813 		cmd->t_state,
1814 		cmd->transport_error_status);
1815 	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1816 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1817 		" t_transport_active: %d t_transport_stop: %d"
1818 		" t_transport_sent: %d\n", cmd->t_task_list_num,
1819 		atomic_read(&cmd->t_task_cdbs_left),
1820 		atomic_read(&cmd->t_task_cdbs_sent),
1821 		atomic_read(&cmd->t_task_cdbs_ex_left),
1822 		atomic_read(&cmd->t_transport_active),
1823 		atomic_read(&cmd->t_transport_stop),
1824 		atomic_read(&cmd->t_transport_sent));
1825 
1826 	/*
1827 	 * Complete SAM Task Attribute emulation for the failed struct se_cmd
1828 	 */
1829 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1830 		transport_complete_task_attr(cmd);
1831 
1832 	if (complete) {
1833 		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
1834 	}
1835 
1836 	switch (cmd->transport_error_status) {
1837 	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
1838 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1839 		break;
1840 	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
1841 		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
1842 		break;
1843 	case PYX_TRANSPORT_INVALID_CDB_FIELD:
1844 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1845 		break;
1846 	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
1847 		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1848 		break;
1849 	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
1850 		if (!sc)
1851 			transport_new_cmd_failure(cmd);
1852 		/*
1853 		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
1854 		 * we force this session to fall back to session
1855 		 * recovery.
1856 		 */
1857 		cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
1858 		cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
1859 
1860 		goto check_stop;
1861 	case PYX_TRANSPORT_LU_COMM_FAILURE:
1862 	case PYX_TRANSPORT_ILLEGAL_REQUEST:
1863 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1864 		break;
1865 	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
1866 		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
1867 		break;
1868 	case PYX_TRANSPORT_WRITE_PROTECTED:
1869 		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
1870 		break;
1871 	case PYX_TRANSPORT_RESERVATION_CONFLICT:
1872 		/*
1873 		 * No SENSE Data payload for this case, set SCSI Status
1874 		 * and queue the response to $FABRIC_MOD.
1875 		 *
1876 		 * Uses the SAM status code definitions from linux/include/scsi/scsi.h
1877 		 */
1878 		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1879 		/*
1880 		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1881 		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1882 		 * CONFLICT STATUS.
1883 		 *
1884 		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1885 		 */
1886 		if (cmd->se_sess &&
1887 		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1888 			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1889 				cmd->orig_fe_lun, 0x2C,
1890 				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1891 
1892 		ret = cmd->se_tfo->queue_status(cmd);
1893 		if (ret == -EAGAIN || ret == -ENOMEM)
1894 			goto queue_full;
1895 		goto check_stop;
1896 	case PYX_TRANSPORT_USE_SENSE_REASON:
1897 		/*
1898 		 * struct se_cmd->scsi_sense_reason already set
1899 		 */
1900 		break;
1901 	default:
1902 		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1903 			cmd->t_task_cdb[0],
1904 			cmd->transport_error_status);
1905 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1906 		break;
1907 	}
1908 	/*
1909 	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
1910 	 * make the call to transport_send_check_condition_and_sense()
1911 	 * directly.  Otherwise expect the fabric to make the call to
1912 	 * transport_send_check_condition_and_sense() after handling
1913 	 * possible unsolicited write data payloads.
1914 	 */
1915 	if (!sc && !cmd->se_tfo->new_cmd_map)
1916 		transport_new_cmd_failure(cmd);
1917 	else {
1918 		ret = transport_send_check_condition_and_sense(cmd,
1919 				cmd->scsi_sense_reason, 0);
1920 		if (ret == -EAGAIN || ret == -ENOMEM)
1921 			goto queue_full;
1922 	}
1923 
1924 check_stop:
1925 	transport_lun_remove_cmd(cmd);
1926 	transport_cmd_check_stop_to_fabric(cmd);
1928 	return;
1929 
1930 queue_full:
1931 	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1932 	transport_handle_queue_full(cmd, cmd->se_dev);
1933 }
1934 
1935 static inline u32 transport_lba_21(unsigned char *cdb)
1936 {
1937 	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1938 }
1939 
1940 static inline u32 transport_lba_32(unsigned char *cdb)
1941 {
1942 	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1943 }
1944 
1945 static inline unsigned long long transport_lba_64(unsigned char *cdb)
1946 {
1947 	unsigned int __v1, __v2;
1948 
1949 	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1950 	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1951 
1952 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1953 }
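
/*
 * Worked example for transport_lba_64(): for a READ_16 CDB with
 * cdb[2..9] = { 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00 },
 * __v1 = 0x00000001 (upper 32 bits) and __v2 = 0x00000000 (lower 32 bits),
 * so the returned LBA is 0x100000000.
 */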
1954 
1955 /*
1956  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
1957  */
1958 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
1959 {
1960 	unsigned int __v1, __v2;
1961 
1962 	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1963 	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1964 
1965 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1966 }
1967 
1968 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1969 {
1970 	unsigned long flags;
1971 
1972 	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1973 	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1974 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1975 }
1976 
1977 static inline int transport_tcq_window_closed(struct se_device *dev)
1978 {
1979 	if (dev->dev_tcq_window_closed++ <
1980 			PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
1981 		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
1982 	} else
1983 		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
1984 
1985 	wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
1986 	return 0;
1987 }
1988 
1989 /*
1990  * Called from Fabric Module context from transport_execute_tasks()
1991  *
1992  * The return of this function determines if the tasks from struct se_cmd
1993  * get added to the execution queue in transport_execute_tasks(),
1994  * or are added to the delayed or ordered lists here.
1995  */
1996 static inline int transport_execute_task_attr(struct se_cmd *cmd)
1997 {
1998 	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1999 		return 1;
2000 	/*
2001 	 * Check for HEAD_OF_QUEUE, and if set return 1 so the tasks for the
2002 	 * passed struct se_cmd are added to the front of the execution queue.
2003 	 */
2004 	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2005 		atomic_inc(&cmd->se_dev->dev_hoq_count);
2006 		smp_mb__after_atomic_inc();
2007 		pr_debug("Added HEAD_OF_QUEUE for CDB:"
2008 			" 0x%02x, se_ordered_id: %u\n",
2009 			cmd->t_task_cdb[0],
2010 			cmd->se_ordered_id);
2011 		return 1;
2012 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2013 		spin_lock(&cmd->se_dev->ordered_cmd_lock);
2014 		list_add_tail(&cmd->se_ordered_node,
2015 				&cmd->se_dev->ordered_cmd_list);
2016 		spin_unlock(&cmd->se_dev->ordered_cmd_lock);
2017 
2018 		atomic_inc(&cmd->se_dev->dev_ordered_sync);
2019 		smp_mb__after_atomic_inc();
2020 
2021 		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
2022 				" list, se_ordered_id: %u\n",
2023 				cmd->t_task_cdb[0],
2024 				cmd->se_ordered_id);
2025 		/*
2026 		 * Add ORDERED command to tail of execution queue if
2027 		 * no other older commands exist that need to be
2028 		 * completed first.
2029 		 */
2030 		if (!atomic_read(&cmd->se_dev->simple_cmds))
2031 			return 1;
2032 	} else {
2033 		/*
2034 		 * For SIMPLE and UNTAGGED Task Attribute commands
2035 		 */
2036 		atomic_inc(&cmd->se_dev->simple_cmds);
2037 		smp_mb__after_atomic_inc();
2038 	}
2039 	/*
2040 	 * If one or more commands with the ORDERED task attribute are still
2041 	 * outstanding, keep the tasks built for the passed struct se_cmd
2042 	 * dormant on the delayed list until those ORDERED commands complete.
2043 	 */
2044 	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
2045 		/*
2046 		 * Add the cmd w/ its tasks to the delayed cmd queue; it is drained
2047 		 * from transport_complete_task_attr() as earlier commands complete.
2048 		 */
2049 		spin_lock(&cmd->se_dev->delayed_cmd_lock);
2050 		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2051 		list_add_tail(&cmd->se_delayed_node,
2052 				&cmd->se_dev->delayed_cmd_list);
2053 		spin_unlock(&cmd->se_dev->delayed_cmd_lock);
2054 
2055 		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
2056 			" delayed CMD list, se_ordered_id: %u\n",
2057 			cmd->t_task_cdb[0], cmd->sam_task_attr,
2058 			cmd->se_ordered_id);
2059 		/*
2060 		 * Return zero to let transport_execute_tasks() know
2061 		 * not to add the delayed tasks to the execution list.
2062 		 */
2063 		return 0;
2064 	}
2065 	/*
2066 	 * Otherwise, no ORDERED task attributes exist..
2067 	 */
2068 	return 1;
2069 }
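
/*
 * Descriptive summary of transport_execute_task_attr() return values:
 *
 *   - SAM task attribute emulation disabled              -> 1 (execute now)
 *   - HEAD_OF_QUEUE (MSG_HEAD_TAG)                       -> 1
 *   - ORDERED (MSG_ORDERED_TAG) with no simple cmds left -> 1
 *   - ORDERED with older simple cmds still outstanding   -> 0 (delayed list)
 *   - SIMPLE/UNTAGGED while an ORDERED cmd is pending    -> 0 (delayed list)
 *   - SIMPLE/UNTAGGED otherwise                          -> 1
 */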
2070 
2071 /*
2072  * Called from fabric module context in transport_generic_new_cmd() and
2073  * transport_generic_process_write()
2074  */
2075 static int transport_execute_tasks(struct se_cmd *cmd)
2076 {
2077 	int add_tasks;
2078 
2079 	if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
2080 		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2081 		transport_generic_request_failure(cmd, 0, 1);
2082 		return 0;
2083 	}
2084 
2085 	/*
2086 	 * Call transport_cmd_check_stop() to see if a fabric exception
2087 	 * has occurred that prevents execution.
2088 	 */
2089 	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
2090 		/*
2091 		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2092 		 * attribute for the tasks of the received struct se_cmd CDB
2093 		 */
2094 		add_tasks = transport_execute_task_attr(cmd);
2095 		if (!add_tasks)
2096 			goto execute_tasks;
2097 		/*
2098 		 * This calls transport_add_tasks_from_cmd() to handle
2099 		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2100 		 * (if enabled) in __transport_add_task_to_execute_queue() and
2101 		 * transport_add_task_check_sam_attr().
2102 		 */
2103 		transport_add_tasks_from_cmd(cmd);
2104 	}
2105 	/*
2106 	 * Kick the execution queue for the cmd associated struct se_device
2107 	 * storage object.
2108 	 */
2109 execute_tasks:
2110 	__transport_execute_tasks(cmd->se_dev);
2111 	return 0;
2112 }
2113 
2114 /*
2115  * Called to check the struct se_device tcq depth window and, once open, pull
2116  * struct se_task entries off struct se_device->execute_task_list and dispatch them.
2117  *
2118  * Called from transport_processing_thread()
2119  */
2120 static int __transport_execute_tasks(struct se_device *dev)
2121 {
2122 	int error;
2123 	struct se_cmd *cmd = NULL;
2124 	struct se_task *task = NULL;
2125 	unsigned long flags;
2126 
2127 	/*
2128 	 * Check if there is enough room in the device and HBA queue to send
2129 	 * struct se_tasks to the selected transport.
2130 	 */
2131 check_depth:
2132 	if (!atomic_read(&dev->depth_left))
2133 		return transport_tcq_window_closed(dev);
2134 
2135 	dev->dev_tcq_window_closed = 0;
2136 
2137 	spin_lock_irq(&dev->execute_task_lock);
2138 	if (list_empty(&dev->execute_task_list)) {
2139 		spin_unlock_irq(&dev->execute_task_lock);
2140 		return 0;
2141 	}
2142 	task = list_first_entry(&dev->execute_task_list,
2143 				struct se_task, t_execute_list);
2144 	__transport_remove_task_from_execute_queue(task, dev);
2145 	spin_unlock_irq(&dev->execute_task_lock);
2146 
2147 	atomic_dec(&dev->depth_left);
2148 
2149 	cmd = task->task_se_cmd;
2150 
2151 	spin_lock_irqsave(&cmd->t_state_lock, flags);
2152 	task->task_flags |= (TF_ACTIVE | TF_SENT);
2153 	atomic_inc(&cmd->t_task_cdbs_sent);
2154 
2155 	if (atomic_read(&cmd->t_task_cdbs_sent) ==
2156 	    cmd->t_task_list_num)
2157 		atomic_set(&cmd->t_transport_sent, 1);
2158 
2159 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2160 
2161 	if (cmd->execute_task)
2162 		error = cmd->execute_task(task);
2163 	else
2164 		error = dev->transport->do_task(task);
2165 	if (error != 0) {
2166 		cmd->transport_error_status = error;
2167 		spin_lock_irqsave(&cmd->t_state_lock, flags);
2168 		task->task_flags &= ~TF_ACTIVE;
2169 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2170 		atomic_set(&cmd->t_transport_sent, 0);
2171 		transport_stop_tasks_for_cmd(cmd);
2172 		atomic_inc(&dev->depth_left);
2173 		transport_generic_request_failure(cmd, 0, 1);
2174 	}
2175 
2176 	goto check_depth;
2177 
2178 	return 0;
2179 }
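
/*
 * Descriptive summary of the dispatch loop above: while device queue depth
 * remains, pop the next struct se_task off the execute list, mark it
 * TF_ACTIVE | TF_SENT, and hand it to cmd->execute_task() or the backend's
 * ->do_task().  On a dispatch error the tasks are stopped and the command
 * is failed via transport_generic_request_failure().
 */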
2180 
2181 void transport_new_cmd_failure(struct se_cmd *se_cmd)
2182 {
2183 	unsigned long flags;
2184 	/*
2185 	 * Any unsolicited data will get dumped for failed command inside of
2186 	 * the fabric plugin
2187 	 */
2188 	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2189 	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2190 	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2191 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2192 }
2193 
2194 static inline u32 transport_get_sectors_6(
2195 	unsigned char *cdb,
2196 	struct se_cmd *cmd,
2197 	int *ret)
2198 {
2199 	struct se_device *dev = cmd->se_dev;
2200 
2201 	/*
2202 	 * Assume TYPE_DISK for non struct se_device objects.
2203 	 * Use 8-bit sector value.
2204 	 */
2205 	if (!dev)
2206 		goto type_disk;
2207 
2208 	/*
2209 	 * Use 24-bit allocation length for TYPE_TAPE.
2210 	 */
2211 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2212 		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2213 
2214 	/*
2215 	 * Everything else assume TYPE_DISK Sector CDB location.
2216 	 * Use 8-bit sector value.
2217 	 */
2218 type_disk:
2219 	return (u32)cdb[4];
2220 }
2221 
2222 static inline u32 transport_get_sectors_10(
2223 	unsigned char *cdb,
2224 	struct se_cmd *cmd,
2225 	int *ret)
2226 {
2227 	struct se_device *dev = cmd->se_dev;
2228 
2229 	/*
2230 	 * Assume TYPE_DISK for non struct se_device objects.
2231 	 * Use 16-bit sector value.
2232 	 */
2233 	if (!dev)
2234 		goto type_disk;
2235 
2236 	/*
2237 	 * XXX_10 is not defined in SSC, throw an exception
2238 	 */
2239 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2240 		*ret = -EINVAL;
2241 		return 0;
2242 	}
2243 
2244 	/*
2245 	 * Everything else assume TYPE_DISK Sector CDB location.
2246 	 * Use 16-bit sector value.
2247 	 */
2248 type_disk:
2249 	return (u32)(cdb[7] << 8) + cdb[8];
2250 }
2251 
2252 static inline u32 transport_get_sectors_12(
2253 	unsigned char *cdb,
2254 	struct se_cmd *cmd,
2255 	int *ret)
2256 {
2257 	struct se_device *dev = cmd->se_dev;
2258 
2259 	/*
2260 	 * Assume TYPE_DISK for non struct se_device objects.
2261 	 * Use 32-bit sector value.
2262 	 */
2263 	if (!dev)
2264 		goto type_disk;
2265 
2266 	/*
2267 	 * XXX_12 is not defined in SSC, throw an exception
2268 	 */
2269 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2270 		*ret = -EINVAL;
2271 		return 0;
2272 	}
2273 
2274 	/*
2275 	 * Everything else assume TYPE_DISK Sector CDB location.
2276 	 * Use 32-bit sector value.
2277 	 */
2278 type_disk:
2279 	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2280 }
2281 
2282 static inline u32 transport_get_sectors_16(
2283 	unsigned char *cdb,
2284 	struct se_cmd *cmd,
2285 	int *ret)
2286 {
2287 	struct se_device *dev = cmd->se_dev;
2288 
2289 	/*
2290 	 * Assume TYPE_DISK for non struct se_device objects.
2291 	 * Use 32-bit sector value.
2292 	 */
2293 	if (!dev)
2294 		goto type_disk;
2295 
2296 	/*
2297 	 * Use 24-bit allocation length for TYPE_TAPE.
2298 	 */
2299 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2300 		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2301 
2302 type_disk:
2303 	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2304 		    (cdb[12] << 8) + cdb[13];
2305 }
2306 
2307 /*
2308  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2309  */
2310 static inline u32 transport_get_sectors_32(
2311 	unsigned char *cdb,
2312 	struct se_cmd *cmd,
2313 	int *ret)
2314 {
2315 	/*
2316 	 * Assume TYPE_DISK for non struct se_device objects.
2317 	 * Use 32-bit sector value.
2318 	 */
2319 	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2320 		    (cdb[30] << 8) + cdb[31];
2321 
2322 }
2323 
2324 static inline u32 transport_get_size(
2325 	u32 sectors,
2326 	unsigned char *cdb,
2327 	struct se_cmd *cmd)
2328 {
2329 	struct se_device *dev = cmd->se_dev;
2330 
2331 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2332 		if (cdb[1] & 1) { /* sectors */
2333 			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2334 		} else /* bytes */
2335 			return sectors;
2336 	}
2337 #if 0
2338 	pr_debug("Returning block_size: %u, sectors: %u == %u for"
2339 			" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2340 			dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2341 			dev->transport->name);
2342 #endif
2343 	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2344 }
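
/*
 * Worked example: a READ_10 with cdb[7] = 0x00 and cdb[8] = 0x08 yields
 * sectors = 8 from transport_get_sectors_10(); with a 512-byte
 * se_dev_attrib.block_size, transport_get_size() then returns
 * 8 * 512 = 4096 bytes for a TYPE_DISK backend.
 */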
2345 
2346 static void transport_xor_callback(struct se_cmd *cmd)
2347 {
2348 	unsigned char *buf, *addr;
2349 	struct scatterlist *sg;
2350 	unsigned int offset;
2351 	int i;
2352 	int count;
2353 	/*
2354 	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2355 	 *
2356 	 * 1) read the specified logical block(s);
2357 	 * 2) transfer logical blocks from the data-out buffer;
2358 	 * 3) XOR the logical blocks transferred from the data-out buffer with
2359 	 *    the logical blocks read, storing the resulting XOR data in a buffer;
2360 	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2361 	 *    blocks transferred from the data-out buffer; and
2362 	 * 5) transfer the resulting XOR data to the data-in buffer.
2363 	 */
2364 	buf = kmalloc(cmd->data_length, GFP_KERNEL);
2365 	if (!buf) {
2366 		pr_err("Unable to allocate xor_callback buf\n");
2367 		return;
2368 	}
2369 	/*
2370 	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
2371 	 * into the locally allocated *buf
2372 	 */
2373 	sg_copy_to_buffer(cmd->t_data_sg,
2374 			  cmd->t_data_nents,
2375 			  buf,
2376 			  cmd->data_length);
2377 
2378 	/*
2379 	 * Now perform the XOR against the BIDI read memory located at
2380 	 * cmd->t_mem_bidi_list
2381 	 */
2382 
2383 	offset = 0;
2384 	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2385 		addr = kmap_atomic(sg_page(sg), KM_USER0);
2386 		if (!addr)
2387 			goto out;
2388 
2389 		for (i = 0; i < sg->length; i++)
2390 			*(addr + sg->offset + i) ^= *(buf + offset + i);
2391 
2392 		offset += sg->length;
2393 		kunmap_atomic(addr, KM_USER0);
2394 	}
2395 
2396 out:
2397 	kfree(buf);
2398 }
2399 
2400 /*
2401  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2402  */
2403 static int transport_get_sense_data(struct se_cmd *cmd)
2404 {
2405 	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2406 	struct se_device *dev = cmd->se_dev;
2407 	struct se_task *task = NULL, *task_tmp;
2408 	unsigned long flags;
2409 	u32 offset = 0;
2410 
2411 	WARN_ON(!cmd->se_lun);
2412 
2413 	if (!dev)
2414 		return 0;
2415 
2416 	spin_lock_irqsave(&cmd->t_state_lock, flags);
2417 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2418 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2419 		return 0;
2420 	}
2421 
2422 	list_for_each_entry_safe(task, task_tmp,
2423 				&cmd->t_task_list, t_list) {
2424 		if (!task->task_sense)
2425 			continue;
2426 
2427 		if (!dev->transport->get_sense_buffer) {
2428 			pr_err("dev->transport->get_sense_buffer"
2429 					" is NULL\n");
2430 			continue;
2431 		}
2432 
2433 		sense_buffer = dev->transport->get_sense_buffer(task);
2434 		if (!sense_buffer) {
2435 			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
2436 				" sense buffer for task with sense\n",
2437 				cmd->se_tfo->get_task_tag(cmd), task);
2438 			continue;
2439 		}
2440 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2441 
2442 		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2443 				TRANSPORT_SENSE_BUFFER);
2444 
2445 		memcpy(&buffer[offset], sense_buffer,
2446 				TRANSPORT_SENSE_BUFFER);
2447 		cmd->scsi_status = task->task_scsi_status;
2448 		/* Automatically padded */
2449 		cmd->scsi_sense_length =
2450 				(TRANSPORT_SENSE_BUFFER + offset);
2451 
2452 		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2453 				" and sense\n",
2454 			dev->se_hba->hba_id, dev->transport->name,
2455 				cmd->scsi_status);
2456 		return 0;
2457 	}
2458 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2459 
2460 	return -1;
2461 }
2462 
2463 static int
2464 transport_handle_reservation_conflict(struct se_cmd *cmd)
2465 {
2466 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2467 	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2468 	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2469 	/*
2470 	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2471 	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2472 	 * CONFLICT STATUS.
2473 	 *
2474 	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2475 	 */
2476 	if (cmd->se_sess &&
2477 	    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2478 		core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
2479 			cmd->orig_fe_lun, 0x2C,
2480 			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2481 	return -EINVAL;
2482 }
2483 
2484 static inline long long transport_dev_end_lba(struct se_device *dev)
2485 {
2486 	return dev->transport->get_blocks(dev) + 1;
2487 }
2488 
2489 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2490 {
2491 	struct se_device *dev = cmd->se_dev;
2492 	u32 sectors;
2493 
2494 	if (dev->transport->get_device_type(dev) != TYPE_DISK)
2495 		return 0;
2496 
2497 	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2498 
2499 	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2500 		pr_err("LBA: %llu Sectors: %u exceeds"
2501 			" transport_dev_end_lba(): %llu\n",
2502 			cmd->t_task_lba, sectors,
2503 			transport_dev_end_lba(dev));
2504 		return -EINVAL;
2505 	}
2506 
2507 	return 0;
2508 }
2509 
2510 static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2511 {
2512 	/*
2513 	 * Determine if the received WRITE_SAME is used for direct
2514 	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2515 	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2516 	 * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
2517 	 */
2518 	int passthrough = (dev->transport->transport_type ==
2519 				TRANSPORT_PLUGIN_PHBA_PDEV);
2520 
2521 	if (!passthrough) {
2522 		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2523 			pr_err("WRITE_SAME PBDATA and LBDATA"
2524 				" bits not supported for Block Discard"
2525 				" Emulation\n");
2526 			return -ENOSYS;
2527 		}
2528 		/*
2529 		 * Currently for the emulated case we only accept
2530 		 * tpws with the UNMAP=1 bit set.
2531 		 */
2532 		if (!(flags[0] & 0x08)) {
2533 			pr_err("WRITE_SAME w/o UNMAP bit not"
2534 				" supported for Block Discard Emulation\n");
2535 			return -ENOSYS;
2536 		}
2537 	}
2538 
2539 	return 0;
2540 }
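
/*
 * For reference, the WRITE_SAME flags byte bits checked above:
 *   0x08 - UNMAP  (required for the emulated block discard path)
 *   0x04 - PBDATA (rejected when emulating)
 *   0x02 - LBDATA (rejected when emulating)
 */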
2541 
2542 /*	transport_generic_cmd_sequencer():
2543  *
2544  *	Generic Command Sequencer that should work for most DAS transport
2545  *	drivers.
2546  *
2547  *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2548  *	RX Thread.
2549  *
2550  *	FIXME: Need to support other SCSI OPCODES as well.
2551  */
2552 static int transport_generic_cmd_sequencer(
2553 	struct se_cmd *cmd,
2554 	unsigned char *cdb)
2555 {
2556 	struct se_device *dev = cmd->se_dev;
2557 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2558 	int ret = 0, sector_ret = 0, passthrough;
2559 	u32 sectors = 0, size = 0, pr_reg_type = 0;
2560 	u16 service_action;
2561 	u8 alua_ascq = 0;
2562 	/*
2563 	 * Check for an existing UNIT ATTENTION condition
2564 	 */
2565 	if (core_scsi3_ua_check(cmd, cdb) < 0) {
2566 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2567 		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2568 		return -EINVAL;
2569 	}
2570 	/*
2571 	 * Check status of Asymmetric Logical Unit Assignment port
2572 	 */
2573 	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2574 	if (ret != 0) {
2575 		/*
2576 		 * Set the SCSI additional sense code (ASC) to 'LUN Not Accessible';
2577 		 * the ALUA additional sense code qualifier (ASCQ) is determined
2578 		 * by the ALUA primary or secondary access state.
2579 		 */
2580 		if (ret > 0) {
2581 #if 0
2582 			pr_debug("[%s]: ALUA TG Port not available,"
2583 				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2584 				cmd->se_tfo->get_fabric_name(), alua_ascq);
2585 #endif
2586 			transport_set_sense_codes(cmd, 0x04, alua_ascq);
2587 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2588 			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2589 			return -EINVAL;
2590 		}
2591 		goto out_invalid_cdb_field;
2592 	}
2593 	/*
2594 	 * Check status for SPC-3 Persistent Reservations
2595 	 */
2596 	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2597 		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2598 					cmd, cdb, pr_reg_type) != 0)
2599 			return transport_handle_reservation_conflict(cmd);
2600 		/*
2601 		 * This means the CDB is allowed for the SCSI Initiator port
2602 		 * when said port is *NOT* holding the legacy SPC-2 or
2603 		 * SPC-3 Persistent Reservation.
2604 		 */
2605 	}
2606 
2607 	/*
2608 	 * If we operate in passthrough mode we skip most CDB emulation and
2609 	 * instead hand the commands down to the physical SCSI device.
2610 	 */
2611 	passthrough =
2612 		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2613 
2614 	switch (cdb[0]) {
2615 	case READ_6:
2616 		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2617 		if (sector_ret)
2618 			goto out_unsupported_cdb;
2619 		size = transport_get_size(sectors, cdb, cmd);
2620 		cmd->t_task_lba = transport_lba_21(cdb);
2621 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2622 		break;
2623 	case READ_10:
2624 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2625 		if (sector_ret)
2626 			goto out_unsupported_cdb;
2627 		size = transport_get_size(sectors, cdb, cmd);
2628 		cmd->t_task_lba = transport_lba_32(cdb);
2629 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2630 		break;
2631 	case READ_12:
2632 		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2633 		if (sector_ret)
2634 			goto out_unsupported_cdb;
2635 		size = transport_get_size(sectors, cdb, cmd);
2636 		cmd->t_task_lba = transport_lba_32(cdb);
2637 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2638 		break;
2639 	case READ_16:
2640 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2641 		if (sector_ret)
2642 			goto out_unsupported_cdb;
2643 		size = transport_get_size(sectors, cdb, cmd);
2644 		cmd->t_task_lba = transport_lba_64(cdb);
2645 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2646 		break;
2647 	case WRITE_6:
2648 		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2649 		if (sector_ret)
2650 			goto out_unsupported_cdb;
2651 		size = transport_get_size(sectors, cdb, cmd);
2652 		cmd->t_task_lba = transport_lba_21(cdb);
2653 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2654 		break;
2655 	case WRITE_10:
2656 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2657 		if (sector_ret)
2658 			goto out_unsupported_cdb;
2659 		size = transport_get_size(sectors, cdb, cmd);
2660 		cmd->t_task_lba = transport_lba_32(cdb);
2661 		cmd->t_tasks_fua = (cdb[1] & 0x8);
2662 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2663 		break;
2664 	case WRITE_12:
2665 		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2666 		if (sector_ret)
2667 			goto out_unsupported_cdb;
2668 		size = transport_get_size(sectors, cdb, cmd);
2669 		cmd->t_task_lba = transport_lba_32(cdb);
2670 		cmd->t_tasks_fua = (cdb[1] & 0x8);
2671 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2672 		break;
2673 	case WRITE_16:
2674 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2675 		if (sector_ret)
2676 			goto out_unsupported_cdb;
2677 		size = transport_get_size(sectors, cdb, cmd);
2678 		cmd->t_task_lba = transport_lba_64(cdb);
2679 		cmd->t_tasks_fua = (cdb[1] & 0x8);
2680 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2681 		break;
2682 	case XDWRITEREAD_10:
2683 		if ((cmd->data_direction != DMA_TO_DEVICE) ||
2684 		    !(cmd->t_tasks_bidi))
2685 			goto out_invalid_cdb_field;
2686 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2687 		if (sector_ret)
2688 			goto out_unsupported_cdb;
2689 		size = transport_get_size(sectors, cdb, cmd);
2690 		cmd->t_task_lba = transport_lba_32(cdb);
2691 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2692 
2693 		/*
2694 		 * Do not allow BIDI commands for passthrough mode.
2695 		 */
2696 		if (passthrough)
2697 			goto out_unsupported_cdb;
2698 
2699 		/*
2700 		 * Setup BIDI XOR callback to be run after I/O completion.
2701 		 */
2702 		cmd->transport_complete_callback = &transport_xor_callback;
2703 		cmd->t_tasks_fua = (cdb[1] & 0x8);
2704 		break;
2705 	case VARIABLE_LENGTH_CMD:
2706 		service_action = get_unaligned_be16(&cdb[8]);
2707 		switch (service_action) {
2708 		case XDWRITEREAD_32:
2709 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2710 			if (sector_ret)
2711 				goto out_unsupported_cdb;
2712 			size = transport_get_size(sectors, cdb, cmd);
2713 			/*
2714 			 * Use WRITE_32 and READ_32 opcodes for the emulated
2715 			 * XDWRITE_READ_32 logic.
2716 			 */
2717 			cmd->t_task_lba = transport_lba_64_ext(cdb);
2718 			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2719 
2720 			/*
2721 			 * Do not allow BIDI commands for passthrough mode.
2722 			 */
2723 			if (passthrough)
2724 				goto out_unsupported_cdb;
2725 
2726 			/*
2727 			 * Setup BIDI XOR callback to be run after I/O
2728 			 * completion.
2729 			 */
2730 			cmd->transport_complete_callback = &transport_xor_callback;
2731 			cmd->t_tasks_fua = (cdb[10] & 0x8);
2732 			break;
2733 		case WRITE_SAME_32:
2734 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2735 			if (sector_ret)
2736 				goto out_unsupported_cdb;
2737 
2738 			if (sectors)
2739 				size = transport_get_size(1, cdb, cmd);
2740 			else {
2741 				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2742 				       " supported\n");
2743 				goto out_invalid_cdb_field;
2744 			}
2745 
2746 			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2747 			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2748 
2749 			if (target_check_write_same_discard(&cdb[10], dev) < 0)
2750 				goto out_invalid_cdb_field;
2751 			if (!passthrough)
2752 				cmd->execute_task = target_emulate_write_same;
2753 			break;
2754 		default:
2755 			pr_err("VARIABLE_LENGTH_CMD service action"
2756 				" 0x%04x not supported\n", service_action);
2757 			goto out_unsupported_cdb;
2758 		}
2759 		break;
2760 	case MAINTENANCE_IN:
2761 		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2762 			/* MAINTENANCE_IN from SCC-2 */
2763 			/*
2764 			 * Check for emulated MI_REPORT_TARGET_PGS.
2765 			 */
2766 			if (cdb[1] == MI_REPORT_TARGET_PGS &&
2767 			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2768 				cmd->execute_task =
2769 					target_emulate_report_target_port_groups;
2770 			}
2771 			size = (cdb[6] << 24) | (cdb[7] << 16) |
2772 			       (cdb[8] << 8) | cdb[9];
2773 		} else {
2774 			/* GPCMD_SEND_KEY from multi media commands */
2775 			size = (cdb[8] << 8) + cdb[9];
2776 		}
2777 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2778 		break;
2779 	case MODE_SELECT:
2780 		size = cdb[4];
2781 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2782 		break;
2783 	case MODE_SELECT_10:
2784 		size = (cdb[7] << 8) + cdb[8];
2785 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2786 		break;
2787 	case MODE_SENSE:
2788 		size = cdb[4];
2789 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2790 		if (!passthrough)
2791 			cmd->execute_task = target_emulate_modesense;
2792 		break;
2793 	case MODE_SENSE_10:
2794 		size = (cdb[7] << 8) + cdb[8];
2795 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2796 		if (!passthrough)
2797 			cmd->execute_task = target_emulate_modesense;
2798 		break;
2799 	case GPCMD_READ_BUFFER_CAPACITY:
2800 	case GPCMD_SEND_OPC:
2801 	case LOG_SELECT:
2802 	case LOG_SENSE:
2803 		size = (cdb[7] << 8) + cdb[8];
2804 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2805 		break;
2806 	case READ_BLOCK_LIMITS:
2807 		size = READ_BLOCK_LEN;
2808 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2809 		break;
2810 	case GPCMD_GET_CONFIGURATION:
2811 	case GPCMD_READ_FORMAT_CAPACITIES:
2812 	case GPCMD_READ_DISC_INFO:
2813 	case GPCMD_READ_TRACK_RZONE_INFO:
2814 		size = (cdb[7] << 8) + cdb[8];
2815 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2816 		break;
2817 	case PERSISTENT_RESERVE_IN:
2818 		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2819 			cmd->execute_task = target_scsi3_emulate_pr_in;
2820 		size = (cdb[7] << 8) + cdb[8];
2821 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2822 		break;
2823 	case PERSISTENT_RESERVE_OUT:
2824 		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2825 			cmd->execute_task = target_scsi3_emulate_pr_out;
2826 		size = (cdb[7] << 8) + cdb[8];
2827 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2828 		break;
2829 	case GPCMD_MECHANISM_STATUS:
2830 	case GPCMD_READ_DVD_STRUCTURE:
2831 		size = (cdb[8] << 8) + cdb[9];
2832 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2833 		break;
2834 	case READ_POSITION:
2835 		size = READ_POSITION_LEN;
2836 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2837 		break;
2838 	case MAINTENANCE_OUT:
2839 		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2840 			/* MAINTENANCE_OUT from SCC-2
2841 			 *
2842 			 * Check for emulated MO_SET_TARGET_PGS.
2843 			 */
2844 			if (cdb[1] == MO_SET_TARGET_PGS &&
2845 			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2846 				cmd->execute_task =
2847 					target_emulate_set_target_port_groups;
2848 			}
2849 
2850 			size = (cdb[6] << 24) | (cdb[7] << 16) |
2851 			       (cdb[8] << 8) | cdb[9];
2852 		} else  {
2853 			/* GPCMD_REPORT_KEY from multi media commands */
2854 			size = (cdb[8] << 8) + cdb[9];
2855 		}
2856 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2857 		break;
2858 	case INQUIRY:
2859 		size = (cdb[3] << 8) + cdb[4];
2860 		/*
2861 		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
2862 		 * See spc4r17 section 5.3
2863 		 */
2864 		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2865 			cmd->sam_task_attr = MSG_HEAD_TAG;
2866 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2867 		if (!passthrough)
2868 			cmd->execute_task = target_emulate_inquiry;
2869 		break;
2870 	case READ_BUFFER:
2871 		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2872 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2873 		break;
2874 	case READ_CAPACITY:
2875 		size = READ_CAP_LEN;
2876 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2877 		if (!passthrough)
2878 			cmd->execute_task = target_emulate_readcapacity;
2879 		break;
2880 	case READ_MEDIA_SERIAL_NUMBER:
2881 	case SECURITY_PROTOCOL_IN:
2882 	case SECURITY_PROTOCOL_OUT:
2883 		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2884 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2885 		break;
2886 	case SERVICE_ACTION_IN:
2887 		switch (cmd->t_task_cdb[1] & 0x1f) {
2888 		case SAI_READ_CAPACITY_16:
2889 			if (!passthrough)
2890 				cmd->execute_task =
2891 					target_emulate_readcapacity_16;
2892 			break;
2893 		default:
2894 			if (passthrough)
2895 				break;
2896 
2897 			pr_err("Unsupported SA: 0x%02x\n",
2898 				cmd->t_task_cdb[1] & 0x1f);
2899 			goto out_unsupported_cdb;
2900 		}
2901 		/*FALLTHROUGH*/
2902 	case ACCESS_CONTROL_IN:
2903 	case ACCESS_CONTROL_OUT:
2904 	case EXTENDED_COPY:
2905 	case READ_ATTRIBUTE:
2906 	case RECEIVE_COPY_RESULTS:
2907 	case WRITE_ATTRIBUTE:
2908 		size = (cdb[10] << 24) | (cdb[11] << 16) |
2909 		       (cdb[12] << 8) | cdb[13];
2910 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2911 		break;
2912 	case RECEIVE_DIAGNOSTIC:
2913 	case SEND_DIAGNOSTIC:
2914 		size = (cdb[3] << 8) | cdb[4];
2915 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2916 		break;
2917 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2918 #if 0
2919 	case GPCMD_READ_CD:
2920 		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2921 		size = (2336 * sectors);
2922 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2923 		break;
2924 #endif
2925 	case READ_TOC:
2926 		size = cdb[8];
2927 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2928 		break;
2929 	case REQUEST_SENSE:
2930 		size = cdb[4];
2931 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2932 		if (!passthrough)
2933 			cmd->execute_task = target_emulate_request_sense;
2934 		break;
2935 	case READ_ELEMENT_STATUS:
2936 		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
2937 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2938 		break;
2939 	case WRITE_BUFFER:
2940 		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2941 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2942 		break;
2943 	case RESERVE:
2944 	case RESERVE_10:
2945 		/*
2946 		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
2947 		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2948 		 */
2949 		if (cdb[0] == RESERVE_10)
2950 			size = (cdb[7] << 8) | cdb[8];
2951 		else
2952 			size = cmd->data_length;
2953 
2954 		/*
2955 		 * Setup the legacy emulated handler for SPC-2 and
2956 		 * >= SPC-3 compatible reservation handling (CRH=1).
2957 		 * Otherwise, we assume the underlying SCSI logic is
2958 		 * running in SPC_PASSTHROUGH and wants reservations
2959 		 * emulation disabled.
2960 		 */
2961 		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2962 			cmd->execute_task = target_scsi2_reservation_reserve;
2963 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2964 		break;
2965 	case RELEASE:
2966 	case RELEASE_10:
2967 		/*
2968 		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
2969 		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2970 		 */
2971 		if (cdb[0] == RELEASE_10)
2972 			size = (cdb[7] << 8) | cdb[8];
2973 		else
2974 			size = cmd->data_length;
2975 
2976 		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2977 			cmd->execute_task = target_scsi2_reservation_release;
2978 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2979 		break;
2980 	case SYNCHRONIZE_CACHE:
2981 	case 0x91: /* SYNCHRONIZE_CACHE_16: */
2982 		/*
2983 		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
2984 		 */
2985 		if (cdb[0] == SYNCHRONIZE_CACHE) {
2986 			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2987 			cmd->t_task_lba = transport_lba_32(cdb);
2988 		} else {
2989 			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2990 			cmd->t_task_lba = transport_lba_64(cdb);
2991 		}
2992 		if (sector_ret)
2993 			goto out_unsupported_cdb;
2994 
2995 		size = transport_get_size(sectors, cdb, cmd);
2996 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2997 
2998 		if (passthrough)
2999 			break;
3000 
3001 		/*
3002 		 * Check to ensure that LBA + Range does not extend past the end of the
3003 		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
3004 		 */
3005 		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
3006 			if (transport_cmd_get_valid_sectors(cmd) < 0)
3007 				goto out_invalid_cdb_field;
3008 		}
3009 		cmd->execute_task = target_emulate_synchronize_cache;
3010 		break;
3011 	case UNMAP:
3012 		size = get_unaligned_be16(&cdb[7]);
3013 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3014 		if (!passthrough)
3015 			cmd->execute_task = target_emulate_unmap;
3016 		break;
3017 	case WRITE_SAME_16:
3018 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3019 		if (sector_ret)
3020 			goto out_unsupported_cdb;
3021 
3022 		if (sectors)
3023 			size = transport_get_size(1, cdb, cmd);
3024 		else {
3025 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3026 			goto out_invalid_cdb_field;
3027 		}
3028 
3029 		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3030 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3031 
3032 		if (target_check_write_same_discard(&cdb[1], dev) < 0)
3033 			goto out_invalid_cdb_field;
3034 		if (!passthrough)
3035 			cmd->execute_task = target_emulate_write_same;
3036 		break;
3037 	case WRITE_SAME:
3038 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3039 		if (sector_ret)
3040 			goto out_unsupported_cdb;
3041 
3042 		if (sectors)
3043 			size = transport_get_size(1, cdb, cmd);
3044 		else {
3045 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3046 			goto out_invalid_cdb_field;
3047 		}
3048 
3049 		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
3050 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3051 		/*
3052 		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
3053 		 * of byte 1 bit 3 UNMAP instead of original reserved field
3054 		 */
3055 		if (target_check_write_same_discard(&cdb[1], dev) < 0)
3056 			goto out_invalid_cdb_field;
3057 		if (!passthrough)
3058 			cmd->execute_task = target_emulate_write_same;
3059 		break;
3060 	case ALLOW_MEDIUM_REMOVAL:
3061 	case ERASE:
3062 	case REZERO_UNIT:
3063 	case SEEK_10:
3064 	case SPACE:
3065 	case START_STOP:
3066 	case TEST_UNIT_READY:
3067 	case VERIFY:
3068 	case WRITE_FILEMARKS:
3069 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3070 		if (!passthrough)
3071 			cmd->execute_task = target_emulate_noop;
3072 		break;
3073 	case GPCMD_CLOSE_TRACK:
3074 	case INITIALIZE_ELEMENT_STATUS:
3075 	case GPCMD_LOAD_UNLOAD:
3076 	case GPCMD_SET_SPEED:
3077 	case MOVE_MEDIUM:
3078 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3079 		break;
3080 	case REPORT_LUNS:
3081 		cmd->execute_task = target_report_luns;
3082 		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3083 		/*
3084 		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3085 		 * See spc4r17 section 5.3
3086 		 */
3087 		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3088 			cmd->sam_task_attr = MSG_HEAD_TAG;
3089 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3090 		break;
3091 	default:
3092 		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3093 			" 0x%02x, sending CHECK_CONDITION.\n",
3094 			cmd->se_tfo->get_fabric_name(), cdb[0]);
3095 		goto out_unsupported_cdb;
3096 	}
3097 
3098 	if (size != cmd->data_length) {
3099 		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3100 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
3101 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3102 				cmd->data_length, size, cdb[0]);
3103 
3104 		cmd->cmd_spdtl = size;
3105 
3106 		if (cmd->data_direction == DMA_TO_DEVICE) {
3107 			pr_err("Rejecting underflow/overflow"
3108 					" WRITE data\n");
3109 			goto out_invalid_cdb_field;
3110 		}
3111 		/*
3112 		 * Reject READ_* or WRITE_* with overflow/underflow for
3113 		 * type SCF_SCSI_DATA_SG_IO_CDB.
3114 		 */
3115 		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
3116 			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3117 				" CDB on non 512-byte sector setup subsystem"
3118 				" plugin: %s\n", dev->transport->name);
3119 			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3120 			goto out_invalid_cdb_field;
3121 		}
3122 
3123 		if (size > cmd->data_length) {
3124 			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3125 			cmd->residual_count = (size - cmd->data_length);
3126 		} else {
3127 			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3128 			cmd->residual_count = (cmd->data_length - size);
3129 		}
3130 		cmd->data_length = size;
3131 	}
3132 
3133 	/* reject any command that we don't have a handler for */
3134 	if (!(passthrough || cmd->execute_task ||
3135 	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3136 		goto out_unsupported_cdb;
3137 
3138 	/* Let's limit control cdbs to a page, for simplicity's sake. */
3139 	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3140 	    size > PAGE_SIZE)
3141 		goto out_invalid_cdb_field;
3142 
3143 	transport_set_supported_SAM_opcode(cmd);
3144 	return ret;
3145 
3146 out_unsupported_cdb:
3147 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3148 	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3149 	return -EINVAL;
3150 out_invalid_cdb_field:
3151 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3152 	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3153 	return -EINVAL;
3154 }
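
/*
 * Example of the sequencing above (descriptive only): a READ_10 CDB of
 * 28 00 00 00 00 10 00 00 08 00 decodes to sectors = 8 (cdb[7..8]),
 * t_task_lba = 0x10 (cdb[2..5]), size = 8 * block_size, and the command
 * is flagged SCF_SCSI_DATA_SG_IO_CDB for the data path.
 */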
3155 
3156 /*
3157  * Called from I/O completion to determine which dormant/delayed
3158  * and ordered cmds need to have their tasks added to the execution queue.
3159  */
3160 static void transport_complete_task_attr(struct se_cmd *cmd)
3161 {
3162 	struct se_device *dev = cmd->se_dev;
3163 	struct se_cmd *cmd_p, *cmd_tmp;
3164 	int new_active_tasks = 0;
3165 
3166 	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3167 		atomic_dec(&dev->simple_cmds);
3168 		smp_mb__after_atomic_dec();
3169 		dev->dev_cur_ordered_id++;
3170 		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3171 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
3172 			cmd->se_ordered_id);
3173 	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3174 		atomic_dec(&dev->dev_hoq_count);
3175 		smp_mb__after_atomic_dec();
3176 		dev->dev_cur_ordered_id++;
3177 		pr_debug("Incremented dev_cur_ordered_id: %u for"
3178 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3179 			cmd->se_ordered_id);
3180 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3181 		spin_lock(&dev->ordered_cmd_lock);
3182 		list_del(&cmd->se_ordered_node);
3183 		atomic_dec(&dev->dev_ordered_sync);
3184 		smp_mb__after_atomic_dec();
3185 		spin_unlock(&dev->ordered_cmd_lock);
3186 
3187 		dev->dev_cur_ordered_id++;
3188 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3189 			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3190 	}
3191 	/*
3192 	 * Process all commands up to the last received
3193 	 * ORDERED task attribute which requires another blocking
3194 	 * boundary
3195 	 */
3196 	spin_lock(&dev->delayed_cmd_lock);
3197 	list_for_each_entry_safe(cmd_p, cmd_tmp,
3198 			&dev->delayed_cmd_list, se_delayed_node) {
3199 
3200 		list_del(&cmd_p->se_delayed_node);
3201 		spin_unlock(&dev->delayed_cmd_lock);
3202 
3203 		pr_debug("Calling add_tasks() for"
3204 			" cmd_p: 0x%02x Task Attr: 0x%02x"
3205 			" Dormant -> Active, se_ordered_id: %u\n",
3206 			cmd_p->t_task_cdb[0],
3207 			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3208 
3209 		transport_add_tasks_from_cmd(cmd_p);
3210 		new_active_tasks++;
3211 
3212 		spin_lock(&dev->delayed_cmd_lock);
3213 		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3214 			break;
3215 	}
3216 	spin_unlock(&dev->delayed_cmd_lock);
3217 	/*
3218 	 * If new tasks have become active, wake up the transport thread
3219 	 * to do the processing of the Active tasks.
3220 	 */
3221 	if (new_active_tasks != 0)
3222 		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3223 }
3224 
3225 static void transport_complete_qf(struct se_cmd *cmd)
3226 {
3227 	int ret = 0;
3228 
3229 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3230 		transport_complete_task_attr(cmd);
3231 
3232 	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3233 		ret = cmd->se_tfo->queue_status(cmd);
3234 		if (ret)
3235 			goto out;
3236 	}
3237 
3238 	switch (cmd->data_direction) {
3239 	case DMA_FROM_DEVICE:
3240 		ret = cmd->se_tfo->queue_data_in(cmd);
3241 		break;
3242 	case DMA_TO_DEVICE:
3243 		if (cmd->t_bidi_data_sg) {
3244 			ret = cmd->se_tfo->queue_data_in(cmd);
3245 			if (ret < 0)
3246 				break;
3247 		}
3248 		/* Fall through for DMA_TO_DEVICE */
3249 	case DMA_NONE:
3250 		ret = cmd->se_tfo->queue_status(cmd);
3251 		break;
3252 	default:
3253 		break;
3254 	}
3255 
3256 out:
3257 	if (ret < 0) {
3258 		transport_handle_queue_full(cmd, cmd->se_dev);
3259 		return;
3260 	}
3261 	transport_lun_remove_cmd(cmd);
3262 	transport_cmd_check_stop_to_fabric(cmd);
3263 }
3264 
3265 static void transport_handle_queue_full(
3266 	struct se_cmd *cmd,
3267 	struct se_device *dev)
3268 {
3269 	spin_lock_irq(&dev->qf_cmd_lock);
3270 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3271 	atomic_inc(&dev->dev_qf_count);
3272 	smp_mb__after_atomic_inc();
3273 	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3274 
3275 	schedule_work(&cmd->se_dev->qf_work_queue);
3276 }
3277 
3278 static void target_complete_ok_work(struct work_struct *work)
3279 {
3280 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3281 	int reason = 0, ret;
3282 
3283 	/*
3284 	 * Check if we need to move delayed/dormant tasks from cmds on the
3285 	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3286 	 * Attribute.
3287 	 */
3288 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3289 		transport_complete_task_attr(cmd);
3290 	/*
3291 	 * Check to schedule QUEUE_FULL work, or execute an existing
3292 	 * cmd->transport_qf_callback()
3293 	 */
3294 	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3295 		schedule_work(&cmd->se_dev->qf_work_queue);
3296 
3297 	/*
3298 	 * Check if we need to retrieve a sense buffer from
3299 	 * the struct se_cmd in question.
3300 	 */
3301 	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3302 		if (transport_get_sense_data(cmd) < 0)
3303 			reason = TCM_NON_EXISTENT_LUN;
3304 
3305 		/*
3306 		 * Only set when a struct se_task->task_scsi_status returned
3307 		 * a non-GOOD status.
3308 		 */
3309 		if (cmd->scsi_status) {
3310 			ret = transport_send_check_condition_and_sense(
3311 					cmd, reason, 1);
3312 			if (ret == -EAGAIN || ret == -ENOMEM)
3313 				goto queue_full;
3314 
3315 			transport_lun_remove_cmd(cmd);
3316 			transport_cmd_check_stop_to_fabric(cmd);
3317 			return;
3318 		}
3319 	}
3320 	/*
3321 	 * Check for a callback, used by, amongst other things,
3322 	 * XDWRITE_READ_10 emulation.
3323 	 */
3324 	if (cmd->transport_complete_callback)
3325 		cmd->transport_complete_callback(cmd);
3326 
3327 	switch (cmd->data_direction) {
3328 	case DMA_FROM_DEVICE:
3329 		spin_lock(&cmd->se_lun->lun_sep_lock);
3330 		if (cmd->se_lun->lun_sep) {
3331 			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3332 					cmd->data_length;
3333 		}
3334 		spin_unlock(&cmd->se_lun->lun_sep_lock);
3335 
3336 		ret = cmd->se_tfo->queue_data_in(cmd);
3337 		if (ret == -EAGAIN || ret == -ENOMEM)
3338 			goto queue_full;
3339 		break;
3340 	case DMA_TO_DEVICE:
3341 		spin_lock(&cmd->se_lun->lun_sep_lock);
3342 		if (cmd->se_lun->lun_sep) {
3343 			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3344 				cmd->data_length;
3345 		}
3346 		spin_unlock(&cmd->se_lun->lun_sep_lock);
3347 		/*
3348 		 * Check if we need to send READ payload for BIDI-COMMAND
3349 		 */
3350 		if (cmd->t_bidi_data_sg) {
3351 			spin_lock(&cmd->se_lun->lun_sep_lock);
3352 			if (cmd->se_lun->lun_sep) {
3353 				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3354 					cmd->data_length;
3355 			}
3356 			spin_unlock(&cmd->se_lun->lun_sep_lock);
3357 			ret = cmd->se_tfo->queue_data_in(cmd);
3358 			if (ret == -EAGAIN || ret == -ENOMEM)
3359 				goto queue_full;
3360 			break;
3361 		}
3362 		/* Fall through for DMA_TO_DEVICE */
3363 	case DMA_NONE:
3364 		ret = cmd->se_tfo->queue_status(cmd);
3365 		if (ret == -EAGAIN || ret == -ENOMEM)
3366 			goto queue_full;
3367 		break;
3368 	default:
3369 		break;
3370 	}
3371 
3372 	transport_lun_remove_cmd(cmd);
3373 	transport_cmd_check_stop_to_fabric(cmd);
3374 	return;
3375 
3376 queue_full:
3377 	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3378 		" data_direction: %d\n", cmd, cmd->data_direction);
3379 	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3380 	transport_handle_queue_full(cmd, cmd->se_dev);
3381 }
3382 
3383 static void transport_free_dev_tasks(struct se_cmd *cmd)
3384 {
3385 	struct se_task *task, *task_tmp;
3386 	unsigned long flags;
3387 	LIST_HEAD(dispose_list);
3388 
3389 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3390 	list_for_each_entry_safe(task, task_tmp,
3391 				&cmd->t_task_list, t_list) {
3392 		if (!(task->task_flags & TF_ACTIVE))
3393 			list_move_tail(&task->t_list, &dispose_list);
3394 	}
3395 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3396 
3397 	while (!list_empty(&dispose_list)) {
3398 		task = list_first_entry(&dispose_list, struct se_task, t_list);
3399 
3400 		if (task->task_sg != cmd->t_data_sg &&
3401 		    task->task_sg != cmd->t_bidi_data_sg)
3402 			kfree(task->task_sg);
3403 
3404 		list_del(&task->t_list);
3405 
3406 		cmd->se_dev->transport->free_task(task);
3407 	}
3408 }
3409 
3410 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3411 {
3412 	struct scatterlist *sg;
3413 	int count;
3414 
3415 	for_each_sg(sgl, sg, nents, count)
3416 		__free_page(sg_page(sg));
3417 
3418 	kfree(sgl);
3419 }
3420 
3421 static inline void transport_free_pages(struct se_cmd *cmd)
3422 {
3423 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3424 		return;
3425 
3426 	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3427 	cmd->t_data_sg = NULL;
3428 	cmd->t_data_nents = 0;
3429 
3430 	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3431 	cmd->t_bidi_data_sg = NULL;
3432 	cmd->t_bidi_data_nents = 0;
3433 }
3434 
3435 /**
3436  * transport_put_cmd - release a reference to a command
3437  * @cmd:       command to release
3438  *
3439  * This routine releases our reference to the command and frees it if possible.
3440  */
3441 static void transport_put_cmd(struct se_cmd *cmd)
3442 {
3443 	unsigned long flags;
3444 	int free_tasks = 0;
3445 
3446 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3447 	if (atomic_read(&cmd->t_fe_count)) {
3448 		if (!atomic_dec_and_test(&cmd->t_fe_count))
3449 			goto out_busy;
3450 	}
3451 
3452 	if (atomic_read(&cmd->t_se_count)) {
3453 		if (!atomic_dec_and_test(&cmd->t_se_count))
3454 			goto out_busy;
3455 	}
3456 
3457 	if (atomic_read(&cmd->transport_dev_active)) {
3458 		atomic_set(&cmd->transport_dev_active, 0);
3459 		transport_all_task_dev_remove_state(cmd);
3460 		free_tasks = 1;
3461 	}
3462 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3463 
3464 	if (free_tasks != 0)
3465 		transport_free_dev_tasks(cmd);
3466 
3467 	transport_free_pages(cmd);
3468 	transport_release_cmd(cmd);
3469 	return;
3470 out_busy:
3471 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3472 }
3473 
3474 /*
3475  * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3476  * allocating in the core.
3477  * @cmd:  Associated se_cmd descriptor
3478  * @sgl:  SGL style memory for TCM WRITE / READ
3479  * @sgl_count: Number of SGL elements
3480  * @sgl_bidi: SGL style memory for TCM BIDI READ
3481  * @sgl_bidi_count: Number of BIDI READ SGL elements
3482  *
3483  * Return: nonzero if the cmd was rejected due to -ENOMEM or improper usage
3484  * of parameters.
3485  */
3486 int transport_generic_map_mem_to_cmd(
3487 	struct se_cmd *cmd,
3488 	struct scatterlist *sgl,
3489 	u32 sgl_count,
3490 	struct scatterlist *sgl_bidi,
3491 	u32 sgl_bidi_count)
3492 {
3493 	if (!sgl || !sgl_count)
3494 		return 0;
3495 
3496 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3497 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3498 
3499 		cmd->t_data_sg = sgl;
3500 		cmd->t_data_nents = sgl_count;
3501 
3502 		if (sgl_bidi && sgl_bidi_count) {
3503 			cmd->t_bidi_data_sg = sgl_bidi;
3504 			cmd->t_bidi_data_nents = sgl_bidi_count;
3505 		}
3506 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3507 	}
3508 
3509 	return 0;
3510 }
3511 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
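
/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a fabric module that has already built its own scatterlist for an I/O
 * can hand it to the core with transport_generic_map_mem_to_cmd() before
 * kicking off processing with transport_generic_new_cmd().  The function
 * name below is hypothetical.
 */
#if 0
static int example_fabric_submit_io(struct se_cmd *se_cmd,
				    struct scatterlist *sgl, u32 sgl_count)
{
	int ret;

	/* Let the core reuse the fabric's SGL instead of allocating pages */
	ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
					       NULL, 0);
	if (ret < 0)
		return ret;
	/*
	 * Allocate tasks and either queue them for execution (READs) or
	 * wait for the ->write_pending() callback (WRITEs).
	 */
	return transport_generic_new_cmd(se_cmd);
}
#endif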
3512 
3513 void *transport_kmap_first_data_page(struct se_cmd *cmd)
3514 {
3515 	struct scatterlist *sg = cmd->t_data_sg;
3516 
3517 	BUG_ON(!sg);
3518 	/*
3519 	 * We need to take into account a possible offset here for fabrics like
3520 	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
3521 	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3522 	 */
3523 	return kmap(sg_page(sg)) + sg->offset;
3524 }
3525 EXPORT_SYMBOL(transport_kmap_first_data_page);
3526 
3527 void transport_kunmap_first_data_page(struct se_cmd *cmd)
3528 {
3529 	kunmap(sg_page(cmd->t_data_sg));
3530 }
3531 EXPORT_SYMBOL(transport_kunmap_first_data_page);
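
/*
 * Editor's example (illustrative sketch, not part of the original file):
 * peeking at the payload of a control CDB with the first-data-page kmap
 * helpers above.  Assumes cmd->data_length is non-zero; the function name
 * is hypothetical.
 */
#if 0
static void example_peek_first_payload_byte(struct se_cmd *cmd)
{
	unsigned char *buf;

	/* Maps sg_page(cmd->t_data_sg) and applies sg->offset for us */
	buf = transport_kmap_first_data_page(cmd);
	pr_debug("First payload byte: 0x%02x\n", buf[0]);
	transport_kunmap_first_data_page(cmd);
}
#endif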
3532 
3533 static int
3534 transport_generic_get_mem(struct se_cmd *cmd)
3535 {
3536 	u32 length = cmd->data_length;
3537 	unsigned int nents;
3538 	struct page *page;
3539 	int i = 0;
3540 
3541 	nents = DIV_ROUND_UP(length, PAGE_SIZE);
3542 	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3543 	if (!cmd->t_data_sg)
3544 		return -ENOMEM;
3545 
3546 	cmd->t_data_nents = nents;
3547 	sg_init_table(cmd->t_data_sg, nents);
3548 
3549 	while (length) {
3550 		u32 page_len = min_t(u32, length, PAGE_SIZE);
3551 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3552 		if (!page)
3553 			goto out;
3554 
3555 		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3556 		length -= page_len;
3557 		i++;
3558 	}
3559 	return 0;
3560 
3561 out:
3562 	while (i > 0) {
3563 		i--;
3564 		__free_page(sg_page(&cmd->t_data_sg[i]));
3565 	}
3566 	kfree(cmd->t_data_sg);
3567 	cmd->t_data_sg = NULL;
3568 	return -ENOMEM;
3569 }
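
/*
 * Editor's note, a worked example of the allocation above: with
 * cmd->data_length = 8192 and PAGE_SIZE = 4096, nents = DIV_ROUND_UP(8192,
 * 4096) = 2, so two zeroed pages are allocated and described by a
 * two-entry cmd->t_data_sg table.
 */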
3570 
3571 /* Reduce sectors if they exceed the device's max_sectors or end-of-device limit */
3572 static inline sector_t transport_limit_task_sectors(
3573 	struct se_device *dev,
3574 	unsigned long long lba,
3575 	sector_t sectors)
3576 {
3577 	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
3578 
3579 	if (dev->transport->get_device_type(dev) == TYPE_DISK)
3580 		if ((lba + sectors) > transport_dev_end_lba(dev))
3581 			sectors = ((transport_dev_end_lba(dev) - lba) + 1);
3582 
3583 	return sectors;
3584 }
3585 
3586 
3587 /*
3588  * This function can be used by HW target mode drivers to create a linked
3589  * scatterlist from all contiguously allocated struct se_task->task_sg[].
3590  * This is intended to be called during the completion path by TCM Core
3591  * when struct target_core_fabric_ops->task_sg_chaining is enabled.
3592  */
3593 void transport_do_task_sg_chain(struct se_cmd *cmd)
3594 {
3595 	struct scatterlist *sg_first = NULL;
3596 	struct scatterlist *sg_prev = NULL;
3597 	int sg_prev_nents = 0;
3598 	struct scatterlist *sg;
3599 	struct se_task *task;
3600 	u32 chained_nents = 0;
3601 	int i;
3602 
3603 	BUG_ON(!cmd->se_tfo->task_sg_chaining);
3604 
3605 	/*
3606 	 * Walk the struct se_task list and setup scatterlist chains
3607 	 * for each contiguously allocated struct se_task->task_sg[].
3608 	 */
3609 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
3610 		if (!task->task_sg)
3611 			continue;
3612 
3613 		if (!sg_first) {
3614 			sg_first = task->task_sg;
3615 			chained_nents = task->task_sg_nents;
3616 		} else {
3617 			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
3618 			chained_nents += task->task_sg_nents;
3619 		}
3620 		/*
3621 		 * For the padded tasks, use the extra SGL vector allocated
3622 		 * in transport_allocate_data_tasks() for the sg_prev_nents
3623 		 * offset into sg_chain() above.
3624 		 *
3625 		 * We do not need the padding for the last task (or a single
3626 		 * task), but in that case we will never use the sg_prev_nents
3627 		 * value below which would be incorrect.
3628 		 */
3629 		sg_prev_nents = (task->task_sg_nents + 1);
3630 		sg_prev = task->task_sg;
3631 	}
3632 	/*
3633 	 * Setup the starting pointer and total t_tasks_sg_chained_no including
3634 	 * padding SGs for linking and to mark the end.
3635 	 */
3636 	cmd->t_tasks_sg_chained = sg_first;
3637 	cmd->t_tasks_sg_chained_no = chained_nents;
3638 
3639 	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
3640 		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
3641 		cmd->t_tasks_sg_chained_no);
3642 
3643 	for_each_sg(cmd->t_tasks_sg_chained, sg,
3644 			cmd->t_tasks_sg_chained_no, i) {
3645 
3646 		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
3647 			i, sg, sg_page(sg), sg->length, sg->offset);
3648 		if (sg_is_chain(sg))
3649 			pr_debug("SG: %p sg_is_chain=1\n", sg);
3650 		if (sg_is_last(sg))
3651 			pr_debug("SG: %p sg_is_last=1\n", sg);
3652 	}
3653 }
3654 EXPORT_SYMBOL(transport_do_task_sg_chain);
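
/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a HW target mode driver with ->task_sg_chaining enabled might walk the
 * chained SGL built above from its completion path, much like the debug
 * loop in transport_do_task_sg_chain() itself.  The function name is
 * hypothetical.
 */
#if 0
static void example_walk_chained_sgl(struct se_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	transport_do_task_sg_chain(cmd);

	/* Hand the single chained SGL to the HW DMA engine */
	for_each_sg(cmd->t_tasks_sg_chained, sg,
		    cmd->t_tasks_sg_chained_no, i) {
		/* program the hardware descriptor for sg here */
	}
}
#endif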
3655 
3656 /*
3657  * Break up cmd into chunks transport can handle
3658  */
3659 static int
3660 transport_allocate_data_tasks(struct se_cmd *cmd,
3661 	enum dma_data_direction data_direction,
3662 	struct scatterlist *cmd_sg, unsigned int sgl_nents)
3663 {
3664 	struct se_device *dev = cmd->se_dev;
3665 	int task_count, i;
3666 	unsigned long long lba;
3667 	sector_t sectors, dev_max_sectors;
3668 	u32 sector_size;
3669 
3670 	if (transport_cmd_get_valid_sectors(cmd) < 0)
3671 		return -EINVAL;
3672 
3673 	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
3674 	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
3675 
3676 	WARN_ON(cmd->data_length % sector_size);
3677 
3678 	lba = cmd->t_task_lba;
3679 	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
3680 	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
3681 
3682 	/*
3683 	 * If we need just a single task reuse the SG list in the command
3684 	 * and avoid a lot of work.
3685 	 */
3686 	if (task_count == 1) {
3687 		struct se_task *task;
3688 		unsigned long flags;
3689 
3690 		task = transport_generic_get_task(cmd, data_direction);
3691 		if (!task)
3692 			return -ENOMEM;
3693 
3694 		task->task_sg = cmd_sg;
3695 		task->task_sg_nents = sgl_nents;
3696 
3697 		task->task_lba = lba;
3698 		task->task_sectors = sectors;
3699 		task->task_size = task->task_sectors * sector_size;
3700 
3701 		spin_lock_irqsave(&cmd->t_state_lock, flags);
3702 		list_add_tail(&task->t_list, &cmd->t_task_list);
3703 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3704 
3705 		return task_count;
3706 	}
3707 
3708 	for (i = 0; i < task_count; i++) {
3709 		struct se_task *task;
3710 		unsigned int task_size, task_sg_nents_padded;
3711 		struct scatterlist *sg;
3712 		unsigned long flags;
3713 		int count;
3714 
3715 		task = transport_generic_get_task(cmd, data_direction);
3716 		if (!task)
3717 			return -ENOMEM;
3718 
3719 		task->task_lba = lba;
3720 		task->task_sectors = min(sectors, dev_max_sectors);
3721 		task->task_size = task->task_sectors * sector_size;
3722 
3723 		/*
3724 		 * This assumes that the passed sg_ents are in PAGE_SIZE chunks
3725 		 * in order to calculate the number of per-task SGL entries
3726 		 */
3727 		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
3728 		/*
3729 		 * Check if the fabric module driver is requesting that all
3730 		 * struct se_task->task_sg[] be chained together.  If so,
3731 		 * allocate an extra padding SG entry for linking and for
3732 		 * marking the end of the chained SGL for every task except
3733 		 * the last one; the (task_count == 1) case was already
3734 		 * handled by the early return above and needs no padding.
3735 		 */
3736 		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
3737 			task_sg_nents_padded = (task->task_sg_nents + 1);
3738 		} else
3739 			task_sg_nents_padded = task->task_sg_nents;
3740 
3741 		task->task_sg = kmalloc(sizeof(struct scatterlist) *
3742 					task_sg_nents_padded, GFP_KERNEL);
3743 		if (!task->task_sg) {
3744 			cmd->se_dev->transport->free_task(task);
3745 			return -ENOMEM;
3746 		}
3747 
3748 		sg_init_table(task->task_sg, task_sg_nents_padded);
3749 
3750 		task_size = task->task_size;
3751 
3752 		/* Build new sgl, only up to task_size */
3753 		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
3754 			if (cmd_sg->length > task_size)
3755 				break;
3756 
3757 			*sg = *cmd_sg;
3758 			task_size -= cmd_sg->length;
3759 			cmd_sg = sg_next(cmd_sg);
3760 		}
3761 
3762 		lba += task->task_sectors;
3763 		sectors -= task->task_sectors;
3764 
3765 		spin_lock_irqsave(&cmd->t_state_lock, flags);
3766 		list_add_tail(&task->t_list, &cmd->t_task_list);
3767 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3768 	}
3769 
3770 	return task_count;
3771 }
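
/*
 * Editor's note, a worked example of the split above: with
 * cmd->data_length = 1 MiB, block_size = 512 and max_sectors = 1024,
 * sectors = DIV_ROUND_UP(1048576, 512) = 2048 and
 * task_count = DIV_ROUND_UP_SECTOR_T(2048, 1024) = 2, so the command is
 * broken into two struct se_task descriptors of 1024 sectors (512 KiB)
 * each, covering consecutive LBA ranges.
 */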
3772 
3773 static int
3774 transport_allocate_control_task(struct se_cmd *cmd)
3775 {
3776 	struct se_task *task;
3777 	unsigned long flags;
3778 
3779 	task = transport_generic_get_task(cmd, cmd->data_direction);
3780 	if (!task)
3781 		return -ENOMEM;
3782 
3783 	task->task_sg = cmd->t_data_sg;
3784 	task->task_size = cmd->data_length;
3785 	task->task_sg_nents = cmd->t_data_nents;
3786 
3787 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3788 	list_add_tail(&task->t_list, &cmd->t_task_list);
3789 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3790 
3791 	/* Success! Return number of tasks allocated */
3792 	return 1;
3793 }
3794 
3795 /*
3796  * Allocate any required resources to execute the command, and either place
3797  * it on the execution queue if possible.  For writes we might not have the
3798  * payload yet, so notify the fabric via a call to ->write_pending instead.
3799  */
3800 int transport_generic_new_cmd(struct se_cmd *cmd)
3801 {
3802 	struct se_device *dev = cmd->se_dev;
3803 	int task_cdbs, task_cdbs_bidi = 0;
3804 	int set_counts = 1;
3805 	int ret = 0;
3806 
3807 	/*
3808 	 * Determine if the TCM fabric module has already allocated physical
3809 	 * memory and directly called transport_generic_map_mem_to_cmd()
3810 	 * beforehand.
3811 	 */
3812 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3813 	    cmd->data_length) {
3814 		ret = transport_generic_get_mem(cmd);
3815 		if (ret < 0)
3816 			return ret;
3817 	}
3818 
3819 	/*
3820 	 * For BIDI command set up the read tasks first.
3821 	 */
3822 	if (cmd->t_bidi_data_sg &&
3823 	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3824 		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3825 
3826 		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3827 				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3828 				cmd->t_bidi_data_nents);
3829 		if (task_cdbs_bidi <= 0)
3830 			goto out_fail;
3831 
3832 		atomic_inc(&cmd->t_fe_count);
3833 		atomic_inc(&cmd->t_se_count);
3834 		set_counts = 0;
3835 	}
3836 
3837 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3838 		task_cdbs = transport_allocate_data_tasks(cmd,
3839 					cmd->data_direction, cmd->t_data_sg,
3840 					cmd->t_data_nents);
3841 	} else {
3842 		task_cdbs = transport_allocate_control_task(cmd);
3843 	}
3844 
3845 	if (task_cdbs <= 0)
3846 		goto out_fail;
3847 
3848 	if (set_counts) {
3849 		atomic_inc(&cmd->t_fe_count);
3850 		atomic_inc(&cmd->t_se_count);
3851 	}
3852 
3853 	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
3854 	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3855 	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
3856 
3857 	/*
3858 	 * For WRITEs, let the fabric know its buffer is ready.
3859 	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
3860 	 * will be added to the struct se_device execution queue after its WRITE
3861 	 * data has arrived (i.e. it gets handled by the transport processing
3862 	 * thread a second time).
3863 	 */
3864 	if (cmd->data_direction == DMA_TO_DEVICE) {
3865 		transport_add_tasks_to_state_queue(cmd);
3866 		return transport_generic_write_pending(cmd);
3867 	}
3868 	/*
3869 	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
3870 	 * to the execution queue.
3871 	 */
3872 	transport_execute_tasks(cmd);
3873 	return 0;
3874 
3875 out_fail:
3876 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3877 	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3878 	return -EINVAL;
3879 }
3880 EXPORT_SYMBOL(transport_generic_new_cmd);
3881 
3882 /*	transport_generic_process_write():
3883  *
3884  *
3885  */
3886 void transport_generic_process_write(struct se_cmd *cmd)
3887 {
3888 	transport_execute_tasks(cmd);
3889 }
3890 EXPORT_SYMBOL(transport_generic_process_write);
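
/*
 * Editor's example (illustrative sketch, not part of the original file):
 * once a fabric has received the complete WRITE payload announced via
 * ->write_pending(), it can kick execution of the queued tasks.  The
 * function name is hypothetical; some fabrics instead go through
 * transport_generic_handle_data() from interrupt context.
 */
#if 0
static void example_fabric_write_data_received(struct se_cmd *se_cmd)
{
	/* The payload now sits in se_cmd->t_data_sg; execute the tasks */
	transport_generic_process_write(se_cmd);
}
#endif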
3891 
3892 static void transport_write_pending_qf(struct se_cmd *cmd)
3893 {
3894 	int ret;
3895 
3896 	ret = cmd->se_tfo->write_pending(cmd);
3897 	if (ret == -EAGAIN || ret == -ENOMEM) {
3898 		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
3899 			 cmd);
3900 		transport_handle_queue_full(cmd, cmd->se_dev);
3901 	}
3902 }
3903 
3904 static int transport_generic_write_pending(struct se_cmd *cmd)
3905 {
3906 	unsigned long flags;
3907 	int ret;
3908 
3909 	spin_lock_irqsave(&cmd->t_state_lock, flags);
3910 	cmd->t_state = TRANSPORT_WRITE_PENDING;
3911 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3912 
3913 	/*
3914 	 * Clear the se_cmd for WRITE_PENDING status in order to set
3915 	 * cmd->t_transport_active=0 so that transport_generic_handle_data
3916 	 * can be called from HW target mode interrupt code.  It is safe to
3917 	 * call this with transport_off=1 before cmd->se_tfo->write_pending()
3918 	 * because the se_cmd->se_lun pointer is not being cleared.
3919 	 */
3920 	transport_cmd_check_stop(cmd, 1, 0);
3921 
3922 	/*
3923 	 * Call the fabric write_pending function here to let the
3924 	 * frontend know that WRITE buffers are ready.
3925 	 */
3926 	ret = cmd->se_tfo->write_pending(cmd);
3927 	if (ret == -EAGAIN || ret == -ENOMEM)
3928 		goto queue_full;
3929 	else if (ret < 0)
3930 		return ret;
3931 
3932 	return PYX_TRANSPORT_WRITE_PENDING;
3933 
3934 queue_full:
3935 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
3936 	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3937 	transport_handle_queue_full(cmd, cmd->se_dev);
3938 	return 0;
3939 }
3940 
3941 /**
3942  * transport_release_cmd - free a command
3943  * @cmd:       command to free
3944  *
3945  * This routine unconditionally frees a command, and reference counting
3946  * or list removal must be done in the caller.
3947  */
3948 void transport_release_cmd(struct se_cmd *cmd)
3949 {
3950 	BUG_ON(!cmd->se_tfo);
3951 
3952 	if (cmd->se_tmr_req)
3953 		core_tmr_release_req(cmd->se_tmr_req);
3954 	if (cmd->t_task_cdb != cmd->__t_task_cdb)
3955 		kfree(cmd->t_task_cdb);
3956 	/*
3957 	 * Check if target_wait_for_sess_cmds() is expecting to
3958 	 * release se_cmd directly here.
3959 	 */
3960 	if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
3961 		if (cmd->se_tfo->check_release_cmd(cmd) != 0)
3962 			return;
3963 
3964 	cmd->se_tfo->release_cmd(cmd);
3965 }
3966 EXPORT_SYMBOL(transport_release_cmd);
3967 
3968 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3969 {
3970 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3971 		if (wait_for_tasks && cmd->se_tmr_req)
3972 			 transport_wait_for_tasks(cmd);
3973 
3974 		transport_release_cmd(cmd);
3975 	} else {
3976 		if (wait_for_tasks)
3977 			transport_wait_for_tasks(cmd);
3978 
3979 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3980 
3981 		if (cmd->se_lun)
3982 			transport_lun_remove_cmd(cmd);
3983 
3984 		transport_free_dev_tasks(cmd);
3985 
3986 		transport_put_cmd(cmd);
3987 	}
3988 }
3989 EXPORT_SYMBOL(transport_generic_free_cmd);
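
/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a fabric response-completion handler dropping its descriptor once status
 * has been sent.  The function name is hypothetical.
 */
#if 0
static void example_fabric_response_done(struct se_cmd *se_cmd)
{
	/*
	 * No need to wait for tasks on the normal completion path; pass
	 * wait_for_tasks = 1 instead when tearing a command down from an
	 * exception path.
	 */
	transport_generic_free_cmd(se_cmd, 0);
}
#endif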
3990 
3991 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
3992  * @se_sess:	session to reference
3993  * @se_cmd:	command descriptor to add
3994  */
3995 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3996 {
3997 	unsigned long flags;
3998 
3999 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4000 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
4001 	se_cmd->check_release = 1;
4002 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4003 }
4004 EXPORT_SYMBOL(target_get_sess_cmd);
4005 
4006 /* target_put_sess_cmd - Check for active I/O shutdown or list delete
4007  * @se_sess: 	session to reference
4008  * @se_cmd:	command descriptor to drop
4009  */
4010 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
4011 {
4012 	unsigned long flags;
4013 
4014 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4015 	if (list_empty(&se_cmd->se_cmd_list)) {
4016 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4017 		WARN_ON(1);
4018 		return 0;
4019 	}
4020 
4021 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
4022 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4023 		complete(&se_cmd->cmd_wait_comp);
4024 		return 1;
4025 	}
4026 	list_del(&se_cmd->se_cmd_list);
4027 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4028 
4029 	return 0;
4030 }
4031 EXPORT_SYMBOL(target_put_sess_cmd);
4032 
4033 /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
4034  * @se_sess:	session to split
4035  */
4036 void target_splice_sess_cmd_list(struct se_session *se_sess)
4037 {
4038 	struct se_cmd *se_cmd;
4039 	unsigned long flags;
4040 
4041 	WARN_ON(!list_empty(&se_sess->sess_wait_list));
4042 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
4043 
4044 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4045 	se_sess->sess_tearing_down = 1;
4046 
4047 	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
4048 
4049 	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
4050 		se_cmd->cmd_wait_set = 1;
4051 
4052 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4053 }
4054 EXPORT_SYMBOL(target_splice_sess_cmd_list);
4055 
4056 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
4057  * @se_sess:    session to wait for active I/O
4058  * @wait_for_tasks:	Make extra transport_wait_for_tasks call
4059  */
4060 void target_wait_for_sess_cmds(
4061 	struct se_session *se_sess,
4062 	int wait_for_tasks)
4063 {
4064 	struct se_cmd *se_cmd, *tmp_cmd;
4065 	bool rc = false;
4066 
4067 	list_for_each_entry_safe(se_cmd, tmp_cmd,
4068 				&se_sess->sess_wait_list, se_cmd_list) {
4069 		list_del(&se_cmd->se_cmd_list);
4070 
4071 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
4072 			" %d\n", se_cmd, se_cmd->t_state,
4073 			se_cmd->se_tfo->get_cmd_state(se_cmd));
4074 
4075 		if (wait_for_tasks) {
4076 			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
4077 				" fabric state: %d\n", se_cmd, se_cmd->t_state,
4078 				se_cmd->se_tfo->get_cmd_state(se_cmd));
4079 
4080 			rc = transport_wait_for_tasks(se_cmd);
4081 
4082 			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
4083 				" fabric state: %d\n", se_cmd, se_cmd->t_state,
4084 				se_cmd->se_tfo->get_cmd_state(se_cmd));
4085 		}
4086 
4087 		if (!rc) {
4088 			wait_for_completion(&se_cmd->cmd_wait_comp);
4089 			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
4090 				" fabric state: %d\n", se_cmd, se_cmd->t_state,
4091 				se_cmd->se_tfo->get_cmd_state(se_cmd));
4092 		}
4093 
4094 		se_cmd->se_tfo->release_cmd(se_cmd);
4095 	}
4096 }
4097 EXPORT_SYMBOL(target_wait_for_sess_cmds);
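
/*
 * Editor's example (illustrative sketch, not part of the original file):
 * the intended pairing of the sess_cmd_list helpers above.  On the submit
 * path the fabric calls target_get_sess_cmd(), its ->check_release_cmd()
 * callback ends up in target_put_sess_cmd(), and session shutdown splices
 * and then waits for everything still outstanding.  The function name
 * below is hypothetical.
 */
#if 0
static void example_fabric_close_session(struct se_session *se_sess)
{
	/* Mark the session as tearing down and move cmds to sess_wait_list */
	target_splice_sess_cmd_list(se_sess);

	/* Block until every outstanding descriptor has been released */
	target_wait_for_sess_cmds(se_sess, 0);
}
#endif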
4098 
4099 /*	transport_lun_wait_for_tasks():
4100  *
4101  *	Called from ConfigFS context to stop the passed struct se_cmd to allow
4102  *	a struct se_lun to be successfully shut down.
4103  */
4104 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4105 {
4106 	unsigned long flags;
4107 	int ret;
4108 	/*
4109 	 * If the frontend has already requested this struct se_cmd to
4110 	 * be stopped, we can safely ignore this struct se_cmd.
4111 	 */
4112 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4113 	if (atomic_read(&cmd->t_transport_stop)) {
4114 		atomic_set(&cmd->transport_lun_stop, 0);
4115 		pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
4116 			" TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
4117 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4118 		transport_cmd_check_stop(cmd, 1, 0);
4119 		return -EPERM;
4120 	}
4121 	atomic_set(&cmd->transport_lun_fe_stop, 1);
4122 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4123 
4124 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4125 
4126 	ret = transport_stop_tasks_for_cmd(cmd);
4127 
4128 	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4129 			" %d\n", cmd, cmd->t_task_list_num, ret);
4130 	if (!ret) {
4131 		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4132 				cmd->se_tfo->get_task_tag(cmd));
4133 		wait_for_completion(&cmd->transport_lun_stop_comp);
4134 		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4135 				cmd->se_tfo->get_task_tag(cmd));
4136 	}
4137 	transport_remove_cmd_from_queue(cmd);
4138 
4139 	return 0;
4140 }
4141 
4142 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4143 {
4144 	struct se_cmd *cmd = NULL;
4145 	unsigned long lun_flags, cmd_flags;
4146 	/*
4147 	 * Do exception processing and return CHECK_CONDITION status to the
4148 	 * Initiator Port.
4149 	 */
4150 	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4151 	while (!list_empty(&lun->lun_cmd_list)) {
4152 		cmd = list_first_entry(&lun->lun_cmd_list,
4153 		       struct se_cmd, se_lun_node);
4154 		list_del(&cmd->se_lun_node);
4155 
4156 		atomic_set(&cmd->transport_lun_active, 0);
4157 		/*
4158 		 * This will notify transport_cmd_check_stop() that a LUN
4159 		 * shutdown is in progress for this struct se_cmd
4160 		 * (the iscsi_cmd_t in the iSCSI fabric case).
4161 		 */
4162 		spin_lock(&cmd->t_state_lock);
4163 		pr_debug("SE_LUN[%d] - Setting cmd->transport"
4164 			"_lun_stop for  ITT: 0x%08x\n",
4165 			cmd->se_lun->unpacked_lun,
4166 			cmd->se_tfo->get_task_tag(cmd));
4167 		atomic_set(&cmd->transport_lun_stop, 1);
4168 		spin_unlock(&cmd->t_state_lock);
4169 
4170 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4171 
4172 		if (!cmd->se_lun) {
4173 			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4174 				cmd->se_tfo->get_task_tag(cmd),
4175 				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4176 			BUG();
4177 		}
4178 		/*
4179 		 * If the Storage engine still owns the iscsi_cmd_t, determine
4180 		 * and/or stop its context.
4181 		 */
4182 		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4183 			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4184 			cmd->se_tfo->get_task_tag(cmd));
4185 
4186 		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
4187 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4188 			continue;
4189 		}
4190 
4191 		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4192 			"_wait_for_tasks(): SUCCESS\n",
4193 			cmd->se_lun->unpacked_lun,
4194 			cmd->se_tfo->get_task_tag(cmd));
4195 
4196 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4197 		if (!atomic_read(&cmd->transport_dev_active)) {
4198 			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4199 			goto check_cond;
4200 		}
4201 		atomic_set(&cmd->transport_dev_active, 0);
4202 		transport_all_task_dev_remove_state(cmd);
4203 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4204 
4205 		transport_free_dev_tasks(cmd);
4206 		/*
4207 		 * The Storage engine stopped this struct se_cmd before it was
4208 		 * sent to the fabric frontend for delivery back to the
4209 		 * Initiator Node.  Return this SCSI CDB back with a
4210 		 * CHECK_CONDITION status.
4211 		 */
4212 check_cond:
4213 		transport_send_check_condition_and_sense(cmd,
4214 				TCM_NON_EXISTENT_LUN, 0);
4215 		/*
4216 		 * If the fabric frontend is waiting for this iscsi_cmd_t to
4217 		 * be released, notify the waiting thread now that the LU has
4218 		 * finished accessing it.
4219 		 */
4220 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4221 		if (atomic_read(&cmd->transport_lun_fe_stop)) {
4222 			pr_debug("SE_LUN[%d] - Detected FE stop for"
4223 				" struct se_cmd: %p ITT: 0x%08x\n",
4224 				lun->unpacked_lun,
4225 				cmd, cmd->se_tfo->get_task_tag(cmd));
4226 
4227 			spin_unlock_irqrestore(&cmd->t_state_lock,
4228 					cmd_flags);
4229 			transport_cmd_check_stop(cmd, 1, 0);
4230 			complete(&cmd->transport_lun_fe_stop_comp);
4231 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4232 			continue;
4233 		}
4234 		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4235 			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4236 
4237 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4238 		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4239 	}
4240 	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4241 }
4242 
4243 static int transport_clear_lun_thread(void *p)
4244 {
4245 	struct se_lun *lun = (struct se_lun *)p;
4246 
4247 	__transport_clear_lun_from_sessions(lun);
4248 	complete(&lun->lun_shutdown_comp);
4249 
4250 	return 0;
4251 }
4252 
4253 int transport_clear_lun_from_sessions(struct se_lun *lun)
4254 {
4255 	struct task_struct *kt;
4256 
4257 	kt = kthread_run(transport_clear_lun_thread, lun,
4258 			"tcm_cl_%u", lun->unpacked_lun);
4259 	if (IS_ERR(kt)) {
4260 		pr_err("Unable to start clear_lun thread\n");
4261 		return PTR_ERR(kt);
4262 	}
4263 	wait_for_completion(&lun->lun_shutdown_comp);
4264 
4265 	return 0;
4266 }
4267 
4268 /**
4269  * transport_wait_for_tasks - wait for completion to occur
4270  * @cmd:	command to wait
4271  *
4272  * Called from frontend fabric context to wait for storage engine
4273  * to pause and/or release frontend generated struct se_cmd.
4274  */
4275 bool transport_wait_for_tasks(struct se_cmd *cmd)
4276 {
4277 	unsigned long flags;
4278 
4279 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4280 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
4281 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4282 		return false;
4283 	}
4284 	/*
4285 	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
4286 	 * has been set in transport_set_supported_SAM_opcode().
4287 	 */
4288 	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
4289 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4290 		return false;
4291 	}
4292 	/*
4293 	 * If we are already stopped due to an external event (i.e. LUN shutdown)
4294 	 * sleep until the connection can have the passed struct se_cmd back.
4295 	 * The cmd->transport_lun_fe_stop_comp will be completed by
4296 	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
4297 	 * has completed its operation on the struct se_cmd.
4298 	 */
4299 	if (atomic_read(&cmd->transport_lun_stop)) {
4300 
4301 		pr_debug("wait_for_tasks: Stopping"
4302 			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
4303 			"_stop_comp); for ITT: 0x%08x\n",
4304 			cmd->se_tfo->get_task_tag(cmd));
4305 		/*
4306 		 * There is a special case for WRITES where a FE exception +
4307 		 * LUN shutdown means ConfigFS context is still sleeping on
4308 		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4309 		 * We go ahead and complete transport_lun_stop_comp just to be
4310 		 * sure here.
4311 		 */
4312 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4313 		complete(&cmd->transport_lun_stop_comp);
4314 		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4315 		spin_lock_irqsave(&cmd->t_state_lock, flags);
4316 
4317 		transport_all_task_dev_remove_state(cmd);
4318 		/*
4319 		 * At this point, the frontend who was the originator of this
4320 		 * struct se_cmd, now owns the structure and can be released through
4321 		 * normal means below.
4322 		 */
4323 		pr_debug("wait_for_tasks: Stopped"
4324 			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
4325 			"stop_comp); for ITT: 0x%08x\n",
4326 			cmd->se_tfo->get_task_tag(cmd));
4327 
4328 		atomic_set(&cmd->transport_lun_stop, 0);
4329 	}
4330 	if (!atomic_read(&cmd->t_transport_active) ||
4331 	     atomic_read(&cmd->t_transport_aborted)) {
4332 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4333 		return false;
4334 	}
4335 
4336 	atomic_set(&cmd->t_transport_stop, 1);
4337 
4338 	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4339 		" i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
4340 		cmd, cmd->se_tfo->get_task_tag(cmd),
4341 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4342 
4343 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4344 
4345 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4346 
4347 	wait_for_completion(&cmd->t_transport_stop_comp);
4348 
4349 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4350 	atomic_set(&cmd->t_transport_active, 0);
4351 	atomic_set(&cmd->t_transport_stop, 0);
4352 
4353 	pr_debug("wait_for_tasks: Stopped wait_for_compltion("
4354 		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4355 		cmd->se_tfo->get_task_tag(cmd));
4356 
4357 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4358 
4359 	return true;
4360 }
4361 EXPORT_SYMBOL(transport_wait_for_tasks);
4362 
4363 static int transport_get_sense_codes(
4364 	struct se_cmd *cmd,
4365 	u8 *asc,
4366 	u8 *ascq)
4367 {
4368 	*asc = cmd->scsi_asc;
4369 	*ascq = cmd->scsi_ascq;
4370 
4371 	return 0;
4372 }
4373 
4374 static int transport_set_sense_codes(
4375 	struct se_cmd *cmd,
4376 	u8 asc,
4377 	u8 ascq)
4378 {
4379 	cmd->scsi_asc = asc;
4380 	cmd->scsi_ascq = ascq;
4381 
4382 	return 0;
4383 }
4384 
4385 int transport_send_check_condition_and_sense(
4386 	struct se_cmd *cmd,
4387 	u8 reason,
4388 	int from_transport)
4389 {
4390 	unsigned char *buffer = cmd->sense_buffer;
4391 	unsigned long flags;
4392 	int offset;
4393 	u8 asc = 0, ascq = 0;
4394 
4395 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4396 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4397 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4398 		return 0;
4399 	}
4400 	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
4401 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4402 
4403 	if (!reason && from_transport)
4404 		goto after_reason;
4405 
4406 	if (!from_transport)
4407 		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4408 	/*
4409 	 * Data Segment and SenseLength of the fabric response PDU.
4410 	 *
4411 	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4412 	 * from include/scsi/scsi_cmnd.h
4413 	 */
4414 	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
4415 				TRANSPORT_SENSE_BUFFER);
4416 	/*
4417 	 * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses
4418 	 * SENSE KEY values from include/scsi/scsi.h
4419 	 */
4420 	switch (reason) {
4421 	case TCM_NON_EXISTENT_LUN:
4422 		/* CURRENT ERROR */
4423 		buffer[offset] = 0x70;
4424 		/* ILLEGAL REQUEST */
4425 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4426 		/* LOGICAL UNIT NOT SUPPORTED */
4427 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4428 		break;
4429 	case TCM_UNSUPPORTED_SCSI_OPCODE:
4430 	case TCM_SECTOR_COUNT_TOO_MANY:
4431 		/* CURRENT ERROR */
4432 		buffer[offset] = 0x70;
4433 		/* ILLEGAL REQUEST */
4434 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4435 		/* INVALID COMMAND OPERATION CODE */
4436 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4437 		break;
4438 	case TCM_UNKNOWN_MODE_PAGE:
4439 		/* CURRENT ERROR */
4440 		buffer[offset] = 0x70;
4441 		/* ILLEGAL REQUEST */
4442 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4443 		/* INVALID FIELD IN CDB */
4444 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4445 		break;
4446 	case TCM_CHECK_CONDITION_ABORT_CMD:
4447 		/* CURRENT ERROR */
4448 		buffer[offset] = 0x70;
4449 		/* ABORTED COMMAND */
4450 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4451 		/* BUS DEVICE RESET FUNCTION OCCURRED */
4452 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4453 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4454 		break;
4455 	case TCM_INCORRECT_AMOUNT_OF_DATA:
4456 		/* CURRENT ERROR */
4457 		buffer[offset] = 0x70;
4458 		/* ABORTED COMMAND */
4459 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4460 		/* WRITE ERROR */
4461 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4462 		/* NOT ENOUGH UNSOLICITED DATA */
4463 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4464 		break;
4465 	case TCM_INVALID_CDB_FIELD:
4466 		/* CURRENT ERROR */
4467 		buffer[offset] = 0x70;
4468 		/* ABORTED COMMAND */
4469 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4470 		/* INVALID FIELD IN CDB */
4471 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4472 		break;
4473 	case TCM_INVALID_PARAMETER_LIST:
4474 		/* CURRENT ERROR */
4475 		buffer[offset] = 0x70;
4476 		/* ABORTED COMMAND */
4477 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4478 		/* INVALID FIELD IN PARAMETER LIST */
4479 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4480 		break;
4481 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
4482 		/* CURRENT ERROR */
4483 		buffer[offset] = 0x70;
4484 		/* ABORTED COMMAND */
4485 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4486 		/* WRITE ERROR */
4487 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4488 		/* UNEXPECTED_UNSOLICITED_DATA */
4489 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4490 		break;
4491 	case TCM_SERVICE_CRC_ERROR:
4492 		/* CURRENT ERROR */
4493 		buffer[offset] = 0x70;
4494 		/* ABORTED COMMAND */
4495 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4496 		/* PROTOCOL SERVICE CRC ERROR */
4497 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4498 		/* N/A */
4499 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4500 		break;
4501 	case TCM_SNACK_REJECTED:
4502 		/* CURRENT ERROR */
4503 		buffer[offset] = 0x70;
4504 		/* ABORTED COMMAND */
4505 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4506 		/* READ ERROR */
4507 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4508 		/* FAILED RETRANSMISSION REQUEST */
4509 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4510 		break;
4511 	case TCM_WRITE_PROTECTED:
4512 		/* CURRENT ERROR */
4513 		buffer[offset] = 0x70;
4514 		/* DATA PROTECT */
4515 		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4516 		/* WRITE PROTECTED */
4517 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4518 		break;
4519 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4520 		/* CURRENT ERROR */
4521 		buffer[offset] = 0x70;
4522 		/* UNIT ATTENTION */
4523 		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4524 		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4525 		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4526 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4527 		break;
4528 	case TCM_CHECK_CONDITION_NOT_READY:
4529 		/* CURRENT ERROR */
4530 		buffer[offset] = 0x70;
4531 		/* Not Ready */
4532 		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4533 		transport_get_sense_codes(cmd, &asc, &ascq);
4534 		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4535 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4536 		break;
4537 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4538 	default:
4539 		/* CURRENT ERROR */
4540 		buffer[offset] = 0x70;
4541 		/* ILLEGAL REQUEST */
4542 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4543 		/* LOGICAL UNIT COMMUNICATION FAILURE */
4544 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
4545 		break;
4546 	}
4547 	/*
4548 	 * This code uses linux/include/scsi/scsi.h SAM status codes!
4549 	 */
4550 	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4551 	/*
4552 	 * Automatically padded, this value is encoded in the fabric's
4553 	 * data_length response PDU containing the SCSI defined sense data.
4554 	 */
4555 	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
4556 
4557 after_reason:
4558 	return cmd->se_tfo->queue_status(cmd);
4559 }
4560 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
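
/*
 * Editor's example (illustrative sketch, not part of the original file):
 * failing a command with sense data, analogous to the TCM_NON_EXISTENT_LUN
 * case in __transport_clear_lun_from_sessions() above.  The function name
 * is hypothetical.
 */
#if 0
static void example_fail_with_invalid_cdb(struct se_cmd *cmd)
{
	/* Builds fixed-format sense data and queues status to the fabric */
	transport_send_check_condition_and_sense(cmd,
			TCM_INVALID_CDB_FIELD, 0);
}
#endif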
4561 
4562 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4563 {
4564 	int ret = 0;
4565 
4566 	if (atomic_read(&cmd->t_transport_aborted) != 0) {
4567 		if (!send_status ||
4568 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4569 			return 1;
4570 #if 0
4571 		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4572 			" status for CDB: 0x%02x ITT: 0x%08x\n",
4573 			cmd->t_task_cdb[0],
4574 			cmd->se_tfo->get_task_tag(cmd));
4575 #endif
4576 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4577 		cmd->se_tfo->queue_status(cmd);
4578 		ret = 1;
4579 	}
4580 	return ret;
4581 }
4582 EXPORT_SYMBOL(transport_check_aborted_status);
4583 
4584 void transport_send_task_abort(struct se_cmd *cmd)
4585 {
4586 	unsigned long flags;
4587 
4588 	spin_lock_irqsave(&cmd->t_state_lock, flags);
4589 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4590 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4591 		return;
4592 	}
4593 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4594 
4595 	/*
4596 	 * If there are still expected incoming fabric WRITEs, we wait
4597 	 * until they have completed before sending a TASK_ABORTED
4598 	 * response.  This response with TASK_ABORTED status will be
4599 	 * queued back to fabric module by transport_check_aborted_status().
4600 	 */
4601 	if (cmd->data_direction == DMA_TO_DEVICE) {
4602 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4603 			atomic_inc(&cmd->t_transport_aborted);
4604 			smp_mb__after_atomic_inc();
4605 			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4606 			transport_new_cmd_failure(cmd);
4607 			return;
4608 		}
4609 	}
4610 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4611 #if 0
4612 	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4613 		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
4614 		cmd->se_tfo->get_task_tag(cmd));
4615 #endif
4616 	cmd->se_tfo->queue_status(cmd);
4617 }
4618 
4619 /*	transport_generic_do_tmr():
4620  *
4621  *
4622  */
4623 int transport_generic_do_tmr(struct se_cmd *cmd)
4624 {
4625 	struct se_device *dev = cmd->se_dev;
4626 	struct se_tmr_req *tmr = cmd->se_tmr_req;
4627 	int ret;
4628 
4629 	switch (tmr->function) {
4630 	case TMR_ABORT_TASK:
4631 		tmr->response = TMR_FUNCTION_REJECTED;
4632 		break;
4633 	case TMR_ABORT_TASK_SET:
4634 	case TMR_CLEAR_ACA:
4635 	case TMR_CLEAR_TASK_SET:
4636 		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4637 		break;
4638 	case TMR_LUN_RESET:
4639 		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4640 		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4641 					 TMR_FUNCTION_REJECTED;
4642 		break;
4643 	case TMR_TARGET_WARM_RESET:
4644 		tmr->response = TMR_FUNCTION_REJECTED;
4645 		break;
4646 	case TMR_TARGET_COLD_RESET:
4647 		tmr->response = TMR_FUNCTION_REJECTED;
4648 		break;
4649 	default:
4650 		pr_err("Uknown TMR function: 0x%02x.\n",
4651 				tmr->function);
4652 		tmr->response = TMR_FUNCTION_REJECTED;
4653 		break;
4654 	}
4655 
4656 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
4657 	cmd->se_tfo->queue_tm_rsp(cmd);
4658 
4659 	transport_cmd_check_stop_to_fabric(cmd);
4660 	return 0;
4661 }
4662 
4663 /*	transport_processing_thread():
4664  *
4665  *
4666  */
4667 static int transport_processing_thread(void *param)
4668 {
4669 	int ret;
4670 	struct se_cmd *cmd;
4671 	struct se_device *dev = (struct se_device *) param;
4672 
4673 	set_user_nice(current, -20);
4674 
4675 	while (!kthread_should_stop()) {
4676 		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4677 				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
4678 				kthread_should_stop());
4679 		if (ret < 0)
4680 			goto out;
4681 
4682 get_cmd:
4683 		__transport_execute_tasks(dev);
4684 
4685 		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4686 		if (!cmd)
4687 			continue;
4688 
4689 		switch (cmd->t_state) {
4690 		case TRANSPORT_NEW_CMD:
4691 			BUG();
4692 			break;
4693 		case TRANSPORT_NEW_CMD_MAP:
4694 			if (!cmd->se_tfo->new_cmd_map) {
4695 				pr_err("cmd->se_tfo->new_cmd_map is"
4696 					" NULL for TRANSPORT_NEW_CMD_MAP\n");
4697 				BUG();
4698 			}
4699 			ret = cmd->se_tfo->new_cmd_map(cmd);
4700 			if (ret < 0) {
4701 				cmd->transport_error_status = ret;
4702 				transport_generic_request_failure(cmd,
4703 						0, (cmd->data_direction !=
4704 						    DMA_TO_DEVICE));
4705 				break;
4706 			}
4707 			ret = transport_generic_new_cmd(cmd);
4708 			if (ret < 0) {
4709 				cmd->transport_error_status = ret;
4710 				transport_generic_request_failure(cmd,
4711 					0, (cmd->data_direction !=
4712 					 DMA_TO_DEVICE));
4713 			}
4714 			break;
4715 		case TRANSPORT_PROCESS_WRITE:
4716 			transport_generic_process_write(cmd);
4717 			break;
4718 		case TRANSPORT_PROCESS_TMR:
4719 			transport_generic_do_tmr(cmd);
4720 			break;
4721 		case TRANSPORT_COMPLETE_QF_WP:
4722 			transport_write_pending_qf(cmd);
4723 			break;
4724 		case TRANSPORT_COMPLETE_QF_OK:
4725 			transport_complete_qf(cmd);
4726 			break;
4727 		default:
4728 			pr_err("Unknown t_state: %d  for ITT: 0x%08x "
4729 				"i_state: %d on SE LUN: %u\n",
4730 				cmd->t_state,
4731 				cmd->se_tfo->get_task_tag(cmd),
4732 				cmd->se_tfo->get_cmd_state(cmd),
4733 				cmd->se_lun->unpacked_lun);
4734 			BUG();
4735 		}
4736 
4737 		goto get_cmd;
4738 	}
4739 
4740 out:
4741 	WARN_ON(!list_empty(&dev->state_task_list));
4742 	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
4743 	dev->process_thread = NULL;
4744 	return 0;
4745 }
4746