1 /*******************************************************************************
2  * Filename:  target_core_transport.c
3  *
4  * This file contains the Generic Target Engine Core.
5  *
6  * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7  * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8  * Copyright (c) 2007-2010 Rising Tide Systems
9  * Copyright (c) 2008-2010 Linux-iSCSI.org
10  *
11  * Nicholas A. Bellinger <nab@kernel.org>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26  *
27  ******************************************************************************/
28 
29 #include <linux/version.h>
30 #include <linux/net.h>
31 #include <linux/delay.h>
32 #include <linux/string.h>
33 #include <linux/timer.h>
34 #include <linux/slab.h>
35 #include <linux/blkdev.h>
36 #include <linux/spinlock.h>
37 #include <linux/kthread.h>
38 #include <linux/in.h>
39 #include <linux/cdrom.h>
40 #include <asm/unaligned.h>
41 #include <net/sock.h>
42 #include <net/tcp.h>
43 #include <scsi/scsi.h>
44 #include <scsi/scsi_cmnd.h>
45 #include <scsi/scsi_tcq.h>
46 
47 #include <target/target_core_base.h>
48 #include <target/target_core_device.h>
49 #include <target/target_core_tmr.h>
50 #include <target/target_core_tpg.h>
51 #include <target/target_core_transport.h>
52 #include <target/target_core_fabric_ops.h>
53 #include <target/target_core_configfs.h>
54 
55 #include "target_core_alua.h"
56 #include "target_core_hba.h"
57 #include "target_core_pr.h"
58 #include "target_core_scdb.h"
59 #include "target_core_ua.h"
60 
61 /* #define DEBUG_CDB_HANDLER */
62 #ifdef DEBUG_CDB_HANDLER
63 #define DEBUG_CDB_H(x...) printk(KERN_INFO x)
64 #else
65 #define DEBUG_CDB_H(x...)
66 #endif
67 
68 /* #define DEBUG_CMD_MAP */
69 #ifdef DEBUG_CMD_MAP
70 #define DEBUG_CMD_M(x...) printk(KERN_INFO x)
71 #else
72 #define DEBUG_CMD_M(x...)
73 #endif
74 
75 /* #define DEBUG_MEM_ALLOC */
76 #ifdef DEBUG_MEM_ALLOC
77 #define DEBUG_MEM(x...) printk(KERN_INFO x)
78 #else
79 #define DEBUG_MEM(x...)
80 #endif
81 
82 /* #define DEBUG_MEM2_ALLOC */
83 #ifdef DEBUG_MEM2_ALLOC
84 #define DEBUG_MEM2(x...) printk(KERN_INFO x)
85 #else
86 #define DEBUG_MEM2(x...)
87 #endif
88 
89 /* #define DEBUG_SG_CALC */
90 #ifdef DEBUG_SG_CALC
91 #define DEBUG_SC(x...) printk(KERN_INFO x)
92 #else
93 #define DEBUG_SC(x...)
94 #endif
95 
96 /* #define DEBUG_SE_OBJ */
97 #ifdef DEBUG_SE_OBJ
98 #define DEBUG_SO(x...) printk(KERN_INFO x)
99 #else
100 #define DEBUG_SO(x...)
101 #endif
102 
103 /* #define DEBUG_CMD_VOL */
104 #ifdef DEBUG_CMD_VOL
105 #define DEBUG_VOL(x...) printk(KERN_INFO x)
106 #else
107 #define DEBUG_VOL(x...)
108 #endif
109 
110 /* #define DEBUG_CMD_STOP */
111 #ifdef DEBUG_CMD_STOP
112 #define DEBUG_CS(x...) printk(KERN_INFO x)
113 #else
114 #define DEBUG_CS(x...)
115 #endif
116 
117 /* #define DEBUG_PASSTHROUGH */
118 #ifdef DEBUG_PASSTHROUGH
119 #define DEBUG_PT(x...) printk(KERN_INFO x)
120 #else
121 #define DEBUG_PT(x...)
122 #endif
123 
124 /* #define DEBUG_TASK_STOP */
125 #ifdef DEBUG_TASK_STOP
126 #define DEBUG_TS(x...) printk(KERN_INFO x)
127 #else
128 #define DEBUG_TS(x...)
129 #endif
130 
131 /* #define DEBUG_TRANSPORT_STOP */
132 #ifdef DEBUG_TRANSPORT_STOP
133 #define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
134 #else
135 #define DEBUG_TRANSPORT_S(x...)
136 #endif
137 
138 /* #define DEBUG_TASK_FAILURE */
139 #ifdef DEBUG_TASK_FAILURE
140 #define DEBUG_TF(x...) printk(KERN_INFO x)
141 #else
142 #define DEBUG_TF(x...)
143 #endif
144 
145 /* #define DEBUG_DEV_OFFLINE */
146 #ifdef DEBUG_DEV_OFFLINE
147 #define DEBUG_DO(x...) printk(KERN_INFO x)
148 #else
149 #define DEBUG_DO(x...)
150 #endif
151 
152 /* #define DEBUG_TASK_STATE */
153 #ifdef DEBUG_TASK_STATE
154 #define DEBUG_TSTATE(x...) printk(KERN_INFO x)
155 #else
156 #define DEBUG_TSTATE(x...)
157 #endif
158 
159 /* #define DEBUG_STATUS_THR */
160 #ifdef DEBUG_STATUS_THR
161 #define DEBUG_ST(x...) printk(KERN_INFO x)
162 #else
163 #define DEBUG_ST(x...)
164 #endif
165 
166 /* #define DEBUG_TASK_TIMEOUT */
167 #ifdef DEBUG_TASK_TIMEOUT
168 #define DEBUG_TT(x...) printk(KERN_INFO x)
169 #else
170 #define DEBUG_TT(x...)
171 #endif
172 
173 /* #define DEBUG_GENERIC_REQUEST_FAILURE */
174 #ifdef DEBUG_GENERIC_REQUEST_FAILURE
175 #define DEBUG_GRF(x...) printk(KERN_INFO x)
176 #else
177 #define DEBUG_GRF(x...)
178 #endif
179 
180 /* #define DEBUG_SAM_TASK_ATTRS */
181 #ifdef DEBUG_SAM_TASK_ATTRS
182 #define DEBUG_STA(x...) printk(KERN_INFO x)
183 #else
184 #define DEBUG_STA(x...)
185 #endif
186 
187 struct se_global *se_global;
188 
189 static struct kmem_cache *se_cmd_cache;
190 static struct kmem_cache *se_sess_cache;
191 struct kmem_cache *se_tmr_req_cache;
192 struct kmem_cache *se_ua_cache;
193 struct kmem_cache *se_mem_cache;
194 struct kmem_cache *t10_pr_reg_cache;
195 struct kmem_cache *t10_alua_lu_gp_cache;
196 struct kmem_cache *t10_alua_lu_gp_mem_cache;
197 struct kmem_cache *t10_alua_tg_pt_gp_cache;
198 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
199 
200 /* Used for transport_dev_get_map_*() */
201 typedef int (*map_func_t)(struct se_task *, u32);
202 
203 static int transport_generic_write_pending(struct se_cmd *);
204 static int transport_processing_thread(void *);
205 static int __transport_execute_tasks(struct se_device *dev);
206 static void transport_complete_task_attr(struct se_cmd *cmd);
207 static void transport_direct_request_timeout(struct se_cmd *cmd);
208 static void transport_free_dev_tasks(struct se_cmd *cmd);
209 static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
210 		unsigned long long starting_lba, u32 sectors,
211 		enum dma_data_direction data_direction,
212 		struct list_head *mem_list, int set_counts);
213 static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
214 		u32 dma_size);
215 static int transport_generic_remove(struct se_cmd *cmd,
216 		int release_to_pool, int session_reinstatement);
217 static int transport_get_sectors(struct se_cmd *cmd);
218 static struct list_head *transport_init_se_mem_list(void);
219 static int transport_map_sg_to_mem(struct se_cmd *cmd,
220 		struct list_head *se_mem_list, void *in_mem,
221 		u32 *se_mem_cnt);
222 static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
223 		unsigned char *dst, struct list_head *se_mem_list);
224 static void transport_release_fe_cmd(struct se_cmd *cmd);
225 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
226 		struct se_queue_obj *qobj);
227 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
228 static void transport_stop_all_task_timers(struct se_cmd *cmd);
229 
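/*
 * Allocate the struct se_global context and the kmem_cache backends used
 * throughout the target core.  Returns 0 on success, or -1 after tearing
 * down any caches that were already created.
 */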
230 int init_se_global(void)
231 {
232 	struct se_global *global;
233 
234 	global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
235 	if (!(global)) {
236 		printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
237 		return -1;
238 	}
239 
240 	INIT_LIST_HEAD(&global->g_lu_gps_list);
241 	INIT_LIST_HEAD(&global->g_se_tpg_list);
242 	INIT_LIST_HEAD(&global->g_hba_list);
243 	INIT_LIST_HEAD(&global->g_se_dev_list);
244 	spin_lock_init(&global->g_device_lock);
245 	spin_lock_init(&global->hba_lock);
246 	spin_lock_init(&global->se_tpg_lock);
247 	spin_lock_init(&global->lu_gps_lock);
248 	spin_lock_init(&global->plugin_class_lock);
249 
250 	se_cmd_cache = kmem_cache_create("se_cmd_cache",
251 			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
252 	if (!(se_cmd_cache)) {
253 		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
254 		goto out;
255 	}
256 	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
257 			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
258 			0, NULL);
259 	if (!(se_tmr_req_cache)) {
260 		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
261 				" failed\n");
262 		goto out;
263 	}
264 	se_sess_cache = kmem_cache_create("se_sess_cache",
265 			sizeof(struct se_session), __alignof__(struct se_session),
266 			0, NULL);
267 	if (!(se_sess_cache)) {
268 		printk(KERN_ERR "kmem_cache_create() for struct se_session"
269 				" failed\n");
270 		goto out;
271 	}
272 	se_ua_cache = kmem_cache_create("se_ua_cache",
273 			sizeof(struct se_ua), __alignof__(struct se_ua),
274 			0, NULL);
275 	if (!(se_ua_cache)) {
276 		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
277 		goto out;
278 	}
279 	se_mem_cache = kmem_cache_create("se_mem_cache",
280 			sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
281 	if (!(se_mem_cache)) {
282 		printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
283 		goto out;
284 	}
285 	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
286 			sizeof(struct t10_pr_registration),
287 			__alignof__(struct t10_pr_registration), 0, NULL);
288 	if (!(t10_pr_reg_cache)) {
289 		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
290 				" failed\n");
291 		goto out;
292 	}
293 	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
294 			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
295 			0, NULL);
296 	if (!(t10_alua_lu_gp_cache)) {
297 		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
298 				" failed\n");
299 		goto out;
300 	}
301 	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
302 			sizeof(struct t10_alua_lu_gp_member),
303 			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
304 	if (!(t10_alua_lu_gp_mem_cache)) {
305 		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
306 				"cache failed\n");
307 		goto out;
308 	}
309 	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
310 			sizeof(struct t10_alua_tg_pt_gp),
311 			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
312 	if (!(t10_alua_tg_pt_gp_cache)) {
313 		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
314 				"cache failed\n");
315 		goto out;
316 	}
317 	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
318 			"t10_alua_tg_pt_gp_mem_cache",
319 			sizeof(struct t10_alua_tg_pt_gp_member),
320 			__alignof__(struct t10_alua_tg_pt_gp_member),
321 			0, NULL);
322 	if (!(t10_alua_tg_pt_gp_mem_cache)) {
323 		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
324 				"mem_cache failed\n");
325 		goto out;
326 	}
327 
328 	se_global = global;
329 
330 	return 0;
331 out:
332 	if (se_cmd_cache)
333 		kmem_cache_destroy(se_cmd_cache);
334 	if (se_tmr_req_cache)
335 		kmem_cache_destroy(se_tmr_req_cache);
336 	if (se_sess_cache)
337 		kmem_cache_destroy(se_sess_cache);
338 	if (se_ua_cache)
339 		kmem_cache_destroy(se_ua_cache);
340 	if (se_mem_cache)
341 		kmem_cache_destroy(se_mem_cache);
342 	if (t10_pr_reg_cache)
343 		kmem_cache_destroy(t10_pr_reg_cache);
344 	if (t10_alua_lu_gp_cache)
345 		kmem_cache_destroy(t10_alua_lu_gp_cache);
346 	if (t10_alua_lu_gp_mem_cache)
347 		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
348 	if (t10_alua_tg_pt_gp_cache)
349 		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
350 	if (t10_alua_tg_pt_gp_mem_cache)
351 		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
352 	kfree(global);
353 	return -1;
354 }
355 
356 void release_se_global(void)
357 {
358 	struct se_global *global;
359 
360 	global = se_global;
361 	if (!(global))
362 		return;
363 
364 	kmem_cache_destroy(se_cmd_cache);
365 	kmem_cache_destroy(se_tmr_req_cache);
366 	kmem_cache_destroy(se_sess_cache);
367 	kmem_cache_destroy(se_ua_cache);
368 	kmem_cache_destroy(se_mem_cache);
369 	kmem_cache_destroy(t10_pr_reg_cache);
370 	kmem_cache_destroy(t10_alua_lu_gp_cache);
371 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
372 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
373 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
374 	kfree(global);
375 
376 	se_global = NULL;
377 }
378 
379 /* SCSI statistics table index */
380 static struct scsi_index_table scsi_index_table;
381 
382 /*
383  * Initialize the index table for allocating unique row indexes to various mib
384  * tables.
385  */
386 void init_scsi_index_table(void)
387 {
388 	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
389 	spin_lock_init(&scsi_index_table.lock);
390 }
391 
392 /*
393  * Allocate a new row index for the entry type specified
394  */
395 u32 scsi_get_new_index(scsi_index_t type)
396 {
397 	u32 new_index;
398 
399 	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
400 		printk(KERN_ERR "Invalid index type %d\n", type);
401 		return -EINVAL;
402 	}
403 
404 	spin_lock(&scsi_index_table.lock);
405 	new_index = ++scsi_index_table.scsi_mib_index[type];
406 	if (new_index == 0)
407 		new_index = ++scsi_index_table.scsi_mib_index[type];
408 	spin_unlock(&scsi_index_table.lock);
409 
410 	return new_index;
411 }
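/*
 * Example use: transport_add_device_to_core_hba() below allocates its
 * device index with
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 *
 * and the other scsi_index_t types are allocated the same way by their
 * respective callers.
 */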
412 
413 void transport_init_queue_obj(struct se_queue_obj *qobj)
414 {
415 	atomic_set(&qobj->queue_cnt, 0);
416 	INIT_LIST_HEAD(&qobj->qobj_list);
417 	init_waitqueue_head(&qobj->thread_wq);
418 	spin_lock_init(&qobj->cmd_queue_lock);
419 }
420 EXPORT_SYMBOL(transport_init_queue_obj);
421 
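/*
 * Best-effort request_module() of the known TCM subsystem plugins.
 * Failures to load individual plugins are only logged; the function
 * always returns 0.
 */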
422 static int transport_subsystem_reqmods(void)
423 {
424 	int ret;
425 
426 	ret = request_module("target_core_iblock");
427 	if (ret != 0)
428 		printk(KERN_ERR "Unable to load target_core_iblock\n");
429 
430 	ret = request_module("target_core_file");
431 	if (ret != 0)
432 		printk(KERN_ERR "Unable to load target_core_file\n");
433 
434 	ret = request_module("target_core_pscsi");
435 	if (ret != 0)
436 		printk(KERN_ERR "Unable to load target_core_pscsi\n");
437 
438 	ret = request_module("target_core_stgt");
439 	if (ret != 0)
440 		printk(KERN_ERR "Unable to load target_core_stgt\n");
441 
442 	return 0;
443 }
444 
445 int transport_subsystem_check_init(void)
446 {
447 	if (se_global->g_sub_api_initialized)
448 		return 0;
449 	/*
450 	 * Request the loading of known TCM subsystem plugins..
451 	 */
452 	if (transport_subsystem_reqmods() < 0)
453 		return -1;
454 
455 	se_global->g_sub_api_initialized = 1;
456 	return 0;
457 }
458 
459 struct se_session *transport_init_session(void)
460 {
461 	struct se_session *se_sess;
462 
463 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
464 	if (!(se_sess)) {
465 		printk(KERN_ERR "Unable to allocate struct se_session from"
466 				" se_sess_cache\n");
467 		return ERR_PTR(-ENOMEM);
468 	}
469 	INIT_LIST_HEAD(&se_sess->sess_list);
470 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
471 
472 	return se_sess;
473 }
474 EXPORT_SYMBOL(transport_init_session);
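/*
 * Note for fabric callers: this returns ERR_PTR(-ENOMEM) on allocation
 * failure, so a typical (illustrative) caller checks the result with
 * IS_ERR()/PTR_ERR() rather than against NULL:
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 */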
475 
476 /*
477  * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
478  */
479 void __transport_register_session(
480 	struct se_portal_group *se_tpg,
481 	struct se_node_acl *se_nacl,
482 	struct se_session *se_sess,
483 	void *fabric_sess_ptr)
484 {
485 	unsigned char buf[PR_REG_ISID_LEN];
486 
487 	se_sess->se_tpg = se_tpg;
488 	se_sess->fabric_sess_ptr = fabric_sess_ptr;
489 	/*
490 	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
491 	 *
492 	 * Only set for struct se_session's that will actually be moving I/O.
493 	 * eg: *NOT* discovery sessions.
494 	 */
495 	if (se_nacl) {
496 		/*
497 		 * If the fabric module supports an ISID based TransportID,
498 		 * save this value in binary from the fabric I_T Nexus now.
499 		 */
500 		if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
501 			memset(&buf[0], 0, PR_REG_ISID_LEN);
502 			TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
503 					&buf[0], PR_REG_ISID_LEN);
504 			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
505 		}
506 		spin_lock_irq(&se_nacl->nacl_sess_lock);
507 		/*
508 		 * The se_nacl->nacl_sess pointer will be set to the
509 		 * last active I_T Nexus for each struct se_node_acl.
510 		 */
511 		se_nacl->nacl_sess = se_sess;
512 
513 		list_add_tail(&se_sess->sess_acl_list,
514 			      &se_nacl->acl_sess_list);
515 		spin_unlock_irq(&se_nacl->nacl_sess_lock);
516 	}
517 	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
518 
519 	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
520 		TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
521 }
522 EXPORT_SYMBOL(__transport_register_session);
523 
524 void transport_register_session(
525 	struct se_portal_group *se_tpg,
526 	struct se_node_acl *se_nacl,
527 	struct se_session *se_sess,
528 	void *fabric_sess_ptr)
529 {
530 	spin_lock_bh(&se_tpg->session_lock);
531 	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
532 	spin_unlock_bh(&se_tpg->session_lock);
533 }
534 EXPORT_SYMBOL(transport_register_session);
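/*
 * Illustrative fabric login flow (a sketch, not taken from a specific
 * fabric module): after transport_init_session() succeeds, the fabric
 * registers the new I_T nexus against its TPG and node ACL with:
 *
 *	transport_register_session(&tpg->se_tpg, se_nacl, se_sess,
 *				   fabric_sess_ptr);
 *
 * se_nacl may be NULL for sessions that will not carry I/O, e.g.
 * discovery sessions, as noted in __transport_register_session() above.
 */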
535 
536 void transport_deregister_session_configfs(struct se_session *se_sess)
537 {
538 	struct se_node_acl *se_nacl;
539 
540 	/*
541 	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
542 	 */
543 	se_nacl = se_sess->se_node_acl;
544 	if ((se_nacl)) {
545 		spin_lock_irq(&se_nacl->nacl_sess_lock);
546 		list_del(&se_sess->sess_acl_list);
547 		/*
548 		 * If the session list is empty, then clear the pointer.
549 		 * Otherwise, set the struct se_session pointer from the tail
550 		 * element of the per struct se_node_acl active session list.
551 		 */
552 		if (list_empty(&se_nacl->acl_sess_list))
553 			se_nacl->nacl_sess = NULL;
554 		else {
555 			se_nacl->nacl_sess = container_of(
556 					se_nacl->acl_sess_list.prev,
557 					struct se_session, sess_acl_list);
558 		}
559 		spin_unlock_irq(&se_nacl->nacl_sess_lock);
560 	}
561 }
562 EXPORT_SYMBOL(transport_deregister_session_configfs);
563 
564 void transport_free_session(struct se_session *se_sess)
565 {
566 	kmem_cache_free(se_sess_cache, se_sess);
567 }
568 EXPORT_SYMBOL(transport_free_session);
569 
570 void transport_deregister_session(struct se_session *se_sess)
571 {
572 	struct se_portal_group *se_tpg = se_sess->se_tpg;
573 	struct se_node_acl *se_nacl;
574 
575 	if (!(se_tpg)) {
576 		transport_free_session(se_sess);
577 		return;
578 	}
579 
580 	spin_lock_bh(&se_tpg->session_lock);
581 	list_del(&se_sess->sess_list);
582 	se_sess->se_tpg = NULL;
583 	se_sess->fabric_sess_ptr = NULL;
584 	spin_unlock_bh(&se_tpg->session_lock);
585 
586 	/*
587 	 * Determine if we need to do extra work for this initiator node's
588 	 * struct se_node_acl if it had been previously dynamically generated.
589 	 */
590 	se_nacl = se_sess->se_node_acl;
591 	if ((se_nacl)) {
592 		spin_lock_bh(&se_tpg->acl_node_lock);
593 		if (se_nacl->dynamic_node_acl) {
594 			if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
595 					se_tpg))) {
596 				list_del(&se_nacl->acl_list);
597 				se_tpg->num_node_acls--;
598 				spin_unlock_bh(&se_tpg->acl_node_lock);
599 
600 				core_tpg_wait_for_nacl_pr_ref(se_nacl);
601 				core_free_device_list_for_node(se_nacl, se_tpg);
602 				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
603 						se_nacl);
604 				spin_lock_bh(&se_tpg->acl_node_lock);
605 			}
606 		}
607 		spin_unlock_bh(&se_tpg->acl_node_lock);
608 	}
609 
610 	transport_free_session(se_sess);
611 
612 	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
613 		TPG_TFO(se_tpg)->get_fabric_name());
614 }
615 EXPORT_SYMBOL(transport_deregister_session);
616 
617 /*
618  * Called with T_TASK(cmd)->t_state_lock held.
619  */
620 static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
621 {
622 	struct se_device *dev;
623 	struct se_task *task;
624 	unsigned long flags;
625 
626 	if (!T_TASK(cmd))
627 		return;
628 
629 	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
630 		dev = task->se_dev;
631 		if (!(dev))
632 			continue;
633 
634 		if (atomic_read(&task->task_active))
635 			continue;
636 
637 		if (!(atomic_read(&task->task_state_active)))
638 			continue;
639 
640 		spin_lock_irqsave(&dev->execute_task_lock, flags);
641 		list_del(&task->t_state_list);
642 		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
643 			CMD_TFO(cmd)->get_task_tag(cmd), dev, task);
644 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
645 
646 		atomic_set(&task->task_state_active, 0);
647 		atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
648 	}
649 }
650 
651 /*	transport_cmd_check_stop():
652  *
653  *	'transport_off = 1' determines if t_transport_active should be cleared.
654  *	'transport_off = 2' determines if task_dev_state should be removed.
655  *
656  *	A non-zero u8 t_state sets cmd->t_state.
657  *	Returns 1 when command is stopped, else 0.
658  */
659 static int transport_cmd_check_stop(
660 	struct se_cmd *cmd,
661 	int transport_off,
662 	u8 t_state)
663 {
664 	unsigned long flags;
665 
666 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
667 	/*
668 	 * Determine if an IOCTL context caller is requesting the stopping of this
669 	 * command for LUN shutdown purposes.
670 	 */
671 	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
672 		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
673 			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
674 			CMD_TFO(cmd)->get_task_tag(cmd));
675 
676 		cmd->deferred_t_state = cmd->t_state;
677 		cmd->t_state = TRANSPORT_DEFERRED_CMD;
678 		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
679 		if (transport_off == 2)
680 			transport_all_task_dev_remove_state(cmd);
681 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
682 
683 		complete(&T_TASK(cmd)->transport_lun_stop_comp);
684 		return 1;
685 	}
686 	/*
687 	 * Determine if frontend context caller is requesting the stopping of
688 	 * this command for frontend exceptions.
689 	 */
690 	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
691 		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
692 			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
693 			CMD_TFO(cmd)->get_task_tag(cmd));
694 
695 		cmd->deferred_t_state = cmd->t_state;
696 		cmd->t_state = TRANSPORT_DEFERRED_CMD;
697 		if (transport_off == 2)
698 			transport_all_task_dev_remove_state(cmd);
699 
700 		/*
701 		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
702 		 * to FE.
703 		 */
704 		if (transport_off == 2)
705 			cmd->se_lun = NULL;
706 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
707 
708 		complete(&T_TASK(cmd)->t_transport_stop_comp);
709 		return 1;
710 	}
711 	if (transport_off) {
712 		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
713 		if (transport_off == 2) {
714 			transport_all_task_dev_remove_state(cmd);
715 			/*
716 			 * Clear struct se_cmd->se_lun before the transport_off == 2
717 			 * handoff to fabric module.
718 			 */
719 			cmd->se_lun = NULL;
720 			/*
721 			 * Some fabric modules like tcm_loop can release
722 			 * their internally allocated I/O reference and
723 			 * struct se_cmd now.
724 			 */
725 			if (CMD_TFO(cmd)->check_stop_free != NULL) {
726 				spin_unlock_irqrestore(
727 					&T_TASK(cmd)->t_state_lock, flags);
728 
729 				CMD_TFO(cmd)->check_stop_free(cmd);
730 				return 1;
731 			}
732 		}
733 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
734 
735 		return 0;
736 	} else if (t_state)
737 		cmd->t_state = t_state;
738 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
739 
740 	return 0;
741 }
742 
743 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
744 {
745 	return transport_cmd_check_stop(cmd, 2, 0);
746 }
747 
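/*
 * Drop the command's device state and remove it from its struct se_lun's
 * lun_cmd_list, if it is still on that list.
 */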
748 static void transport_lun_remove_cmd(struct se_cmd *cmd)
749 {
750 	struct se_lun *lun = SE_LUN(cmd);
751 	unsigned long flags;
752 
753 	if (!lun)
754 		return;
755 
756 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
757 	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
758 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
759 		goto check_lun;
760 	}
761 	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
762 	transport_all_task_dev_remove_state(cmd);
763 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
764 
765 
766 check_lun:
767 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
768 	if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
769 		list_del(&cmd->se_lun_list);
770 		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
771 #if 0
772 		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n",
773 			CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
774 #endif
775 	}
776 	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
777 }
778 
779 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
780 {
781 	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
782 	transport_lun_remove_cmd(cmd);
783 
784 	if (transport_cmd_check_stop_to_fabric(cmd))
785 		return;
786 	if (remove)
787 		transport_generic_remove(cmd, 0, 0);
788 }
789 
790 void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
791 {
792 	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
793 
794 	if (transport_cmd_check_stop_to_fabric(cmd))
795 		return;
796 
797 	transport_generic_remove(cmd, 0, 0);
798 }
799 
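/*
 * Queue the command onto its struct se_device queue object for the
 * processing thread, optionally updating cmd->t_state, and wake up the
 * thread.  Returns -1 if the struct se_queue_req allocation fails.
 */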
800 static int transport_add_cmd_to_queue(
801 	struct se_cmd *cmd,
802 	int t_state)
803 {
804 	struct se_device *dev = cmd->se_dev;
805 	struct se_queue_obj *qobj = dev->dev_queue_obj;
806 	struct se_queue_req *qr;
807 	unsigned long flags;
808 
809 	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
810 	if (!(qr)) {
811 		printk(KERN_ERR "Unable to allocate memory for"
812 				" struct se_queue_req\n");
813 		return -1;
814 	}
815 	INIT_LIST_HEAD(&qr->qr_list);
816 
817 	qr->cmd = (void *)cmd;
818 	qr->state = t_state;
819 
820 	if (t_state) {
821 		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
822 		cmd->t_state = t_state;
823 		atomic_set(&T_TASK(cmd)->t_transport_active, 1);
824 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
825 	}
826 
827 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
828 	list_add_tail(&qr->qr_list, &qobj->qobj_list);
829 	atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
830 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
831 
832 	atomic_inc(&qobj->queue_cnt);
833 	wake_up_interruptible(&qobj->thread_wq);
834 	return 0;
835 }
836 
837 /*
838  * Called with struct se_queue_obj->cmd_queue_lock held.
839  */
840 static struct se_queue_req *
841 __transport_get_qr_from_queue(struct se_queue_obj *qobj)
842 {
843 	struct se_cmd *cmd;
844 	struct se_queue_req *qr = NULL;
845 
846 	if (list_empty(&qobj->qobj_list))
847 		return NULL;
848 
849 	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
850 		break;
851 
852 	if (qr->cmd) {
853 		cmd = (struct se_cmd *)qr->cmd;
854 		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
855 	}
856 	list_del(&qr->qr_list);
857 	atomic_dec(&qobj->queue_cnt);
858 
859 	return qr;
860 }
861 
862 static struct se_queue_req *
863 transport_get_qr_from_queue(struct se_queue_obj *qobj)
864 {
865 	struct se_cmd *cmd;
866 	struct se_queue_req *qr;
867 	unsigned long flags;
868 
869 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
870 	if (list_empty(&qobj->qobj_list)) {
871 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
872 		return NULL;
873 	}
874 
875 	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
876 		break;
877 
878 	if (qr->cmd) {
879 		cmd = (struct se_cmd *)qr->cmd;
880 		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
881 	}
882 	list_del(&qr->qr_list);
883 	atomic_dec(&qobj->queue_cnt);
884 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
885 
886 	return qr;
887 }
888 
889 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
890 		struct se_queue_obj *qobj)
891 {
892 	struct se_cmd *q_cmd;
893 	struct se_queue_req *qr = NULL, *qr_p = NULL;
894 	unsigned long flags;
895 
896 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
897 	if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
898 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
899 		return;
900 	}
901 
902 	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
903 		q_cmd = (struct se_cmd *)qr->cmd;
904 		if (q_cmd != cmd)
905 			continue;
906 
907 		atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
908 		atomic_dec(&qobj->queue_cnt);
909 		list_del(&qr->qr_list);
910 		kfree(qr);
911 	}
912 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
913 
914 	if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
915 		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
916 			CMD_TFO(cmd)->get_task_tag(cmd),
917 			atomic_read(&T_TASK(cmd)->t_transport_queue_active));
918 	}
919 }
920 
921 /*
922  * Completion function used by TCM subsystem plugins (such as FILEIO)
923  * for queueing up response from struct se_subsystem_api->do_task()
924  */
925 void transport_complete_sync_cache(struct se_cmd *cmd, int good)
926 {
927 	struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
928 				struct se_task, t_list);
929 
930 	if (good) {
931 		cmd->scsi_status = SAM_STAT_GOOD;
932 		task->task_scsi_status = GOOD;
933 	} else {
934 		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
935 		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
936 		TASK_CMD(task)->transport_error_status =
937 					PYX_TRANSPORT_ILLEGAL_REQUEST;
938 	}
939 
940 	transport_complete_task(task, good);
941 }
942 EXPORT_SYMBOL(transport_complete_sync_cache);
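/*
 * An illustrative subsystem plugin call site (sketch only): a backend's
 * SYNCHRONIZE_CACHE emulation would report its result with something like
 *
 *	transport_complete_sync_cache(cmd, ret == 0);
 *
 * so that a non-zero backend error is surfaced as CHECK_CONDITION above.
 */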
943 
944 /*	transport_complete_task():
945  *
946  *	Called from interrupt and non interrupt context depending
947  *	on the transport plugin.
948  */
949 void transport_complete_task(struct se_task *task, int success)
950 {
951 	struct se_cmd *cmd = TASK_CMD(task);
952 	struct se_device *dev = task->se_dev;
953 	int t_state;
954 	unsigned long flags;
955 #if 0
956 	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
957 			T_TASK(cmd)->t_task_cdb[0], dev);
958 #endif
959 	if (dev) {
960 		spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
961 		atomic_inc(&dev->depth_left);
962 		atomic_inc(&SE_HBA(dev)->left_queue_depth);
963 		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
964 	}
965 
966 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
967 	atomic_set(&task->task_active, 0);
968 
969 	/*
970 	 * See if any sense data exists, if so set the TASK_SENSE flag.
971 	 * Also check for any other post completion work that needs to be
972 	 * done by the plugins.
973 	 */
974 	if (dev && dev->transport->transport_complete) {
975 		if (dev->transport->transport_complete(task) != 0) {
976 			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
977 			task->task_sense = 1;
978 			success = 1;
979 		}
980 	}
981 
982 	/*
983 	 * See if we are waiting for outstanding struct se_task
984 	 * to complete for an exception condition
985 	 */
986 	if (atomic_read(&task->task_stop)) {
987 		/*
988 		 * Decrement T_TASK(cmd)->t_se_count if this task had
989 		 * previously thrown its timeout exception handler.
990 		 */
991 		if (atomic_read(&task->task_timeout)) {
992 			atomic_dec(&T_TASK(cmd)->t_se_count);
993 			atomic_set(&task->task_timeout, 0);
994 		}
995 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
996 
997 		complete(&task->task_stop_comp);
998 		return;
999 	}
1000 	/*
1001 	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout_left
1002 	 * counter to determine when the struct se_cmd is ready to be queued to
1003 	 * the processing thread.
1004 	 */
1005 	if (atomic_read(&task->task_timeout)) {
1006 		if (!(atomic_dec_and_test(
1007 				&T_TASK(cmd)->t_task_cdbs_timeout_left))) {
1008 			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
1009 				flags);
1010 			return;
1011 		}
1012 		t_state = TRANSPORT_COMPLETE_TIMEOUT;
1013 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1014 
1015 		transport_add_cmd_to_queue(cmd, t_state);
1016 		return;
1017 	}
1018 	atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
1019 
1020 	/*
1021 	 * Decrement the outstanding t_task_cdbs_left count.  The last
1022 	 * struct se_task from struct se_cmd will complete itself into the
1023 	 * device queue depending upon int success.
1024 	 */
1025 	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
1026 		if (!success)
1027 			T_TASK(cmd)->t_tasks_failed = 1;
1028 
1029 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1030 		return;
1031 	}
1032 
1033 	if (!success || T_TASK(cmd)->t_tasks_failed) {
1034 		t_state = TRANSPORT_COMPLETE_FAILURE;
1035 		if (!task->task_error_status) {
1036 			task->task_error_status =
1037 				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1038 			cmd->transport_error_status =
1039 				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1040 		}
1041 	} else {
1042 		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
1043 		t_state = TRANSPORT_COMPLETE_OK;
1044 	}
1045 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1046 
1047 	transport_add_cmd_to_queue(cmd, t_state);
1048 }
1049 EXPORT_SYMBOL(transport_complete_task);
1050 
1051 /*
1052  * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
1053  * struct se_task list is ready to be added to the active execution list
1054  * of the struct se_device.
1055  *
1056  * Called with se_dev_t->execute_task_lock held.
1057  */
1058 static inline int transport_add_task_check_sam_attr(
1059 	struct se_task *task,
1060 	struct se_task *task_prev,
1061 	struct se_device *dev)
1062 {
1063 	/*
1064 	 * No SAM Task attribute emulation enabled, add to tail of
1065 	 * execution queue
1066 	 */
1067 	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
1068 		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
1069 		return 0;
1070 	}
1071 	/*
1072 	 * HEAD_OF_QUEUE attribute for received CDB, which means
1073 	 * the first task that is associated with a struct se_cmd goes to
1074 	 * head of the struct se_device->execute_task_list, and task_prev
1075 	 * after that for each subsequent task
1076 	 */
1077 	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
1078 		list_add(&task->t_execute_list,
1079 				(task_prev != NULL) ?
1080 				&task_prev->t_execute_list :
1081 				&dev->execute_task_list);
1082 
1083 		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
1084 				" in execution queue\n",
1085 				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
1086 		return 1;
1087 	}
1088 	/*
1089 	 * ORDERED, SIMPLE or UNTAGGED attribute tasks are added to the end of
1090 	 * the struct se_device->execute_task_list once they have transitioned
1091 	 * from Dormant -> Active state.
1092 	 */
1093 	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
1094 	return 0;
1095 }
1096 
1097 /*	__transport_add_task_to_execute_queue():
1098  *
1099  *	Called with se_dev_t->execute_task_lock held.
1100  */
1101 static void __transport_add_task_to_execute_queue(
1102 	struct se_task *task,
1103 	struct se_task *task_prev,
1104 	struct se_device *dev)
1105 {
1106 	int head_of_queue;
1107 
1108 	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
1109 	atomic_inc(&dev->execute_tasks);
1110 
1111 	if (atomic_read(&task->task_state_active))
1112 		return;
1113 	/*
1114 	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
1115 	 * state list as well.  Running without SAM Task Attribute emulation
1116 	 * will always return head_of_queue == 0 here
1117 	 */
1118 	if (head_of_queue)
1119 		list_add(&task->t_state_list, (task_prev) ?
1120 				&task_prev->t_state_list :
1121 				&dev->state_task_list);
1122 	else
1123 		list_add_tail(&task->t_state_list, &dev->state_task_list);
1124 
1125 	atomic_set(&task->task_state_active, 1);
1126 
1127 	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
1128 		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
1129 		task, dev);
1130 }
1131 
1132 static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
1133 {
1134 	struct se_device *dev;
1135 	struct se_task *task;
1136 	unsigned long flags;
1137 
1138 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
1139 	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
1140 		dev = task->se_dev;
1141 
1142 		if (atomic_read(&task->task_state_active))
1143 			continue;
1144 
1145 		spin_lock(&dev->execute_task_lock);
1146 		list_add_tail(&task->t_state_list, &dev->state_task_list);
1147 		atomic_set(&task->task_state_active, 1);
1148 
1149 		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
1150 			CMD_TFO(task->task_se_cmd)->get_task_tag(
1151 			task->task_se_cmd), task, dev);
1152 
1153 		spin_unlock(&dev->execute_task_lock);
1154 	}
1155 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1156 }
1157 
1158 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
1159 {
1160 	struct se_device *dev = SE_DEV(cmd);
1161 	struct se_task *task, *task_prev = NULL;
1162 	unsigned long flags;
1163 
1164 	spin_lock_irqsave(&dev->execute_task_lock, flags);
1165 	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
1166 		if (atomic_read(&task->task_execute_queue))
1167 			continue;
1168 		/*
1169 		 * __transport_add_task_to_execute_queue() handles the
1170 		 * SAM Task Attribute emulation if enabled
1171 		 */
1172 		__transport_add_task_to_execute_queue(task, task_prev, dev);
1173 		atomic_set(&task->task_execute_queue, 1);
1174 		task_prev = task;
1175 	}
1176 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
1177 
1178 	return;
1179 }
1180 
1181 /*	transport_get_task_from_execute_queue():
1182  *
1183  *	Called with dev->execute_task_lock held.
1184  */
1185 static struct se_task *
1186 transport_get_task_from_execute_queue(struct se_device *dev)
1187 {
1188 	struct se_task *task;
1189 
1190 	if (list_empty(&dev->execute_task_list))
1191 		return NULL;
1192 
1193 	list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
1194 		break;
1195 
1196 	list_del(&task->t_execute_list);
1197 	atomic_set(&task->task_execute_queue, 0);
1198 	atomic_dec(&dev->execute_tasks);
1199 
1200 	return task;
1201 }
1202 
1203 /*	transport_remove_task_from_execute_queue():
1204  *
1205  *	Remove the passed struct se_task from the struct se_device execute queue.
1206  */
1207 void transport_remove_task_from_execute_queue(
1208 	struct se_task *task,
1209 	struct se_device *dev)
1210 {
1211 	unsigned long flags;
1212 
1213 	if (atomic_read(&task->task_execute_queue) == 0) {
1214 		dump_stack();
1215 		return;
1216 	}
1217 
1218 	spin_lock_irqsave(&dev->execute_task_lock, flags);
1219 	list_del(&task->t_execute_list);
1220 	atomic_set(&task->task_execute_queue, 0);
1221 	atomic_dec(&dev->execute_tasks);
1222 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
1223 }
1224 
1225 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
1226 {
1227 	switch (cmd->data_direction) {
1228 	case DMA_NONE:
1229 		return "NONE";
1230 	case DMA_FROM_DEVICE:
1231 		return "READ";
1232 	case DMA_TO_DEVICE:
1233 		return "WRITE";
1234 	case DMA_BIDIRECTIONAL:
1235 		return "BIDI";
1236 	default:
1237 		break;
1238 	}
1239 
1240 	return "UNKNOWN";
1241 }
1242 
1243 void transport_dump_dev_state(
1244 	struct se_device *dev,
1245 	char *b,
1246 	int *bl)
1247 {
1248 	*bl += sprintf(b + *bl, "Status: ");
1249 	switch (dev->dev_status) {
1250 	case TRANSPORT_DEVICE_ACTIVATED:
1251 		*bl += sprintf(b + *bl, "ACTIVATED");
1252 		break;
1253 	case TRANSPORT_DEVICE_DEACTIVATED:
1254 		*bl += sprintf(b + *bl, "DEACTIVATED");
1255 		break;
1256 	case TRANSPORT_DEVICE_SHUTDOWN:
1257 		*bl += sprintf(b + *bl, "SHUTDOWN");
1258 		break;
1259 	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
1260 	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
1261 		*bl += sprintf(b + *bl, "OFFLINE");
1262 		break;
1263 	default:
1264 		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
1265 		break;
1266 	}
1267 
1268 	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
1269 		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
1270 		dev->queue_depth);
1271 	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
1272 		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
1273 	*bl += sprintf(b + *bl, "        ");
1274 }
1275 
1276 /*	transport_release_all_cmds():
1277  *
1278  *	Release any struct se_cmd descriptors still sitting on the struct
1278  *	se_device queue object.
1279  */
1280 static void transport_release_all_cmds(struct se_device *dev)
1281 {
1282 	struct se_cmd *cmd = NULL;
1283 	struct se_queue_req *qr = NULL, *qr_p = NULL;
1284 	int bug_out = 0, t_state;
1285 	unsigned long flags;
1286 
1287 	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
1288 	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
1289 				qr_list) {
1290 
1291 		cmd = (struct se_cmd *)qr->cmd;
1292 		t_state = qr->state;
1293 		list_del(&qr->qr_list);
1294 		kfree(qr);
1295 		spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
1296 				flags);
1297 
1298 		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
1299 			" t_state: %u directly\n",
1300 			CMD_TFO(cmd)->get_task_tag(cmd),
1301 			CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
1302 
1303 		transport_release_fe_cmd(cmd);
1304 		bug_out = 1;
1305 
1306 		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
1307 	}
1308 	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
1309 #if 0
1310 	if (bug_out)
1311 		BUG();
1312 #endif
1313 }
1314 
1315 void transport_dump_vpd_proto_id(
1316 	struct t10_vpd *vpd,
1317 	unsigned char *p_buf,
1318 	int p_buf_len)
1319 {
1320 	unsigned char buf[VPD_TMP_BUF_SIZE];
1321 	int len;
1322 
1323 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1324 	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1325 
1326 	switch (vpd->protocol_identifier) {
1327 	case 0x00:
1328 		sprintf(buf+len, "Fibre Channel\n");
1329 		break;
1330 	case 0x10:
1331 		sprintf(buf+len, "Parallel SCSI\n");
1332 		break;
1333 	case 0x20:
1334 		sprintf(buf+len, "SSA\n");
1335 		break;
1336 	case 0x30:
1337 		sprintf(buf+len, "IEEE 1394\n");
1338 		break;
1339 	case 0x40:
1340 		sprintf(buf+len, "SCSI Remote Direct Memory Access"
1341 				" Protocol\n");
1342 		break;
1343 	case 0x50:
1344 		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1345 		break;
1346 	case 0x60:
1347 		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1348 		break;
1349 	case 0x70:
1350 		sprintf(buf+len, "Automation/Drive Interface Transport"
1351 				" Protocol\n");
1352 		break;
1353 	case 0x80:
1354 		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1355 		break;
1356 	default:
1357 		sprintf(buf+len, "Unknown 0x%02x\n",
1358 				vpd->protocol_identifier);
1359 		break;
1360 	}
1361 
1362 	if (p_buf)
1363 		strncpy(p_buf, buf, p_buf_len);
1364 	else
1365 		printk(KERN_INFO "%s", buf);
1366 }
1367 
1368 void
1369 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1370 {
1371 	/*
1372 	 * Check if the Protocol Identifier Valid (PIV) bit is set..
1373 	 *
1374 	 * from spc3r23.pdf section 7.5.1
1375 	 */
1376 	 if (page_83[1] & 0x80) {
1377 		vpd->protocol_identifier = (page_83[0] & 0xf0);
1378 		vpd->protocol_identifier_set = 1;
1379 		transport_dump_vpd_proto_id(vpd, NULL, 0);
1380 	}
1381 }
1382 EXPORT_SYMBOL(transport_set_vpd_proto_id);
1383 
1384 int transport_dump_vpd_assoc(
1385 	struct t10_vpd *vpd,
1386 	unsigned char *p_buf,
1387 	int p_buf_len)
1388 {
1389 	unsigned char buf[VPD_TMP_BUF_SIZE];
1390 	int ret = 0, len;
1391 
1392 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1393 	len = sprintf(buf, "T10 VPD Identifier Association: ");
1394 
1395 	switch (vpd->association) {
1396 	case 0x00:
1397 		sprintf(buf+len, "addressed logical unit\n");
1398 		break;
1399 	case 0x10:
1400 		sprintf(buf+len, "target port\n");
1401 		break;
1402 	case 0x20:
1403 		sprintf(buf+len, "SCSI target device\n");
1404 		break;
1405 	default:
1406 		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1407 		ret = -1;
1408 		break;
1409 	}
1410 
1411 	if (p_buf)
1412 		strncpy(p_buf, buf, p_buf_len);
1413 	else
1414 		printk("%s", buf);
1415 
1416 	return ret;
1417 }
1418 
1419 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1420 {
1421 	/*
1422 	 * The VPD identification association..
1423 	 *
1424 	 * from spc3r23.pdf Section 7.6.3.1 Table 297
1425 	 */
1426 	vpd->association = (page_83[1] & 0x30);
1427 	return transport_dump_vpd_assoc(vpd, NULL, 0);
1428 }
1429 EXPORT_SYMBOL(transport_set_vpd_assoc);
1430 
1431 int transport_dump_vpd_ident_type(
1432 	struct t10_vpd *vpd,
1433 	unsigned char *p_buf,
1434 	int p_buf_len)
1435 {
1436 	unsigned char buf[VPD_TMP_BUF_SIZE];
1437 	int ret = 0, len;
1438 
1439 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1440 	len = sprintf(buf, "T10 VPD Identifier Type: ");
1441 
1442 	switch (vpd->device_identifier_type) {
1443 	case 0x00:
1444 		sprintf(buf+len, "Vendor specific\n");
1445 		break;
1446 	case 0x01:
1447 		sprintf(buf+len, "T10 Vendor ID based\n");
1448 		break;
1449 	case 0x02:
1450 		sprintf(buf+len, "EUI-64 based\n");
1451 		break;
1452 	case 0x03:
1453 		sprintf(buf+len, "NAA\n");
1454 		break;
1455 	case 0x04:
1456 		sprintf(buf+len, "Relative target port identifier\n");
1457 		break;
1458 	case 0x08:
1459 		sprintf(buf+len, "SCSI name string\n");
1460 		break;
1461 	default:
1462 		sprintf(buf+len, "Unsupported: 0x%02x\n",
1463 				vpd->device_identifier_type);
1464 		ret = -1;
1465 		break;
1466 	}
1467 
1468 	if (p_buf)
1469 		strncpy(p_buf, buf, p_buf_len);
1470 	else
1471 		printk("%s", buf);
1472 
1473 	return ret;
1474 }
1475 
1476 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1477 {
1478 	/*
1479 	 * The VPD identifier type..
1480 	 *
1481 	 * from spc3r23.pdf Section 7.6.3.1 Table 298
1482 	 */
1483 	vpd->device_identifier_type = (page_83[1] & 0x0f);
1484 	return transport_dump_vpd_ident_type(vpd, NULL, 0);
1485 }
1486 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1487 
1488 int transport_dump_vpd_ident(
1489 	struct t10_vpd *vpd,
1490 	unsigned char *p_buf,
1491 	int p_buf_len)
1492 {
1493 	unsigned char buf[VPD_TMP_BUF_SIZE];
1494 	int ret = 0;
1495 
1496 	memset(buf, 0, VPD_TMP_BUF_SIZE);
1497 
1498 	switch (vpd->device_identifier_code_set) {
1499 	case 0x01: /* Binary */
1500 		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1501 			&vpd->device_identifier[0]);
1502 		break;
1503 	case 0x02: /* ASCII */
1504 		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1505 			&vpd->device_identifier[0]);
1506 		break;
1507 	case 0x03: /* UTF-8 */
1508 		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1509 			&vpd->device_identifier[0]);
1510 		break;
1511 	default:
1512 		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1513 			" 0x%02x", vpd->device_identifier_code_set);
1514 		ret = -1;
1515 		break;
1516 	}
1517 
1518 	if (p_buf)
1519 		strncpy(p_buf, buf, p_buf_len);
1520 	else
1521 		printk("%s", buf);
1522 
1523 	return ret;
1524 }
1525 
1526 int
1527 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1528 {
1529 	static const char hex_str[] = "0123456789abcdef";
1530 	int j = 0, i = 4; /* offset to start of the identifier */
1531 
1532 	/*
1533 	 * The VPD Code Set (encoding)
1534 	 *
1535 	 * from spc3r23.pdf Section 7.6.3.1 Table 296
1536 	 */
1537 	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1538 	switch (vpd->device_identifier_code_set) {
1539 	case 0x01: /* Binary */
1540 		vpd->device_identifier[j++] =
1541 				hex_str[vpd->device_identifier_type];
1542 		while (i < (4 + page_83[3])) {
1543 			vpd->device_identifier[j++] =
1544 				hex_str[(page_83[i] & 0xf0) >> 4];
1545 			vpd->device_identifier[j++] =
1546 				hex_str[page_83[i] & 0x0f];
1547 			i++;
1548 		}
1549 		break;
1550 	case 0x02: /* ASCII */
1551 	case 0x03: /* UTF-8 */
1552 		while (i < (4 + page_83[3]))
1553 			vpd->device_identifier[j++] = page_83[i++];
1554 		break;
1555 	default:
1556 		break;
1557 	}
1558 
1559 	return transport_dump_vpd_ident(vpd, NULL, 0);
1560 }
1561 EXPORT_SYMBOL(transport_set_vpd_ident);
1562 
1563 static void core_setup_task_attr_emulation(struct se_device *dev)
1564 {
1565 	/*
1566 	 * If this device is from Target_Core_Mod/pSCSI, disable the
1567 	 * SAM Task Attribute emulation.
1568 	 *
1569 	 * This is currently not available in upsream Linux/SCSI Target
1570 	 * This is currently not available in upstream Linux/SCSI Target
1571 	 */
1572 	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1573 		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1574 		return;
1575 	}
1576 
1577 	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1578 	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1579 		" device\n", TRANSPORT(dev)->name,
1580 		TRANSPORT(dev)->get_device_rev(dev));
1581 }
1582 
1583 static void scsi_dump_inquiry(struct se_device *dev)
1584 {
1585 	struct t10_wwn *wwn = DEV_T10_WWN(dev);
1586 	int i, device_type;
1587 	/*
1588 	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1589 	 */
1590 	printk("  Vendor: ");
1591 	for (i = 0; i < 8; i++)
1592 		if (wwn->vendor[i] >= 0x20)
1593 			printk("%c", wwn->vendor[i]);
1594 		else
1595 			printk(" ");
1596 
1597 	printk("  Model: ");
1598 	for (i = 0; i < 16; i++)
1599 		if (wwn->model[i] >= 0x20)
1600 			printk("%c", wwn->model[i]);
1601 		else
1602 			printk(" ");
1603 
1604 	printk("  Revision: ");
1605 	for (i = 0; i < 4; i++)
1606 		if (wwn->revision[i] >= 0x20)
1607 			printk("%c", wwn->revision[i]);
1608 		else
1609 			printk(" ");
1610 
1611 	printk("\n");
1612 
1613 	device_type = TRANSPORT(dev)->get_device_type(dev);
1614 	printk("  Type:   %s ", scsi_device_type(device_type));
1615 	printk("                 ANSI SCSI revision: %02x\n",
1616 				TRANSPORT(dev)->get_device_rev(dev));
1617 }
1618 
1619 struct se_device *transport_add_device_to_core_hba(
1620 	struct se_hba *hba,
1621 	struct se_subsystem_api *transport,
1622 	struct se_subsystem_dev *se_dev,
1623 	u32 device_flags,
1624 	void *transport_dev,
1625 	struct se_dev_limits *dev_limits,
1626 	const char *inquiry_prod,
1627 	const char *inquiry_rev)
1628 {
1629 	int force_pt;
1630 	struct se_device  *dev;
1631 
1632 	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1633 	if (!(dev)) {
1634 		printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
1635 		return NULL;
1636 	}
1637 	dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
1638 	if (!(dev->dev_queue_obj)) {
1639 		printk(KERN_ERR "Unable to allocate memory for"
1640 				" dev->dev_queue_obj\n");
1641 		kfree(dev);
1642 		return NULL;
1643 	}
1644 	transport_init_queue_obj(dev->dev_queue_obj);
1645 
1646 	dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
1647 					GFP_KERNEL);
1648 	if (!(dev->dev_status_queue_obj)) {
1649 		printk(KERN_ERR "Unable to allocate memory for"
1650 				" dev->dev_status_queue_obj\n");
1651 		kfree(dev->dev_queue_obj);
1652 		kfree(dev);
1653 		return NULL;
1654 	}
1655 	transport_init_queue_obj(dev->dev_status_queue_obj);
1656 
1657 	dev->dev_flags		= device_flags;
1658 	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
1659 	dev->dev_ptr		= (void *) transport_dev;
1660 	dev->se_hba		= hba;
1661 	dev->se_sub_dev		= se_dev;
1662 	dev->transport		= transport;
1663 	atomic_set(&dev->active_cmds, 0);
1664 	INIT_LIST_HEAD(&dev->dev_list);
1665 	INIT_LIST_HEAD(&dev->dev_sep_list);
1666 	INIT_LIST_HEAD(&dev->dev_tmr_list);
1667 	INIT_LIST_HEAD(&dev->execute_task_list);
1668 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
1669 	INIT_LIST_HEAD(&dev->ordered_cmd_list);
1670 	INIT_LIST_HEAD(&dev->state_task_list);
1671 	spin_lock_init(&dev->execute_task_lock);
1672 	spin_lock_init(&dev->delayed_cmd_lock);
1673 	spin_lock_init(&dev->ordered_cmd_lock);
1674 	spin_lock_init(&dev->state_task_lock);
1675 	spin_lock_init(&dev->dev_alua_lock);
1676 	spin_lock_init(&dev->dev_reservation_lock);
1677 	spin_lock_init(&dev->dev_status_lock);
1678 	spin_lock_init(&dev->dev_status_thr_lock);
1679 	spin_lock_init(&dev->se_port_lock);
1680 	spin_lock_init(&dev->se_tmr_lock);
1681 
1682 	dev->queue_depth	= dev_limits->queue_depth;
1683 	atomic_set(&dev->depth_left, dev->queue_depth);
1684 	atomic_set(&dev->dev_ordered_id, 0);
1685 
1686 	se_dev_set_default_attribs(dev, dev_limits);
1687 
1688 	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1689 	dev->creation_time = get_jiffies_64();
1690 	spin_lock_init(&dev->stats_lock);
1691 
1692 	spin_lock(&hba->device_lock);
1693 	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1694 	hba->dev_count++;
1695 	spin_unlock(&hba->device_lock);
1696 	/*
1697 	 * Setup the SAM Task Attribute emulation for struct se_device
1698 	 */
1699 	core_setup_task_attr_emulation(dev);
1700 	/*
1701 	 * Force PR and ALUA passthrough emulation with internal object use.
1702 	 */
1703 	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1704 	/*
1705 	 * Setup the Reservations infrastructure for struct se_device
1706 	 */
1707 	core_setup_reservations(dev, force_pt);
1708 	/*
1709 	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1710 	 */
1711 	if (core_setup_alua(dev, force_pt) < 0)
1712 		goto out;
1713 
1714 	/*
1715 	 * Startup the struct se_device processing thread
1716 	 */
1717 	dev->process_thread = kthread_run(transport_processing_thread, dev,
1718 					  "LIO_%s", TRANSPORT(dev)->name);
1719 	if (IS_ERR(dev->process_thread)) {
1720 		printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
1721 			TRANSPORT(dev)->name);
1722 		goto out;
1723 	}
1724 
1725 	/*
1726 	 * Preload the initial INQUIRY const values if we are doing
1727 	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1728 	 * passthrough because this is being provided by the backend LLD.
1729 	 * This is required so that transport_get_inquiry() copies these
1730 	 * originals once back into DEV_T10_WWN(dev) for the virtual device
1731 	 * setup.
1732 	 */
1733 	if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1734 		if (!(inquiry_prod) || !(inquiry_rev)) {
1735 			printk(KERN_ERR "All non TCM/pSCSI plugins require"
1736 				" INQUIRY consts\n");
1737 			goto out;
1738 		}
1739 
1740 		strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
1741 		strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
1742 		strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
1743 	}
1744 	scsi_dump_inquiry(dev);
1745 
1746 	return dev;
1747 out:
1748 	if (!IS_ERR_OR_NULL(dev->process_thread))
1748 		kthread_stop(dev->process_thread);
1749 
1750 	spin_lock(&hba->device_lock);
1751 	list_del(&dev->dev_list);
1752 	hba->dev_count--;
1753 	spin_unlock(&hba->device_lock);
1754 
1755 	se_release_vpd_for_dev(dev);
1756 
1757 	kfree(dev->dev_status_queue_obj);
1758 	kfree(dev->dev_queue_obj);
1759 	kfree(dev);
1760 
1761 	return NULL;
1762 }
1763 EXPORT_SYMBOL(transport_add_device_to_core_hba);
1764 
1765 /*	transport_generic_prepare_cdb():
1766  *
1767  *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1768  *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1769  *	The point of this is that since we are mapping iSCSI LUNs to
1770  *	SCSI Target IDs, having a non-zero LUN in the CDB will throw the
1771  *	devices and HBAs for a loop.
1772  */
1773 static inline void transport_generic_prepare_cdb(
1774 	unsigned char *cdb)
1775 {
1776 	switch (cdb[0]) {
1777 	case READ_10: /* SBC - RDProtect */
1778 	case READ_12: /* SBC - RDProtect */
1779 	case READ_16: /* SBC - RDProtect */
1780 	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1781 	case VERIFY: /* SBC - VRProtect */
1782 	case VERIFY_16: /* SBC - VRProtect */
1783 	case WRITE_VERIFY: /* SBC - VRProtect */
1784 	case WRITE_VERIFY_12: /* SBC - VRProtect */
1785 		break;
1786 	default:
1787 		cdb[1] &= 0x1f; /* clear logical unit number */
1788 		break;
1789 	}
1790 }
1791 
1792 static struct se_task *
1793 transport_generic_get_task(struct se_cmd *cmd,
1794 		enum dma_data_direction data_direction)
1795 {
1796 	struct se_task *task;
1797 	struct se_device *dev = SE_DEV(cmd);
1798 	unsigned long flags;
1799 
1800 	task = dev->transport->alloc_task(cmd);
1801 	if (!task) {
1802 		printk(KERN_ERR "Unable to allocate struct se_task\n");
1803 		return NULL;
1804 	}
1805 
1806 	INIT_LIST_HEAD(&task->t_list);
1807 	INIT_LIST_HEAD(&task->t_execute_list);
1808 	INIT_LIST_HEAD(&task->t_state_list);
1809 	init_completion(&task->task_stop_comp);
1810 	task->task_no = T_TASK(cmd)->t_tasks_no++;
1811 	task->task_se_cmd = cmd;
1812 	task->se_dev = dev;
1813 	task->task_data_direction = data_direction;
1814 
1815 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
1816 	list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
1817 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1818 
1819 	return task;
1820 }
1821 
1822 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1823 
1824 void transport_device_setup_cmd(struct se_cmd *cmd)
1825 {
1826 	cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
1827 }
1828 EXPORT_SYMBOL(transport_device_setup_cmd);
1829 
1830 /*
1831  * Used by fabric modules containing a local struct se_cmd within their
1832  * fabric dependent per I/O descriptor.
1833  */
1834 void transport_init_se_cmd(
1835 	struct se_cmd *cmd,
1836 	struct target_core_fabric_ops *tfo,
1837 	struct se_session *se_sess,
1838 	u32 data_length,
1839 	int data_direction,
1840 	int task_attr,
1841 	unsigned char *sense_buffer)
1842 {
1843 	INIT_LIST_HEAD(&cmd->se_lun_list);
1844 	INIT_LIST_HEAD(&cmd->se_delayed_list);
1845 	INIT_LIST_HEAD(&cmd->se_ordered_list);
1846 	/*
1847 	 * Setup t_task pointer to t_task_backstore
1848 	 */
1849 	cmd->t_task = &cmd->t_task_backstore;
1850 
1851 	INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
1852 	init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
1853 	init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
1854 	init_completion(&T_TASK(cmd)->t_transport_stop_comp);
1855 	spin_lock_init(&T_TASK(cmd)->t_state_lock);
1856 	atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
1857 
1858 	cmd->se_tfo = tfo;
1859 	cmd->se_sess = se_sess;
1860 	cmd->data_length = data_length;
1861 	cmd->data_direction = data_direction;
1862 	cmd->sam_task_attr = task_attr;
1863 	cmd->sense_buffer = sense_buffer;
1864 }
1865 EXPORT_SYMBOL(transport_init_se_cmd);
1866 
1867 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1868 {
1869 	/*
1870 	 * Check if SAM Task Attribute emulation is enabled for this
1871 	 * struct se_device storage object
1872 	 */
1873 	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1874 		return 0;
1875 
1876 	if (cmd->sam_task_attr == MSG_ACA_TAG) {
1877 		DEBUG_STA("SAM Task Attribute ACA"
1878 			" emulation is not supported\n");
1879 		return -1;
1880 	}
1881 	/*
1882 	 * Used to determine when ORDERED commands should go from
1883 	 * Dormant to Active status.
1884 	 */
1885 	cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
1886 	smp_mb__after_atomic_inc();
1887 	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1888 			cmd->se_ordered_id, cmd->sam_task_attr,
1889 			TRANSPORT(cmd->se_dev)->name);
1890 	return 0;
1891 }
1892 
1893 void transport_free_se_cmd(
1894 	struct se_cmd *se_cmd)
1895 {
1896 	if (se_cmd->se_tmr_req)
1897 		core_tmr_release_req(se_cmd->se_tmr_req);
1898 	/*
1899 	 * Check and free any extended CDB buffer that was allocated
1900 	 */
1901 	if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
1902 		kfree(T_TASK(se_cmd)->t_task_cdb);
1903 }
1904 EXPORT_SYMBOL(transport_free_se_cmd);
1905 
1906 static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
1907 
1908 /*	transport_generic_allocate_tasks():
1909  *
1910  *	Called from fabric RX Thread.
1911  */
1912 int transport_generic_allocate_tasks(
1913 	struct se_cmd *cmd,
1914 	unsigned char *cdb)
1915 {
1916 	int ret;
1917 
1918 	transport_generic_prepare_cdb(cdb);
1919 
1920 	/*
1921 	 * This is needed for early exceptions.
1922 	 */
1923 	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
1924 
1925 	transport_device_setup_cmd(cmd);
1926 	/*
1927 	 * Ensure that the received CDB does not exceed the max (252 + 8) bytes
1928 	 * for VARIABLE_LENGTH_CMD
1929 	 */
1930 	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1931 		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
1932 			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1933 			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1934 		return -1;
1935 	}
1936 	/*
1937 	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1938 	 * allocate the additional extended CDB buffer now..  Otherwise
1939 	 * setup the pointer from __t_task_cdb to t_task_cdb.
1940 	 */
1941 	if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
1942 		T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
1943 						GFP_KERNEL);
1944 		if (!(T_TASK(cmd)->t_task_cdb)) {
1945 			printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
1946 				" %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
1947 				scsi_command_size(cdb),
1948 				(unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
1949 			return -1;
1950 		}
1951 	} else
1952 		T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
1953 	/*
1954 	 * Copy the original CDB into T_TASK(cmd).
1955 	 */
1956 	memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
1957 	/*
1958 	 * Setup the received CDB based on SCSI defined opcodes and
1959 	 * perform unit attention, persistent reservations and ALUA
1960 	 * checks for virtual device backends.  The T_TASK(cmd)->t_task_cdb
1961 	 * pointer is expected to be setup before we reach this point.
1962 	 */
1963 	ret = transport_generic_cmd_sequencer(cmd, cdb);
1964 	if (ret < 0)
1965 		return ret;
1966 	/*
1967 	 * Check for SAM Task Attribute Emulation
1968 	 */
1969 	if (transport_check_alloc_task_attr(cmd) < 0) {
1970 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1971 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1972 		return -2;
1973 	}
1974 	spin_lock(&cmd->se_lun->lun_sep_lock);
1975 	if (cmd->se_lun->lun_sep)
1976 		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1977 	spin_unlock(&cmd->se_lun->lun_sep_lock);
1978 	return 0;
1979 }
1980 EXPORT_SYMBOL(transport_generic_allocate_tasks);
1981 
1982 /*
1983  * Used by fabric module frontends not defining a TFO->new_cmd_map()
1984  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
1985  */
1986 int transport_generic_handle_cdb(
1987 	struct se_cmd *cmd)
1988 {
1989 	if (!SE_LUN(cmd)) {
1990 		dump_stack();
1991 		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
1992 		return -1;
1993 	}
1994 
1995 	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
1996 	return 0;
1997 }
1998 EXPORT_SYMBOL(transport_generic_handle_cdb);
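/*
 * Hedged sketch (not compiled) of the typical fabric RX path into the
 * routines above for a fabric module without TFO->new_cmd_map().  It assumes
 * the LUN lookup (not shown) has already associated the se_cmd with an
 * se_lun; the function and parameter names are hypothetical placeholders.
 */
#if 0
static int example_fabric_rx_path(struct se_cmd *se_cmd,
				  struct target_core_fabric_ops *my_tfo,
				  struct se_session *my_sess,
				  unsigned char *my_cdb,
				  u32 my_data_length,
				  unsigned char *my_sense)
{
	transport_init_se_cmd(se_cmd, my_tfo, my_sess, my_data_length,
			      DMA_FROM_DEVICE, MSG_SIMPLE_TAG, my_sense);

	if (transport_generic_allocate_tasks(se_cmd, my_cdb) < 0)
		return -1;
	/* Hand the fully setup descriptor to the processing thread */
	return transport_generic_handle_cdb(se_cmd);
}
#endif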
1999 
2000 /*
2001  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
2002  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
2003  * complete setup in TCM process context w/ TFO->new_cmd_map().
2004  */
2005 int transport_generic_handle_cdb_map(
2006 	struct se_cmd *cmd)
2007 {
2008 	if (!SE_LUN(cmd)) {
2009 		dump_stack();
2010 		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
2011 		return -1;
2012 	}
2013 
2014 	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
2015 	return 0;
2016 }
2017 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
2018 
2019 /*	transport_generic_handle_data():
2020  *
2021  *
2022  */
2023 int transport_generic_handle_data(
2024 	struct se_cmd *cmd)
2025 {
2026 	/*
2027 	 * For the software fabric case, we assume the nexus is being
2028 	 * failed/shutdown when signals are pending from the kthread context
2029 	 * caller, so we return a failure.  For the HW target mode case running
2030 	 * in interrupt code, the signal_pending() check is skipped.
2031 	 */
2032 	if (!in_interrupt() && signal_pending(current))
2033 		return -1;
2034 	/*
2035 	 * If the received CDB has already been ABORTED by the generic
2036 	 * target engine, we now call transport_check_aborted_status()
2037 	 * to queue any delayed TASK_ABORTED status for the received CDB to the
2038 	 * fabric module as we are expecting no further incoming DATA OUT
2039 	 * sequences at this point.
2040 	 */
2041 	if (transport_check_aborted_status(cmd, 1) != 0)
2042 		return 0;
2043 
2044 	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
2045 	return 0;
2046 }
2047 EXPORT_SYMBOL(transport_generic_handle_data);
2048 
2049 /*	transport_generic_handle_tmr():
2050  *
2051  *
2052  */
2053 int transport_generic_handle_tmr(
2054 	struct se_cmd *cmd)
2055 {
2056 	/*
2057 	 * This is needed for early exceptions.
2058 	 */
2059 	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
2060 	transport_device_setup_cmd(cmd);
2061 
2062 	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
2063 	return 0;
2064 }
2065 EXPORT_SYMBOL(transport_generic_handle_tmr);
2066 
2067 void transport_generic_free_cmd_intr(
2068 	struct se_cmd *cmd)
2069 {
2070 	transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
2071 }
2072 EXPORT_SYMBOL(transport_generic_free_cmd_intr);
2073 
2074 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
2075 {
2076 	struct se_task *task, *task_tmp;
2077 	unsigned long flags;
2078 	int ret = 0;
2079 
2080 	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
2081 		CMD_TFO(cmd)->get_task_tag(cmd));
2082 
2083 	/*
2084 	 * No tasks remain in the execution queue
2085 	 */
2086 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2087 	list_for_each_entry_safe(task, task_tmp,
2088 				&T_TASK(cmd)->t_task_list, t_list) {
2089 		DEBUG_TS("task_no[%d] - Processing task %p\n",
2090 				task->task_no, task);
2091 		/*
2092 		 * If the struct se_task has not been sent and is not active,
2093 		 * remove the struct se_task from the execution queue.
2094 		 */
2095 		if (!atomic_read(&task->task_sent) &&
2096 		    !atomic_read(&task->task_active)) {
2097 			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
2098 					flags);
2099 			transport_remove_task_from_execute_queue(task,
2100 					task->se_dev);
2101 
2102 			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
2103 				task->task_no);
2104 			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2105 			continue;
2106 		}
2107 
2108 		/*
2109 		 * If the struct se_task is active, sleep until it is returned
2110 		 * from the plugin.
2111 		 */
2112 		if (atomic_read(&task->task_active)) {
2113 			atomic_set(&task->task_stop, 1);
2114 			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
2115 					flags);
2116 
2117 			DEBUG_TS("task_no[%d] - Waiting to complete\n",
2118 				task->task_no);
2119 			wait_for_completion(&task->task_stop_comp);
2120 			DEBUG_TS("task_no[%d] - Stopped successfully\n",
2121 				task->task_no);
2122 
2123 			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2124 			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
2125 
2126 			atomic_set(&task->task_active, 0);
2127 			atomic_set(&task->task_stop, 0);
2128 		} else {
2129 			DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
2130 			ret++;
2131 		}
2132 
2133 		__transport_stop_task_timer(task, &flags);
2134 	}
2135 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2136 
2137 	return ret;
2138 }
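/*
 * Note on the return value above: ret counts the tasks that were already
 * sent to the backend and are no longer active, and therefore needed no
 * action here ("Did nothing"); a return of 0 means every remaining task was
 * either pulled off the execute queue or waited on until the backend
 * returned it.
 */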
2139 
2140 static void transport_failure_reset_queue_depth(struct se_device *dev)
2141 {
2142 	unsigned long flags;
2143 
2144 	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
2145 	atomic_inc(&dev->depth_left);
2146 	atomic_inc(&SE_HBA(dev)->left_queue_depth);
2147 	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2148 }
2149 
2150 /*
2151  * Handle SAM-esque emulation for generic transport request failures.
2152  */
2153 static void transport_generic_request_failure(
2154 	struct se_cmd *cmd,
2155 	struct se_device *dev,
2156 	int complete,
2157 	int sc)
2158 {
2159 	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
2160 		" CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
2161 		T_TASK(cmd)->t_task_cdb[0]);
2162 	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
2163 		" %d/%d transport_error_status: %d\n",
2164 		CMD_TFO(cmd)->get_cmd_state(cmd),
2165 		cmd->t_state, cmd->deferred_t_state,
2166 		cmd->transport_error_status);
2167 	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
2168 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
2169 		" t_transport_active: %d t_transport_stop: %d"
2170 		" t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
2171 		atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
2172 		atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
2173 		atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
2174 		atomic_read(&T_TASK(cmd)->t_transport_active),
2175 		atomic_read(&T_TASK(cmd)->t_transport_stop),
2176 		atomic_read(&T_TASK(cmd)->t_transport_sent));
2177 
2178 	transport_stop_all_task_timers(cmd);
2179 
2180 	if (dev)
2181 		transport_failure_reset_queue_depth(dev);
2182 	/*
2183 	 * For SAM Task Attribute emulation for failed struct se_cmd
2184 	 */
2185 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2186 		transport_complete_task_attr(cmd);
2187 
2188 	if (complete) {
2189 		transport_direct_request_timeout(cmd);
2190 		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2191 	}
2192 
2193 	switch (cmd->transport_error_status) {
2194 	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
2195 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2196 		break;
2197 	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
2198 		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
2199 		break;
2200 	case PYX_TRANSPORT_INVALID_CDB_FIELD:
2201 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2202 		break;
2203 	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
2204 		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2205 		break;
2206 	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
2207 		if (!sc)
2208 			transport_new_cmd_failure(cmd);
2209 		/*
2210 		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
2211 		 * we force this session to fall back to session
2212 		 * recovery.
2213 		 */
2214 		CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
2215 		CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
2216 
2217 		goto check_stop;
2218 	case PYX_TRANSPORT_LU_COMM_FAILURE:
2219 	case PYX_TRANSPORT_ILLEGAL_REQUEST:
2220 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2221 		break;
2222 	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
2223 		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
2224 		break;
2225 	case PYX_TRANSPORT_WRITE_PROTECTED:
2226 		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
2227 		break;
2228 	case PYX_TRANSPORT_RESERVATION_CONFLICT:
2229 		/*
2230 		 * No SENSE Data payload for this case, set SCSI Status
2231 		 * and queue the response to $FABRIC_MOD.
2232 		 *
2233 		 * Uses linux/include/scsi/scsi.h SAM status codes defs
2234 		 */
2235 		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2236 		/*
2237 		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2238 		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2239 		 * CONFLICT STATUS.
2240 		 *
2241 		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2242 		 */
2243 		if (SE_SESS(cmd) &&
2244 		    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
2245 			core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
2246 				cmd->orig_fe_lun, 0x2C,
2247 				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2248 
2249 		CMD_TFO(cmd)->queue_status(cmd);
2250 		goto check_stop;
2251 	case PYX_TRANSPORT_USE_SENSE_REASON:
2252 		/*
2253 		 * struct se_cmd->scsi_sense_reason already set
2254 		 */
2255 		break;
2256 	default:
2257 		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
2258 			T_TASK(cmd)->t_task_cdb[0],
2259 			cmd->transport_error_status);
2260 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2261 		break;
2262 	}
2263 
2264 	if (!sc)
2265 		transport_new_cmd_failure(cmd);
2266 	else
2267 		transport_send_check_condition_and_sense(cmd,
2268 			cmd->scsi_sense_reason, 0);
2269 check_stop:
2270 	transport_lun_remove_cmd(cmd);
2271 	/* The return value of check_stop is intentionally ignored here */
2272 	transport_cmd_check_stop_to_fabric(cmd);
2273 }
2274 
2275 static void transport_direct_request_timeout(struct se_cmd *cmd)
2276 {
2277 	unsigned long flags;
2278 
2279 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2280 	if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
2281 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2282 		return;
2283 	}
2284 	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
2285 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2286 		return;
2287 	}
2288 
2289 	atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
2290 		   &T_TASK(cmd)->t_se_count);
2291 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2292 }
2293 
2294 static void transport_generic_request_timeout(struct se_cmd *cmd)
2295 {
2296 	unsigned long flags;
2297 
2298 	/*
2299 	 * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
2300 	 * to allow last call to free memory resources.
2301 	 */
2302 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2303 	if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
2304 		int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
2305 
2306 		atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
2307 	}
2308 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2309 
2310 	transport_generic_remove(cmd, 0, 0);
2311 }
2312 
2313 static int
2314 transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
2315 {
2316 	unsigned char *buf;
2317 
2318 	buf = kzalloc(data_length, GFP_KERNEL);
2319 	if (!(buf)) {
2320 		printk(KERN_ERR "Unable to allocate memory for buffer\n");
2321 		return -1;
2322 	}
2323 
2324 	T_TASK(cmd)->t_tasks_se_num = 0;
2325 	T_TASK(cmd)->t_task_buf = buf;
2326 
2327 	return 0;
2328 }
2329 
2330 static inline u32 transport_lba_21(unsigned char *cdb)
2331 {
2332 	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
2333 }
2334 
2335 static inline u32 transport_lba_32(unsigned char *cdb)
2336 {
2337 	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2338 }
2339 
2340 static inline unsigned long long transport_lba_64(unsigned char *cdb)
2341 {
2342 	unsigned int __v1, __v2;
2343 
2344 	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2345 	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2346 
2347 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2348 }
2349 
2350 /*
2351  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
2352  */
2353 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
2354 {
2355 	unsigned int __v1, __v2;
2356 
2357 	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
2358 	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
2359 
2360 	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2361 }
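/*
 * Worked example for the LBA helpers above: a READ_10 CDB addressing
 * LBA 0x00012345 carries the value big-endian in bytes 2-5, i.e.
 * cdb[2..5] == { 0x00, 0x01, 0x23, 0x45 }, which transport_lba_32()
 * reassembles as (0x00 << 24) | (0x01 << 16) | (0x23 << 8) | 0x45.
 * transport_lba_64() does the same for bytes 2-9 of the 16-byte CDBs,
 * and transport_lba_64_ext() for bytes 12-19 of the 32-byte variable
 * length CDB variants.
 */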
2362 
2363 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2364 {
2365 	unsigned long flags;
2366 
2367 	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
2368 	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
2369 	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
2370 }
2371 
2372 /*
2373  * Called from interrupt context.
2374  */
2375 static void transport_task_timeout_handler(unsigned long data)
2376 {
2377 	struct se_task *task = (struct se_task *)data;
2378 	struct se_cmd *cmd = TASK_CMD(task);
2379 	unsigned long flags;
2380 
2381 	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2382 
2383 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2384 	if (task->task_flags & TF_STOP) {
2385 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2386 		return;
2387 	}
2388 	task->task_flags &= ~TF_RUNNING;
2389 
2390 	/*
2391 	 * Determine if transport_complete_task() has already been called.
2392 	 */
2393 	if (!(atomic_read(&task->task_active))) {
2394 		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
2395 				" == 0\n", task, cmd);
2396 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2397 		return;
2398 	}
2399 
2400 	atomic_inc(&T_TASK(cmd)->t_se_count);
2401 	atomic_inc(&T_TASK(cmd)->t_transport_timeout);
2402 	T_TASK(cmd)->t_tasks_failed = 1;
2403 
2404 	atomic_set(&task->task_timeout, 1);
2405 	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
2406 	task->task_scsi_status = 1;
2407 
2408 	if (atomic_read(&task->task_stop)) {
2409 		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
2410 				" == 1\n", task, cmd);
2411 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2412 		complete(&task->task_stop_comp);
2413 		return;
2414 	}
2415 
2416 	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
2417 		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
2418 				" t_task_cdbs_left\n", task, cmd);
2419 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2420 		return;
2421 	}
2422 	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2423 			task, cmd);
2424 
2425 	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
2426 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2427 
2428 	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2429 }
2430 
2431 /*
2432  * Called with T_TASK(cmd)->t_state_lock held.
2433  */
2434 static void transport_start_task_timer(struct se_task *task)
2435 {
2436 	struct se_device *dev = task->se_dev;
2437 	int timeout;
2438 
2439 	if (task->task_flags & TF_RUNNING)
2440 		return;
2441 	/*
2442 	 * If the task_timeout is disabled, exit now.
2443 	 */
2444 	timeout = DEV_ATTRIB(dev)->task_timeout;
2445 	if (!(timeout))
2446 		return;
2447 
2448 	init_timer(&task->task_timer);
2449 	task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
2450 	task->task_timer.data = (unsigned long) task;
2451 	task->task_timer.function = transport_task_timeout_handler;
2452 
2453 	task->task_flags |= TF_RUNNING;
2454 	add_timer(&task->task_timer);
2455 #if 0
2456 	printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
2457 		" %d\n", task->task_se_cmd, task, timeout);
2458 #endif
2459 }
2460 
2461 /*
2462  * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
2463  */
2464 void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2465 {
2466 	struct se_cmd *cmd = TASK_CMD(task);
2467 
2468 	if (!(task->task_flags & TF_RUNNING))
2469 		return;
2470 
2471 	task->task_flags |= TF_STOP;
2472 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
2473 
2474 	del_timer_sync(&task->task_timer);
2475 
2476 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
2477 	task->task_flags &= ~TF_RUNNING;
2478 	task->task_flags &= ~TF_STOP;
2479 }
2480 
2481 static void transport_stop_all_task_timers(struct se_cmd *cmd)
2482 {
2483 	struct se_task *task = NULL, *task_tmp;
2484 	unsigned long flags;
2485 
2486 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2487 	list_for_each_entry_safe(task, task_tmp,
2488 				&T_TASK(cmd)->t_task_list, t_list)
2489 		__transport_stop_task_timer(task, &flags);
2490 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2491 }
2492 
2493 static inline int transport_tcq_window_closed(struct se_device *dev)
2494 {
2495 	if (dev->dev_tcq_window_closed++ <
2496 			PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
2497 		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
2498 	} else
2499 		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
2500 
2501 	wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
2502 	return 0;
2503 }
2504 
2505 /*
2506  * Called from Fabric Module context from transport_execute_tasks()
2507  *
2508  * The return of this function determines whether the tasks from struct se_cmd
2509  * get added to the execution queue in transport_execute_tasks(),
2510  * or are added to the delayed or ordered lists here.
2511  */
2512 static inline int transport_execute_task_attr(struct se_cmd *cmd)
2513 {
2514 	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
2515 		return 1;
2516 	/*
2517 	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
2518 	 * so the passed struct se_cmd's tasks are added to the front of the execution list.
2519 	 */
2520 	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2521 		atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
2522 		smp_mb__after_atomic_inc();
2523 		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
2524 			" 0x%02x, se_ordered_id: %u\n",
2525 			T_TASK(cmd)->t_task_cdb[0],
2526 			cmd->se_ordered_id);
2527 		return 1;
2528 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2529 		spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
2530 		list_add_tail(&cmd->se_ordered_list,
2531 				&SE_DEV(cmd)->ordered_cmd_list);
2532 		spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
2533 
2534 		atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
2535 		smp_mb__after_atomic_inc();
2536 
2537 		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
2538 				" list, se_ordered_id: %u\n",
2539 				T_TASK(cmd)->t_task_cdb[0],
2540 				cmd->se_ordered_id);
2541 		/*
2542 		 * Add ORDERED command to tail of execution queue if
2543 		 * no other older commands exist that need to be
2544 		 * completed first.
2545 		 */
2546 		if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
2547 			return 1;
2548 	} else {
2549 		/*
2550 		 * For SIMPLE and UNTAGGED Task Attribute commands
2551 		 */
2552 		atomic_inc(&SE_DEV(cmd)->simple_cmds);
2553 		smp_mb__after_atomic_inc();
2554 	}
2555 	/*
2556 	 * If one or more commands with an ORDERED task attribute are still
2557 	 * outstanding, the dormant task(s) built for the passed struct se_cmd
2558 	 * cannot become Active on this struct se_device yet.
2559 	 */
2560 	if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
2561 		/*
2562 		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
2563 		 * will be drained upon completion of HEAD_OF_QUEUE task.
2564 		 */
2565 		spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
2566 		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2567 		list_add_tail(&cmd->se_delayed_list,
2568 				&SE_DEV(cmd)->delayed_cmd_list);
2569 		spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
2570 
2571 		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
2572 			" delayed CMD list, se_ordered_id: %u\n",
2573 			T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
2574 			cmd->se_ordered_id);
2575 		/*
2576 		 * Return zero to let transport_execute_tasks() know
2577 		 * not to add the delayed tasks to the execution list.
2578 		 */
2579 		return 0;
2580 	}
2581 	/*
2582 	 * Otherwise, no ORDERED task attributes exist..
2583 	 */
2584 	return 1;
2585 }
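/*
 * Descriptive summary of the decisions made above:
 *
 *   - SAM Task Attribute emulation disabled:              return 1
 *   - HEAD_OF_QUEUE:                                      return 1
 *   - ORDERED with no SIMPLE commands outstanding:        return 1
 *   - SIMPLE with no ORDERED commands outstanding:        return 1
 *   - otherwise (one or more ORDERED commands pending):   return 0 and
 *     park the command on struct se_device->delayed_cmd_list
 */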
2586 
2587 /*
2588  * Called from fabric module context in transport_generic_new_cmd() and
2589  * transport_generic_process_write()
2590  */
2591 static int transport_execute_tasks(struct se_cmd *cmd)
2592 {
2593 	int add_tasks;
2594 
2595 	if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
2596 		if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
2597 			cmd->transport_error_status =
2598 				PYX_TRANSPORT_LU_COMM_FAILURE;
2599 			transport_generic_request_failure(cmd, NULL, 0, 1);
2600 			return 0;
2601 		}
2602 	}
2603 	/*
2604 	 * Call transport_cmd_check_stop() to see if a fabric exception
2605 	 * has occurred that prevents execution.
2606 	 */
2607 	if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
2608 		/*
2609 		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2610 		 * attribute for the tasks of the received struct se_cmd CDB
2611 		 */
2612 		add_tasks = transport_execute_task_attr(cmd);
2613 		if (add_tasks == 0)
2614 			goto execute_tasks;
2615 		/*
2616 		 * This calls transport_add_tasks_from_cmd() to handle
2617 		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2618 		 * (if enabled) in __transport_add_task_to_execute_queue() and
2619 		 * transport_add_task_check_sam_attr().
2620 		 */
2621 		transport_add_tasks_from_cmd(cmd);
2622 	}
2623 	/*
2624 	 * Kick the execution queue for the cmd associated struct se_device
2625 	 * storage object.
2626 	 */
2627 execute_tasks:
2628 	__transport_execute_tasks(SE_DEV(cmd));
2629 	return 0;
2630 }
2631 
2632 /*
2633  * Called to check the struct se_device TCQ depth window, and once open pull
2634  * struct se_task entries from struct se_device->execute_task_list for execution.
2635  *
2636  * Called from transport_processing_thread()
2637  */
2638 static int __transport_execute_tasks(struct se_device *dev)
2639 {
2640 	int error;
2641 	struct se_cmd *cmd = NULL;
2642 	struct se_task *task;
2643 	unsigned long flags;
2644 
2645 	/*
2646 	 * Check if there is enough room in the device and HBA queue to send
2647 	 * struct se_transport_task's to the selected transport.
2648 	 */
2649 check_depth:
2650 	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
2651 	if (!(atomic_read(&dev->depth_left)) ||
2652 	    !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
2653 		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2654 		return transport_tcq_window_closed(dev);
2655 	}
2656 	dev->dev_tcq_window_closed = 0;
2657 
2658 	spin_lock(&dev->execute_task_lock);
2659 	task = transport_get_task_from_execute_queue(dev);
2660 	spin_unlock(&dev->execute_task_lock);
2661 
2662 	if (!task) {
2663 		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2664 		return 0;
2665 	}
2666 
2667 	atomic_dec(&dev->depth_left);
2668 	atomic_dec(&SE_HBA(dev)->left_queue_depth);
2669 	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2670 
2671 	cmd = TASK_CMD(task);
2672 
2673 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2674 	atomic_set(&task->task_active, 1);
2675 	atomic_set(&task->task_sent, 1);
2676 	atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
2677 
2678 	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
2679 	    T_TASK(cmd)->t_task_cdbs)
2680 		atomic_set(&cmd->transport_sent, 1);
2681 
2682 	transport_start_task_timer(task);
2683 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2684 	/*
2685 	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
2686 	 * to grab REPORT_LUNS CDBs before they hit the
2687 	 * struct se_subsystem_api->do_task() caller below.
2688 	 */
2689 	if (cmd->transport_emulate_cdb) {
2690 		error = cmd->transport_emulate_cdb(cmd);
2691 		if (error != 0) {
2692 			cmd->transport_error_status = error;
2693 			atomic_set(&task->task_active, 0);
2694 			atomic_set(&cmd->transport_sent, 0);
2695 			transport_stop_tasks_for_cmd(cmd);
2696 			transport_generic_request_failure(cmd, dev, 0, 1);
2697 			goto check_depth;
2698 		}
2699 		/*
2700 		 * Handle the successful completion of transport_emulate_cdb()
2701 		 * for the synchronous case (SCF_EMULATE_CDB_ASYNC not set).
2702 		 * Otherwise the caller is expected to complete the task with
2703 		 * proper status.
2704 		 */
2705 		if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
2706 			cmd->scsi_status = SAM_STAT_GOOD;
2707 			task->task_scsi_status = GOOD;
2708 			transport_complete_task(task, 1);
2709 		}
2710 	} else {
2711 		/*
2712 		 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
2713 		 * RAMDISK we use the internal transport_emulate_control_cdb() logic
2714 		 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
2715 		 * LUN emulation code.
2716 		 *
2717 		 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
2718 		 * call ->do_task() directly and let the underlying TCM subsystem plugin
2719 		 * code handle the CDB emulation.
2720 		 */
2721 		if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
2722 		    (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
2723 			error = transport_emulate_control_cdb(task);
2724 		else
2725 			error = TRANSPORT(dev)->do_task(task);
2726 
2727 		if (error != 0) {
2728 			cmd->transport_error_status = error;
2729 			atomic_set(&task->task_active, 0);
2730 			atomic_set(&cmd->transport_sent, 0);
2731 			transport_stop_tasks_for_cmd(cmd);
2732 			transport_generic_request_failure(cmd, dev, 0, 1);
2733 		}
2734 	}
2735 
2736 	goto check_depth;
2737 
2738 	return 0;
2739 }
2740 
2741 void transport_new_cmd_failure(struct se_cmd *se_cmd)
2742 {
2743 	unsigned long flags;
2744 	/*
2745 	 * Any unsolicited data will get dumped for failed command inside of
2746 	 * the fabric plugin
2747 	 */
2748 	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
2749 	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2750 	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2751 	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
2752 
2753 	CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
2754 }
2755 
2756 static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
2757 
2758 static inline u32 transport_get_sectors_6(
2759 	unsigned char *cdb,
2760 	struct se_cmd *cmd,
2761 	int *ret)
2762 {
2763 	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2764 
2765 	/*
2766 	 * Assume TYPE_DISK for non struct se_device objects.
2767 	 * Use 8-bit sector value.
2768 	 */
2769 	if (!dev)
2770 		goto type_disk;
2771 
2772 	/*
2773 	 * Use 24-bit allocation length for TYPE_TAPE.
2774 	 */
2775 	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
2776 		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2777 
2778 	/*
2779 	 * Everything else assume TYPE_DISK Sector CDB location.
2780 	 * Use 8-bit sector value.
2781 	 */
2782 type_disk:
2783 	return (u32)cdb[4];
2784 }
2785 
2786 static inline u32 transport_get_sectors_10(
2787 	unsigned char *cdb,
2788 	struct se_cmd *cmd,
2789 	int *ret)
2790 {
2791 	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2792 
2793 	/*
2794 	 * Assume TYPE_DISK for non struct se_device objects.
2795 	 * Use 16-bit sector value.
2796 	 */
2797 	if (!dev)
2798 		goto type_disk;
2799 
2800 	/*
2801 	 * XXX_10 is not defined in SSC, throw an exception
2802 	 */
2803 	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2804 		*ret = -1;
2805 		return 0;
2806 	}
2807 
2808 	/*
2809 	 * Everything else assume TYPE_DISK Sector CDB location.
2810 	 * Use 16-bit sector value.
2811 	 */
2812 type_disk:
2813 	return (u32)(cdb[7] << 8) + cdb[8];
2814 }
2815 
2816 static inline u32 transport_get_sectors_12(
2817 	unsigned char *cdb,
2818 	struct se_cmd *cmd,
2819 	int *ret)
2820 {
2821 	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2822 
2823 	/*
2824 	 * Assume TYPE_DISK for non struct se_device objects.
2825 	 * Use 32-bit sector value.
2826 	 */
2827 	if (!dev)
2828 		goto type_disk;
2829 
2830 	/*
2831 	 * XXX_12 is not defined in SSC, throw an exception
2832 	 */
2833 	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2834 		*ret = -1;
2835 		return 0;
2836 	}
2837 
2838 	/*
2839 	 * Everything else assume TYPE_DISK Sector CDB location.
2840 	 * Use 32-bit sector value.
2841 	 */
2842 type_disk:
2843 	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2844 }
2845 
2846 static inline u32 transport_get_sectors_16(
2847 	unsigned char *cdb,
2848 	struct se_cmd *cmd,
2849 	int *ret)
2850 {
2851 	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2852 
2853 	/*
2854 	 * Assume TYPE_DISK for non struct se_device objects.
2855 	 * Use 32-bit sector value.
2856 	 */
2857 	if (!dev)
2858 		goto type_disk;
2859 
2860 	/*
2861 	 * Use 24-bit allocation length for TYPE_TAPE.
2862 	 */
2863 	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
2864 		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2865 
2866 type_disk:
2867 	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2868 		    (cdb[12] << 8) + cdb[13];
2869 }
2870 
2871 /*
2872  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2873  */
2874 static inline u32 transport_get_sectors_32(
2875 	unsigned char *cdb,
2876 	struct se_cmd *cmd,
2877 	int *ret)
2878 {
2879 	/*
2880 	 * Assume TYPE_DISK for non struct se_device objects.
2881 	 * Use 32-bit sector value.
2882 	 */
2883 	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2884 		    (cdb[30] << 8) + cdb[31];
2885 
2886 }
2887 
2888 static inline u32 transport_get_size(
2889 	u32 sectors,
2890 	unsigned char *cdb,
2891 	struct se_cmd *cmd)
2892 {
2893 	struct se_device *dev = SE_DEV(cmd);
2894 
2895 	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2896 		if (cdb[1] & 1) { /* sectors */
2897 			return DEV_ATTRIB(dev)->block_size * sectors;
2898 		} else /* bytes */
2899 			return sectors;
2900 	}
2901 #if 0
2902 	printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
2903 			" %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
2904 			DEV_ATTRIB(dev)->block_size * sectors,
2905 			TRANSPORT(dev)->name);
2906 #endif
2907 	return DEV_ATTRIB(dev)->block_size * sectors;
2908 }
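/*
 * Worked example: for a TYPE_DISK backend with a 512 byte logical block
 * size, a READ_10 with a TRANSFER LENGTH of 8 gives
 * transport_get_size(8, cdb, cmd) == 8 * 512 == 4096 bytes.  For TYPE_TAPE
 * the FIXED bit (cdb[1] & 1) selects between a block count and a byte
 * count, as handled above.
 */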
2909 
2910 unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
2911 {
2912 	unsigned char result = 0;
2913 	/*
2914 	 * MSB
2915 	 */
2916 	if ((val[0] >= 'a') && (val[0] <= 'f'))
2917 		result = ((val[0] - 'a' + 10) & 0xf) << 4;
2918 	else
2919 		if ((val[0] >= 'A') && (val[0] <= 'F'))
2920 			result = ((val[0] - 'A' + 10) & 0xf) << 4;
2921 		else /* digit */
2922 			result = ((val[0] - '0') & 0xf) << 4;
2923 	/*
2924 	 * LSB
2925 	 */
2926 	if ((val[1] >= 'a') && (val[1] <= 'f'))
2927 		result |= ((val[1] - 'a' + 10) & 0xf);
2928 	else
2929 		if ((val[1] >= 'A') && (val[1] <= 'F'))
2930 			result |= ((val[1] - 'A' + 10) & 0xf);
2931 		else /* digit */
2932 			result |= ((val[1] - '0') & 0xf);
2933 
2934 	return result;
2935 }
2936 EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
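/*
 * Usage sketch (not compiled): converting the ASCII hex pair "3f" into its
 * binary value 0x3f.  The function and variable names below are
 * illustrative only.
 */
#if 0
static void example_asciihex_pair(void)
{
	unsigned char pair[2] = { '3', 'f' };
	unsigned char bin = transport_asciihex_to_binaryhex(pair);

	/* bin == 0x3f */
	(void)bin;
}
#endif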
2937 
2938 static void transport_xor_callback(struct se_cmd *cmd)
2939 {
2940 	unsigned char *buf, *addr;
2941 	struct se_mem *se_mem;
2942 	unsigned int offset;
2943 	int i;
2944 	/*
2945 	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2946 	 *
2947 	 * 1) read the specified logical block(s);
2948 	 * 2) transfer logical blocks from the data-out buffer;
2949 	 * 3) XOR the logical blocks transferred from the data-out buffer with
2950 	 *    the logical blocks read, storing the resulting XOR data in a buffer;
2951 	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2952 	 *    blocks transferred from the data-out buffer; and
2953 	 * 5) transfer the resulting XOR data to the data-in buffer.
2954 	 */
2955 	buf = kmalloc(cmd->data_length, GFP_KERNEL);
2956 	if (!(buf)) {
2957 		printk(KERN_ERR "Unable to allocate xor_callback buf\n");
2958 		return;
2959 	}
2960 	/*
2961 	 * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
2962 	 * into the locally allocated *buf
2963 	 */
2964 	transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
2965 	/*
2966 	 * Now perform the XOR against the BIDI read memory located at
2967 	 * T_TASK(cmd)->t_mem_bidi_list
2968 	 */
2969 
2970 	offset = 0;
2971 	list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
2972 		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
2973 		if (!(addr))
2974 			goto out;
2975 
2976 		for (i = 0; i < se_mem->se_len; i++)
2977 			*(addr + se_mem->se_off + i) ^= *(buf + offset + i);
2978 
2979 		offset += se_mem->se_len;
2980 		kunmap_atomic(addr, KM_USER0);
2981 	}
2982 out:
2983 	kfree(buf);
2984 }
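/*
 * Worked single-byte example of the XDWRITEREAD XOR above: if the data-out
 * (WRITE) buffer holds 0xF0 and the block read from the backend holds 0x3C,
 * the BIDI data-in buffer returned to the initiator carries
 * 0x3C ^ 0xF0 == 0xCC.
 */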
2985 
2986 /*
2987  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2988  */
2989 static int transport_get_sense_data(struct se_cmd *cmd)
2990 {
2991 	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2992 	struct se_device *dev;
2993 	struct se_task *task = NULL, *task_tmp;
2994 	unsigned long flags;
2995 	u32 offset = 0;
2996 
2997 	if (!SE_LUN(cmd)) {
2998 		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
2999 		return -1;
3000 	}
3001 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
3002 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3003 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3004 		return 0;
3005 	}
3006 
3007 	list_for_each_entry_safe(task, task_tmp,
3008 				&T_TASK(cmd)->t_task_list, t_list) {
3009 
3010 		if (!task->task_sense)
3011 			continue;
3012 
3013 		dev = task->se_dev;
3014 		if (!(dev))
3015 			continue;
3016 
3017 		if (!TRANSPORT(dev)->get_sense_buffer) {
3018 			printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
3019 					" is NULL\n");
3020 			continue;
3021 		}
3022 
3023 		sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
3024 		if (!(sense_buffer)) {
3025 			printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
3026 				" sense buffer for task with sense\n",
3027 				CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
3028 			continue;
3029 		}
3030 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3031 
3032 		offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
3033 				TRANSPORT_SENSE_BUFFER);
3034 
3035 		memcpy((void *)&buffer[offset], (void *)sense_buffer,
3036 				TRANSPORT_SENSE_BUFFER);
3037 		cmd->scsi_status = task->task_scsi_status;
3038 		/* Automatically padded */
3039 		cmd->scsi_sense_length =
3040 				(TRANSPORT_SENSE_BUFFER + offset);
3041 
3042 		printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
3043 				" and sense\n",
3044 			dev->se_hba->hba_id, TRANSPORT(dev)->name,
3045 				cmd->scsi_status);
3046 		return 0;
3047 	}
3048 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3049 
3050 	return -1;
3051 }
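/*
 * Example of the layout produced above, assuming a fabric whose
 * set_fabric_sense_len() reserves a two byte sense length header: the
 * returned offset is 2, TRANSPORT_SENSE_BUFFER bytes of sense data are
 * copied starting at cmd->sense_buffer[2], and cmd->scsi_sense_length is
 * reported as TRANSPORT_SENSE_BUFFER + 2.
 */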
3052 
3053 static int transport_allocate_resources(struct se_cmd *cmd)
3054 {
3055 	u32 length = cmd->data_length;
3056 
3057 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3058 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
3059 		return transport_generic_get_mem(cmd, length, PAGE_SIZE);
3060 	else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
3061 		return transport_generic_allocate_buf(cmd, length);
3062 	else
3063 		return 0;
3064 }
3065 
3066 static int
3067 transport_handle_reservation_conflict(struct se_cmd *cmd)
3068 {
3069 	cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3070 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3071 	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
3072 	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
3073 	/*
3074 	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
3075 	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
3076 	 * CONFLICT STATUS.
3077 	 *
3078 	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
3079 	 */
3080 	if (SE_SESS(cmd) &&
3081 	    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
3082 		core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
3083 			cmd->orig_fe_lun, 0x2C,
3084 			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
3085 	return -2;
3086 }
3087 
3088 /*	transport_generic_cmd_sequencer():
3089  *
3090  *	Generic Command Sequencer that should work for most DAS transport
3091  *	drivers.
3092  *
3093  *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
3094  *	RX Thread.
3095  *
3096  *	FIXME: Need to support other SCSI OPCODES here as well.
3097  */
3098 static int transport_generic_cmd_sequencer(
3099 	struct se_cmd *cmd,
3100 	unsigned char *cdb)
3101 {
3102 	struct se_device *dev = SE_DEV(cmd);
3103 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
3104 	int ret = 0, sector_ret = 0, passthrough;
3105 	u32 sectors = 0, size = 0, pr_reg_type = 0;
3106 	u16 service_action;
3107 	u8 alua_ascq = 0;
3108 	/*
3109 	 * Check for an existing UNIT ATTENTION condition
3110 	 */
3111 	if (core_scsi3_ua_check(cmd, cdb) < 0) {
3112 		cmd->transport_wait_for_tasks =
3113 				&transport_nop_wait_for_tasks;
3114 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3115 		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
3116 		return -2;
3117 	}
3118 	/*
3119 	 * Check status of Asymmetric Logical Unit Assignment port
3120 	 */
3121 	ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
3122 	if (ret != 0) {
3123 		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3124 		/*
3125 		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'.
3126 		 * The ALUA additional sense code qualifier (ASCQ) is determined
3127 		 * by the ALUA primary or secondary access state.
3128 		 */
3129 		if (ret > 0) {
3130 #if 0
3131 			printk(KERN_INFO "[%s]: ALUA TG Port not available,"
3132 				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
3133 				CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
3134 #endif
3135 			transport_set_sense_codes(cmd, 0x04, alua_ascq);
3136 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3137 			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
3138 			return -2;
3139 		}
3140 		goto out_invalid_cdb_field;
3141 	}
3142 	/*
3143 	 * Check status for SPC-3 Persistent Reservations
3144 	 */
3145 	if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
3146 		if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
3147 					cmd, cdb, pr_reg_type) != 0)
3148 			return transport_handle_reservation_conflict(cmd);
3149 		/*
3150 		 * This means the CDB is allowed for the SCSI Initiator port
3151 		 * when said port is *NOT* holding the legacy SPC-2 or
3152 		 * SPC-3 Persistent Reservation.
3153 		 */
3154 	}
3155 
3156 	switch (cdb[0]) {
3157 	case READ_6:
3158 		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3159 		if (sector_ret)
3160 			goto out_unsupported_cdb;
3161 		size = transport_get_size(sectors, cdb, cmd);
3162 		cmd->transport_split_cdb = &split_cdb_XX_6;
3163 		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
3164 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3165 		break;
3166 	case READ_10:
3167 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3168 		if (sector_ret)
3169 			goto out_unsupported_cdb;
3170 		size = transport_get_size(sectors, cdb, cmd);
3171 		cmd->transport_split_cdb = &split_cdb_XX_10;
3172 		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3173 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3174 		break;
3175 	case READ_12:
3176 		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3177 		if (sector_ret)
3178 			goto out_unsupported_cdb;
3179 		size = transport_get_size(sectors, cdb, cmd);
3180 		cmd->transport_split_cdb = &split_cdb_XX_12;
3181 		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3182 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3183 		break;
3184 	case READ_16:
3185 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3186 		if (sector_ret)
3187 			goto out_unsupported_cdb;
3188 		size = transport_get_size(sectors, cdb, cmd);
3189 		cmd->transport_split_cdb = &split_cdb_XX_16;
3190 		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3191 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3192 		break;
3193 	case WRITE_6:
3194 		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3195 		if (sector_ret)
3196 			goto out_unsupported_cdb;
3197 		size = transport_get_size(sectors, cdb, cmd);
3198 		cmd->transport_split_cdb = &split_cdb_XX_6;
3199 		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
3200 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3201 		break;
3202 	case WRITE_10:
3203 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3204 		if (sector_ret)
3205 			goto out_unsupported_cdb;
3206 		size = transport_get_size(sectors, cdb, cmd);
3207 		cmd->transport_split_cdb = &split_cdb_XX_10;
3208 		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3209 		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3210 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3211 		break;
3212 	case WRITE_12:
3213 		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3214 		if (sector_ret)
3215 			goto out_unsupported_cdb;
3216 		size = transport_get_size(sectors, cdb, cmd);
3217 		cmd->transport_split_cdb = &split_cdb_XX_12;
3218 		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3219 		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3220 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3221 		break;
3222 	case WRITE_16:
3223 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3224 		if (sector_ret)
3225 			goto out_unsupported_cdb;
3226 		size = transport_get_size(sectors, cdb, cmd);
3227 		cmd->transport_split_cdb = &split_cdb_XX_16;
3228 		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3229 		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3230 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3231 		break;
3232 	case XDWRITEREAD_10:
3233 		if ((cmd->data_direction != DMA_TO_DEVICE) ||
3234 		    !(T_TASK(cmd)->t_tasks_bidi))
3235 			goto out_invalid_cdb_field;
3236 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3237 		if (sector_ret)
3238 			goto out_unsupported_cdb;
3239 		size = transport_get_size(sectors, cdb, cmd);
3240 		cmd->transport_split_cdb = &split_cdb_XX_10;
3241 		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3242 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3243 		passthrough = (TRANSPORT(dev)->transport_type ==
3244 				TRANSPORT_PLUGIN_PHBA_PDEV);
3245 		/*
3246 		 * Skip the remaining assignments for TCM/PSCSI passthrough
3247 		 */
3248 		if (passthrough)
3249 			break;
3250 		/*
3251 		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
3252 		 */
3253 		cmd->transport_complete_callback = &transport_xor_callback;
3254 		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3255 		break;
3256 	case VARIABLE_LENGTH_CMD:
3257 		service_action = get_unaligned_be16(&cdb[8]);
3258 		/*
3259 		 * Determine if this is TCM/PSCSI device and we should disable
3260 		 * internal emulation for this CDB.
3261 		 */
3262 		passthrough = (TRANSPORT(dev)->transport_type ==
3263 					TRANSPORT_PLUGIN_PHBA_PDEV);
3264 
3265 		switch (service_action) {
3266 		case XDWRITEREAD_32:
3267 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3268 			if (sector_ret)
3269 				goto out_unsupported_cdb;
3270 			size = transport_get_size(sectors, cdb, cmd);
3271 			/*
3272 			 * Use WRITE_32 and READ_32 opcodes for the emulated
3273 			 * XDWRITE_READ_32 logic.
3274 			 */
3275 			cmd->transport_split_cdb = &split_cdb_XX_32;
3276 			T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
3277 			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3278 
3279 			/*
3280 			 * Skip the remaining assignments for TCM/PSCSI passthrough
3281 			 */
3282 			if (passthrough)
3283 				break;
3284 
3285 			/*
3286 			 * Setup BIDI XOR callback to be run during
3287 			 * transport_generic_complete_ok()
3288 			 */
3289 			cmd->transport_complete_callback = &transport_xor_callback;
3290 			T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
3291 			break;
3292 		case WRITE_SAME_32:
3293 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3294 			if (sector_ret)
3295 				goto out_unsupported_cdb;
3296 			size = transport_get_size(sectors, cdb, cmd);
3297 			T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
3298 			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3299 
3300 			/*
3301 			 * Skip the remaining assignments for TCM/PSCSI passthrough
3302 			 */
3303 			if (passthrough)
3304 				break;
3305 
3306 			if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3307 				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3308 					" bits not supported for Block Discard"
3309 					" Emulation\n");
3310 				goto out_invalid_cdb_field;
3311 			}
3312 			/*
3313 			 * Currently for the emulated case we only accept
3314 			 * thin provisioning WRITE_SAME (TPWS) requests with the UNMAP=1 bit set.
3315 			 */
3316 			if (!(cdb[10] & 0x08)) {
3317 				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
3318 					" supported for Block Discard Emulation\n");
3319 				goto out_invalid_cdb_field;
3320 			}
3321 			break;
3322 		default:
3323 			printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
3324 				" 0x%04x not supported\n", service_action);
3325 			goto out_unsupported_cdb;
3326 		}
3327 		break;
3328 	case 0xa3:
3329 		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
3330 			/* MAINTENANCE_IN from SCC-2 */
3331 			/*
3332 			 * Check for emulated MI_REPORT_TARGET_PGS.
3333 			 */
3334 			if (cdb[1] == MI_REPORT_TARGET_PGS) {
3335 				cmd->transport_emulate_cdb =
3336 				(T10_ALUA(su_dev)->alua_type ==
3337 				 SPC3_ALUA_EMULATED) ?
3338 				&core_emulate_report_target_port_groups :
3339 				NULL;
3340 			}
3341 			size = (cdb[6] << 24) | (cdb[7] << 16) |
3342 			       (cdb[8] << 8) | cdb[9];
3343 		} else {
3344 			/* GPCMD_SEND_KEY from multi media commands */
3345 			size = (cdb[8] << 8) + cdb[9];
3346 		}
3347 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3348 		break;
3349 	case MODE_SELECT:
3350 		size = cdb[4];
3351 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3352 		break;
3353 	case MODE_SELECT_10:
3354 		size = (cdb[7] << 8) + cdb[8];
3355 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3356 		break;
3357 	case MODE_SENSE:
3358 		size = cdb[4];
3359 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3360 		break;
3361 	case MODE_SENSE_10:
3362 	case GPCMD_READ_BUFFER_CAPACITY:
3363 	case GPCMD_SEND_OPC:
3364 	case LOG_SELECT:
3365 	case LOG_SENSE:
3366 		size = (cdb[7] << 8) + cdb[8];
3367 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3368 		break;
3369 	case READ_BLOCK_LIMITS:
3370 		size = READ_BLOCK_LEN;
3371 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3372 		break;
3373 	case GPCMD_GET_CONFIGURATION:
3374 	case GPCMD_READ_FORMAT_CAPACITIES:
3375 	case GPCMD_READ_DISC_INFO:
3376 	case GPCMD_READ_TRACK_RZONE_INFO:
3377 		size = (cdb[7] << 8) + cdb[8];
3378 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3379 		break;
3380 	case PERSISTENT_RESERVE_IN:
3381 	case PERSISTENT_RESERVE_OUT:
3382 		cmd->transport_emulate_cdb =
3383 			(T10_RES(su_dev)->res_type ==
3384 			 SPC3_PERSISTENT_RESERVATIONS) ?
3385 			&core_scsi3_emulate_pr : NULL;
3386 		size = (cdb[7] << 8) + cdb[8];
3387 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3388 		break;
3389 	case GPCMD_MECHANISM_STATUS:
3390 	case GPCMD_READ_DVD_STRUCTURE:
3391 		size = (cdb[8] << 8) + cdb[9];
3392 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3393 		break;
3394 	case READ_POSITION:
3395 		size = READ_POSITION_LEN;
3396 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3397 		break;
3398 	case 0xa4:
3399 		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
3400 			/* MAINTENANCE_OUT from SCC-2
3401 			 *
3402 			 * Check for emulated MO_SET_TARGET_PGS.
3403 			 */
3404 			if (cdb[1] == MO_SET_TARGET_PGS) {
3405 				cmd->transport_emulate_cdb =
3406 				(T10_ALUA(su_dev)->alua_type ==
3407 					SPC3_ALUA_EMULATED) ?
3408 				&core_emulate_set_target_port_groups :
3409 				NULL;
3410 			}
3411 
3412 			size = (cdb[6] << 24) | (cdb[7] << 16) |
3413 			       (cdb[8] << 8) | cdb[9];
3414 		} else  {
3415 			/* GPCMD_REPORT_KEY from multi media commands */
3416 			size = (cdb[8] << 8) + cdb[9];
3417 		}
3418 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3419 		break;
3420 	case INQUIRY:
3421 		size = (cdb[3] << 8) + cdb[4];
3422 		/*
3423 		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
3424 		 * See spc4r17 section 5.3
3425 		 */
3426 		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3427 			cmd->sam_task_attr = MSG_HEAD_TAG;
3428 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3429 		break;
3430 	case READ_BUFFER:
3431 		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3432 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3433 		break;
3434 	case READ_CAPACITY:
3435 		size = READ_CAP_LEN;
3436 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3437 		break;
3438 	case READ_MEDIA_SERIAL_NUMBER:
3439 	case SECURITY_PROTOCOL_IN:
3440 	case SECURITY_PROTOCOL_OUT:
3441 		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3442 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3443 		break;
3444 	case SERVICE_ACTION_IN:
3445 	case ACCESS_CONTROL_IN:
3446 	case ACCESS_CONTROL_OUT:
3447 	case EXTENDED_COPY:
3448 	case READ_ATTRIBUTE:
3449 	case RECEIVE_COPY_RESULTS:
3450 	case WRITE_ATTRIBUTE:
3451 		size = (cdb[10] << 24) | (cdb[11] << 16) |
3452 		       (cdb[12] << 8) | cdb[13];
3453 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3454 		break;
3455 	case RECEIVE_DIAGNOSTIC:
3456 	case SEND_DIAGNOSTIC:
3457 		size = (cdb[3] << 8) | cdb[4];
3458 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3459 		break;
3460 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
3461 #if 0
3462 	case GPCMD_READ_CD:
3463 		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3464 		size = (2336 * sectors);
3465 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3466 		break;
3467 #endif
3468 	case READ_TOC:
3469 		size = cdb[8];
3470 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3471 		break;
3472 	case REQUEST_SENSE:
3473 		size = cdb[4];
3474 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3475 		break;
3476 	case READ_ELEMENT_STATUS:
3477 		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
3478 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3479 		break;
3480 	case WRITE_BUFFER:
3481 		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3482 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3483 		break;
3484 	case RESERVE:
3485 	case RESERVE_10:
3486 		/*
3487 		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
3488 		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3489 		 */
3490 		if (cdb[0] == RESERVE_10)
3491 			size = (cdb[7] << 8) | cdb[8];
3492 		else
3493 			size = cmd->data_length;
3494 
3495 		/*
3496 		 * Setup the legacy emulated handler for SPC-2 and
3497 		 * >= SPC-3 compatible reservation handling (CRH=1).
3498 		 * Otherwise, we assume the underlying SCSI logic is
3499 		 * running in SPC_PASSTHROUGH, and wants reservation
3500 		 * emulation disabled.
3501 		 */
3502 		cmd->transport_emulate_cdb =
3503 				(T10_RES(su_dev)->res_type !=
3504 				 SPC_PASSTHROUGH) ?
3505 				&core_scsi2_emulate_crh : NULL;
3506 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3507 		break;
3508 	case RELEASE:
3509 	case RELEASE_10:
3510 		/*
3511 		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
3512 		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3513 		 */
3514 		if (cdb[0] == RELEASE_10)
3515 			size = (cdb[7] << 8) | cdb[8];
3516 		else
3517 			size = cmd->data_length;
3518 
3519 		cmd->transport_emulate_cdb =
3520 				(T10_RES(su_dev)->res_type !=
3521 				 SPC_PASSTHROUGH) ?
3522 				&core_scsi2_emulate_crh : NULL;
3523 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3524 		break;
3525 	case SYNCHRONIZE_CACHE:
3526 	case 0x91: /* SYNCHRONIZE_CACHE_16: */
3527 		/*
3528 		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3529 		 */
3530 		if (cdb[0] == SYNCHRONIZE_CACHE) {
3531 			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3532 			T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3533 		} else {
3534 			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3535 			T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3536 		}
3537 		if (sector_ret)
3538 			goto out_unsupported_cdb;
3539 
3540 		size = transport_get_size(sectors, cdb, cmd);
3541 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3542 
3543 		/*
3544 		 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
3545 		 */
3546 		if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
3547 			break;
3548 		/*
3549 		 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
3550 		 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
3551 		 */
3552 		cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3553 		/*
3554 		 * Check to ensure that LBA + Range does not extend past the
3555 		 * end of the device.
3556 		 */
3557 		if (transport_get_sectors(cmd) < 0)
3558 			goto out_invalid_cdb_field;
3559 		break;
3560 	case UNMAP:
3561 		size = get_unaligned_be16(&cdb[7]);
3562 		passthrough = (TRANSPORT(dev)->transport_type ==
3563 				TRANSPORT_PLUGIN_PHBA_PDEV);
3564 		/*
3565 		 * Determine if the received UNMAP is used for direct passthrough
3566 		 * into Linux/SCSI with struct request via TCM/pSCSI, or if we are
3567 		 * signaling the use of internal transport_generic_unmap() emulation
3568 		 * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO
3569 		 * subsystem plugin backstores.
3570 		 */
3571 		if (!(passthrough))
3572 			cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
3573 
3574 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3575 		break;
3576 	case WRITE_SAME_16:
3577 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3578 		if (sector_ret)
3579 			goto out_unsupported_cdb;
3580 		size = transport_get_size(sectors, cdb, cmd);
3581 		T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]);
3582 		passthrough = (TRANSPORT(dev)->transport_type ==
3583 				TRANSPORT_PLUGIN_PHBA_PDEV);
3584 		/*
3585 		 * Determine if the received WRITE_SAME_16 is used for direct
3586 		 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
3587 		 * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
3588 		 * emulation for WRITE_SAME -> Linux/BLOCK discard with TCM/IBLOCK
3589 		 * and TCM/FILEIO subsystem plugin backstores.
3590 		 */
3591 		if (!(passthrough)) {
3592 			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
3593 				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3594 					" bits not supported for Block Discard"
3595 					" Emulation\n");
3596 				goto out_invalid_cdb_field;
3597 			}
3598 			/*
3599 			 * Currently for the emulated case we only accept
3600 			 * tpws with the UNMAP=1 bit set.
3601 			 */
3602 			if (!(cdb[1] & 0x08)) {
3603 				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
3604 					" supported for Block Discard Emulation\n");
3605 				goto out_invalid_cdb_field;
3606 			}
3607 		}
3608 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3609 		break;
3610 	case ALLOW_MEDIUM_REMOVAL:
3611 	case GPCMD_CLOSE_TRACK:
3612 	case ERASE:
3613 	case INITIALIZE_ELEMENT_STATUS:
3614 	case GPCMD_LOAD_UNLOAD:
3615 	case REZERO_UNIT:
3616 	case SEEK_10:
3617 	case GPCMD_SET_SPEED:
3618 	case SPACE:
3619 	case START_STOP:
3620 	case TEST_UNIT_READY:
3621 	case VERIFY:
3622 	case WRITE_FILEMARKS:
3623 	case MOVE_MEDIUM:
3624 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3625 		break;
3626 	case REPORT_LUNS:
3627 		cmd->transport_emulate_cdb =
3628 				&transport_core_report_lun_response;
3629 		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3630 		/*
3631 		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3632 		 * See spc4r17 section 5.3
3633 		 */
3634 		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3635 			cmd->sam_task_attr = MSG_HEAD_TAG;
3636 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3637 		break;
3638 	default:
3639 		printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
3640 			" 0x%02x, sending CHECK_CONDITION.\n",
3641 			CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
3642 		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3643 		goto out_unsupported_cdb;
3644 	}
3645 
3646 	if (size != cmd->data_length) {
3647 		printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
3648 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
3649 			" 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
3650 				cmd->data_length, size, cdb[0]);
3651 
3652 		cmd->cmd_spdtl = size;
3653 
3654 		if (cmd->data_direction == DMA_TO_DEVICE) {
3655 			printk(KERN_ERR "Rejecting underflow/overflow"
3656 					" WRITE data\n");
3657 			goto out_invalid_cdb_field;
3658 		}
3659 		/*
3660 		 * Reject READ_* or WRITE_* with overflow/underflow for
3661 		 * type SCF_SCSI_DATA_SG_IO_CDB.
3662 		 */
3663 		if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512))  {
3664 			printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
3665 				" CDB on non 512-byte sector setup subsystem"
3666 				" plugin: %s\n", TRANSPORT(dev)->name);
3667 			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3668 			goto out_invalid_cdb_field;
3669 		}
3670 
3671 		if (size > cmd->data_length) {
3672 			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3673 			cmd->residual_count = (size - cmd->data_length);
3674 		} else {
3675 			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3676 			cmd->residual_count = (cmd->data_length - size);
3677 		}
3678 		cmd->data_length = size;
3679 	}
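	/*
	 * Illustrative note (not part of the original source): with the checks
	 * above, a size/data_length mismatch on a WRITE is rejected outright.
	 * For a READ where cmd->data_length is 4096 but the CDB encodes 2048
	 * bytes, SCF_UNDERFLOW_BIT is set, residual_count becomes 2048, and
	 * data_length is shrunk to the CDB-derived size of 2048.
	 */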
3680 
3681 	transport_set_supported_SAM_opcode(cmd);
3682 	return ret;
3683 
3684 out_unsupported_cdb:
3685 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3686 	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3687 	return -2;
3688 out_invalid_cdb_field:
3689 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3690 	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3691 	return -2;
3692 }
3693 
3694 static inline void transport_release_tasks(struct se_cmd *);
3695 
3696 /*
3697  * This function will copy a contiguous *src buffer into a destination
3698  * struct scatterlist array.
3699  */
3700 static void transport_memcpy_write_contig(
3701 	struct se_cmd *cmd,
3702 	struct scatterlist *sg_d,
3703 	unsigned char *src)
3704 {
3705 	u32 i = 0, length = 0, total_length = cmd->data_length;
3706 	void *dst;
3707 
3708 	while (total_length) {
3709 		length = sg_d[i].length;
3710 
3711 		if (length > total_length)
3712 			length = total_length;
3713 
3714 		dst = sg_virt(&sg_d[i]);
3715 
3716 		memcpy(dst, src, length);
3717 
3718 		if (!(total_length -= length))
3719 			return;
3720 
3721 		src += length;
3722 		i++;
3723 	}
3724 }
3725 
3726 /*
3727  * This function will copy a struct scatterlist array *sg_s into a destination
3728  * contiguous *dst buffer.
3729  */
3730 static void transport_memcpy_read_contig(
3731 	struct se_cmd *cmd,
3732 	unsigned char *dst,
3733 	struct scatterlist *sg_s)
3734 {
3735 	u32 i = 0, length = 0, total_length = cmd->data_length;
3736 	void *src;
3737 
3738 	while (total_length) {
3739 		length = sg_s[i].length;
3740 
3741 		if (length > total_length)
3742 			length = total_length;
3743 
3744 		src = sg_virt(&sg_s[i]);
3745 
3746 		memcpy(dst, src, length);
3747 
3748 		if (!(total_length -= length))
3749 			return;
3750 
3751 		dst += length;
3752 		i++;
3753 	}
3754 }
3755 
3756 static void transport_memcpy_se_mem_read_contig(
3757 	struct se_cmd *cmd,
3758 	unsigned char *dst,
3759 	struct list_head *se_mem_list)
3760 {
3761 	struct se_mem *se_mem;
3762 	void *src;
3763 	u32 length = 0, total_length = cmd->data_length;
3764 
3765 	list_for_each_entry(se_mem, se_mem_list, se_list) {
3766 		length = se_mem->se_len;
3767 
3768 		if (length > total_length)
3769 			length = total_length;
3770 
3771 		src = page_address(se_mem->se_page) + se_mem->se_off;
3772 
3773 		memcpy(dst, src, length);
3774 
3775 		if (!(total_length -= length))
3776 			return;
3777 
3778 		dst += length;
3779 	}
3780 }
3781 
3782 /*
3783  * Called from transport_generic_complete_ok() and
3784  * transport_generic_request_failure() to determine which dormant/delayed
3785  * and ordered cmds need to have their tasks added to the execution queue.
3786  */
3787 static void transport_complete_task_attr(struct se_cmd *cmd)
3788 {
3789 	struct se_device *dev = SE_DEV(cmd);
3790 	struct se_cmd *cmd_p, *cmd_tmp;
3791 	int new_active_tasks = 0;
3792 
3793 	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3794 		atomic_dec(&dev->simple_cmds);
3795 		smp_mb__after_atomic_dec();
3796 		dev->dev_cur_ordered_id++;
3797 		DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
3798 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
3799 			cmd->se_ordered_id);
3800 	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3801 		atomic_dec(&dev->dev_hoq_count);
3802 		smp_mb__after_atomic_dec();
3803 		dev->dev_cur_ordered_id++;
3804 		DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
3805 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3806 			cmd->se_ordered_id);
3807 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3808 		spin_lock(&dev->ordered_cmd_lock);
3809 		list_del(&cmd->se_ordered_list);
3810 		atomic_dec(&dev->dev_ordered_sync);
3811 		smp_mb__after_atomic_dec();
3812 		spin_unlock(&dev->ordered_cmd_lock);
3813 
3814 		dev->dev_cur_ordered_id++;
3815 		DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
3816 			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3817 	}
3818 	/*
3819 	 * Process all commands up to the last received
3820 	 * ORDERED task attribute, which requires another blocking
3821 	 * boundary.
3822 	 */
3823 	spin_lock(&dev->delayed_cmd_lock);
3824 	list_for_each_entry_safe(cmd_p, cmd_tmp,
3825 			&dev->delayed_cmd_list, se_delayed_list) {
3826 
3827 		list_del(&cmd_p->se_delayed_list);
3828 		spin_unlock(&dev->delayed_cmd_lock);
3829 
3830 		DEBUG_STA("Calling add_tasks() for"
3831 			" cmd_p: 0x%02x Task Attr: 0x%02x"
3832 			" Dormant -> Active, se_ordered_id: %u\n",
3833 			T_TASK(cmd_p)->t_task_cdb[0],
3834 			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3835 
3836 		transport_add_tasks_from_cmd(cmd_p);
3837 		new_active_tasks++;
3838 
3839 		spin_lock(&dev->delayed_cmd_lock);
3840 		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3841 			break;
3842 	}
3843 	spin_unlock(&dev->delayed_cmd_lock);
3844 	/*
3845 	 * If new tasks have become active, wake up the transport thread
3846 	 * to do the processing of the Active tasks.
3847 	 */
3848 	if (new_active_tasks != 0)
3849 		wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
3850 }
3851 
3852 static void transport_generic_complete_ok(struct se_cmd *cmd)
3853 {
3854 	int reason = 0;
3855 	/*
3856 	 * Check if we need to move delayed/dormant tasks from cmds on the
3857 	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3858 	 * Attribute.
3859 	 */
3860 	if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3861 		transport_complete_task_attr(cmd);
3862 	/*
3863 	 * Check if we need to retrieve a sense buffer from
3864 	 * the struct se_cmd in question.
3865 	 */
3866 	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3867 		if (transport_get_sense_data(cmd) < 0)
3868 			reason = TCM_NON_EXISTENT_LUN;
3869 
3870 		/*
3871 		 * Only set when a struct se_task->task_scsi_status returned
3872 		 * a non-GOOD status.
3873 		 */
3874 		if (cmd->scsi_status) {
3875 			transport_send_check_condition_and_sense(
3876 					cmd, reason, 1);
3877 			transport_lun_remove_cmd(cmd);
3878 			transport_cmd_check_stop_to_fabric(cmd);
3879 			return;
3880 		}
3881 	}
3882 	/*
3883 	 * Check for a callback, used by, amongst other things,
3884 	 * XDWRITE_READ_10 emulation.
3885 	 */
3886 	if (cmd->transport_complete_callback)
3887 		cmd->transport_complete_callback(cmd);
3888 
3889 	switch (cmd->data_direction) {
3890 	case DMA_FROM_DEVICE:
3891 		spin_lock(&cmd->se_lun->lun_sep_lock);
3892 		if (SE_LUN(cmd)->lun_sep) {
3893 			SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
3894 					cmd->data_length;
3895 		}
3896 		spin_unlock(&cmd->se_lun->lun_sep_lock);
3897 		/*
3898 		 * If the TCM fabric module has pre-registered SGL memory,
3899 		 * perform the memcpy() from the TCM internal contiguous
3900 		 * buffer back to the original SGL.
3901 		 */
3902 		if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
3903 			transport_memcpy_write_contig(cmd,
3904 				 T_TASK(cmd)->t_task_pt_sgl,
3905 				 T_TASK(cmd)->t_task_buf);
3906 
3907 		CMD_TFO(cmd)->queue_data_in(cmd);
3908 		break;
3909 	case DMA_TO_DEVICE:
3910 		spin_lock(&cmd->se_lun->lun_sep_lock);
3911 		if (SE_LUN(cmd)->lun_sep) {
3912 			SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
3913 				cmd->data_length;
3914 		}
3915 		spin_unlock(&cmd->se_lun->lun_sep_lock);
3916 		/*
3917 		 * Check if we need to send READ payload for BIDI-COMMAND
3918 		 */
3919 		if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
3920 			spin_lock(&cmd->se_lun->lun_sep_lock);
3921 			if (SE_LUN(cmd)->lun_sep) {
3922 				SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
3923 					cmd->data_length;
3924 			}
3925 			spin_unlock(&cmd->se_lun->lun_sep_lock);
3926 			CMD_TFO(cmd)->queue_data_in(cmd);
3927 			break;
3928 		}
3929 		/* Fall through for DMA_TO_DEVICE */
3930 	case DMA_NONE:
3931 		CMD_TFO(cmd)->queue_status(cmd);
3932 		break;
3933 	default:
3934 		break;
3935 	}
3936 
3937 	transport_lun_remove_cmd(cmd);
3938 	transport_cmd_check_stop_to_fabric(cmd);
3939 }
3940 
3941 static void transport_free_dev_tasks(struct se_cmd *cmd)
3942 {
3943 	struct se_task *task, *task_tmp;
3944 	unsigned long flags;
3945 
3946 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
3947 	list_for_each_entry_safe(task, task_tmp,
3948 				&T_TASK(cmd)->t_task_list, t_list) {
3949 		if (atomic_read(&task->task_active))
3950 			continue;
3951 
3952 		kfree(task->task_sg_bidi);
3953 		kfree(task->task_sg);
3954 
3955 		list_del(&task->t_list);
3956 
3957 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3958 		if (task->se_dev)
3959 			TRANSPORT(task->se_dev)->free_task(task);
3960 		else
3961 			printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
3962 				task->task_no);
3963 		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
3964 	}
3965 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3966 }
3967 
3968 static inline void transport_free_pages(struct se_cmd *cmd)
3969 {
3970 	struct se_mem *se_mem, *se_mem_tmp;
3971 	int free_page = 1;
3972 
3973 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3974 		free_page = 0;
3975 	if (cmd->se_dev->transport->do_se_mem_map)
3976 		free_page = 0;
3977 
3978 	if (T_TASK(cmd)->t_task_buf) {
3979 		kfree(T_TASK(cmd)->t_task_buf);
3980 		T_TASK(cmd)->t_task_buf = NULL;
3981 		return;
3982 	}
3983 
3984 	/*
3985 	 * Caller will handle releasing of struct se_mem.
3986 	 */
3987 	if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
3988 		return;
3989 
3990 	if (!(T_TASK(cmd)->t_tasks_se_num))
3991 		return;
3992 
3993 	list_for_each_entry_safe(se_mem, se_mem_tmp,
3994 			T_TASK(cmd)->t_mem_list, se_list) {
3995 		/*
3996 		 * We only call __free_page(struct se_mem->se_page) when
3997 		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
3998 		 */
3999 		if (free_page)
4000 			__free_page(se_mem->se_page);
4001 
4002 		list_del(&se_mem->se_list);
4003 		kmem_cache_free(se_mem_cache, se_mem);
4004 	}
4005 
4006 	if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
4007 		list_for_each_entry_safe(se_mem, se_mem_tmp,
4008 				T_TASK(cmd)->t_mem_bidi_list, se_list) {
4009 			/*
4010 			 * We only call __free_page(struct se_mem->se_page) when
4011 			 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
4012 			 */
4013 			if (free_page)
4014 				__free_page(se_mem->se_page);
4015 
4016 			list_del(&se_mem->se_list);
4017 			kmem_cache_free(se_mem_cache, se_mem);
4018 		}
4019 	}
4020 
4021 	kfree(T_TASK(cmd)->t_mem_bidi_list);
4022 	T_TASK(cmd)->t_mem_bidi_list = NULL;
4023 	kfree(T_TASK(cmd)->t_mem_list);
4024 	T_TASK(cmd)->t_mem_list = NULL;
4025 	T_TASK(cmd)->t_tasks_se_num = 0;
4026 }
4027 
4028 static inline void transport_release_tasks(struct se_cmd *cmd)
4029 {
4030 	transport_free_dev_tasks(cmd);
4031 }
4032 
4033 static inline int transport_dec_and_check(struct se_cmd *cmd)
4034 {
4035 	unsigned long flags;
4036 
4037 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4038 	if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
4039 		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
4040 			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4041 					flags);
4042 			return 1;
4043 		}
4044 	}
4045 
4046 	if (atomic_read(&T_TASK(cmd)->t_se_count)) {
4047 		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
4048 			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4049 					flags);
4050 			return 1;
4051 		}
4052 	}
4053 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4054 
4055 	return 0;
4056 }
4057 
4058 static void transport_release_fe_cmd(struct se_cmd *cmd)
4059 {
4060 	unsigned long flags;
4061 
4062 	if (transport_dec_and_check(cmd))
4063 		return;
4064 
4065 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4066 	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
4067 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4068 		goto free_pages;
4069 	}
4070 	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
4071 	transport_all_task_dev_remove_state(cmd);
4072 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4073 
4074 	transport_release_tasks(cmd);
4075 free_pages:
4076 	transport_free_pages(cmd);
4077 	transport_free_se_cmd(cmd);
4078 	CMD_TFO(cmd)->release_cmd_direct(cmd);
4079 }
4080 
4081 static int transport_generic_remove(
4082 	struct se_cmd *cmd,
4083 	int release_to_pool,
4084 	int session_reinstatement)
4085 {
4086 	unsigned long flags;
4087 
4088 	if (!(T_TASK(cmd)))
4089 		goto release_cmd;
4090 
4091 	if (transport_dec_and_check(cmd)) {
4092 		if (session_reinstatement) {
4093 			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4094 			transport_all_task_dev_remove_state(cmd);
4095 			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4096 					flags);
4097 		}
4098 		return 1;
4099 	}
4100 
4101 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4102 	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
4103 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4104 		goto free_pages;
4105 	}
4106 	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
4107 	transport_all_task_dev_remove_state(cmd);
4108 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4109 
4110 	transport_release_tasks(cmd);
4111 free_pages:
4112 	transport_free_pages(cmd);
4113 
4114 release_cmd:
4115 	if (release_to_pool) {
4116 		transport_release_cmd_to_pool(cmd);
4117 	} else {
4118 		transport_free_se_cmd(cmd);
4119 		CMD_TFO(cmd)->release_cmd_direct(cmd);
4120 	}
4121 
4122 	return 0;
4123 }
4124 
4125 /*
4126  * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
4127  * @cmd:  Associated se_cmd descriptor
4128  * @mem:  SGL style memory for TCM WRITE / READ
4129  * @sg_mem_num: Number of SGL elements
4130  * @mem_bidi_in: SGL style memory for TCM BIDI READ
4131  * @sg_mem_bidi_num: Number of BIDI READ SGL elements
4132  *
4133  * Return: nonzero if the cmd was rejected due to -ENOMEM or improper
4134  * usage of parameters.
4135  */
4136 int transport_generic_map_mem_to_cmd(
4137 	struct se_cmd *cmd,
4138 	struct scatterlist *mem,
4139 	u32 sg_mem_num,
4140 	struct scatterlist *mem_bidi_in,
4141 	u32 sg_mem_bidi_num)
4142 {
4143 	u32 se_mem_cnt_out = 0;
4144 	int ret;
4145 
4146 	if (!(mem) || !(sg_mem_num))
4147 		return 0;
4148 	/*
4149 	 * Passed *mem will contain a list_head containing preformatted
4150 	 * struct se_mem elements...
4151 	 */
4152 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
4153 		if ((mem_bidi_in) || (sg_mem_bidi_num)) {
4154 			printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
4155 				" with BIDI-COMMAND\n");
4156 			return -ENOSYS;
4157 		}
4158 
4159 		T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
4160 		T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
4161 		cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
4162 		return 0;
4163 	}
4164 	/*
4165 	 * Otherwise, assume the caller is passing a struct scatterlist
4166 	 * array from include/linux/scatterlist.h
4167 	 */
4168 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
4169 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
4170 		/*
4171 		 * For CDBs using TCM struct se_mem linked list scatterlist memory
4172 		 * processed into a TCM struct se_subsystem_dev, we do the mapping
4173 		 * from the passed physical memory to struct se_mem->se_page here.
4174 		 */
4175 		T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
4176 		if (!(T_TASK(cmd)->t_mem_list))
4177 			return -ENOMEM;
4178 
4179 		ret = transport_map_sg_to_mem(cmd,
4180 			T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
4181 		if (ret < 0)
4182 			return -ENOMEM;
4183 
4184 		T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
4185 		/*
4186 		 * Setup BIDI READ list of struct se_mem elements
4187 		 */
4188 		if ((mem_bidi_in) && (sg_mem_bidi_num)) {
4189 			T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
4190 			if (!(T_TASK(cmd)->t_mem_bidi_list)) {
4191 				kfree(T_TASK(cmd)->t_mem_list);
4192 				return -ENOMEM;
4193 			}
4194 			se_mem_cnt_out = 0;
4195 
4196 			ret = transport_map_sg_to_mem(cmd,
4197 				T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
4198 				&se_mem_cnt_out);
4199 			if (ret < 0) {
4200 				kfree(T_TASK(cmd)->t_mem_list);
4201 				return -ENOMEM;
4202 			}
4203 
4204 			T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
4205 		}
4206 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
4207 
4208 	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
4209 		if (mem_bidi_in || sg_mem_bidi_num) {
4210 			printk(KERN_ERR "BIDI-Commands not supported using "
4211 				"SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
4212 			return -ENOSYS;
4213 		}
4214 		/*
4215 		 * For incoming CDBs using a contiguous buffer internally with TCM,
4216 		 * save the passed struct scatterlist memory.  After TCM storage object
4217 		 * processing has completed for this struct se_cmd, TCM core will call
4218 		 * transport_memcpy_[write,read]_contig() as necessary from
4219 		 * transport_generic_complete_ok() and transport_write_pending() in order
4220 		 * to copy the TCM buffer to/from the original passed *mem in SGL ->
4221 		 * struct scatterlist format.
4222 		 */
4223 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
4224 		T_TASK(cmd)->t_task_pt_sgl = mem;
4225 	}
4226 
4227 	return 0;
4228 }
4229 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
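
/*
 * Illustrative sketch (not from the original source): a fabric module that
 * has already built a struct scatterlist array for a non-BIDI command might
 * hand it to TCM roughly as follows, where "se_cmd", "sgl" and "sgl_count"
 * are assumed to be the fabric driver's own variables:
 *
 *	if (transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
 *					     NULL, 0) < 0)
 *		return -ENOMEM;
 *
 * A negative return means the mapping was rejected (-ENOMEM or improper
 * usage) and the fabric module is expected to fail the command.
 */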
4230 
4231 
4232 static inline long long transport_dev_end_lba(struct se_device *dev)
4233 {
4234 	return dev->transport->get_blocks(dev) + 1;
4235 }
4236 
4237 static int transport_get_sectors(struct se_cmd *cmd)
4238 {
4239 	struct se_device *dev = SE_DEV(cmd);
4240 
4241 	T_TASK(cmd)->t_tasks_sectors =
4242 		(cmd->data_length / DEV_ATTRIB(dev)->block_size);
4243 	if (!(T_TASK(cmd)->t_tasks_sectors))
4244 		T_TASK(cmd)->t_tasks_sectors = 1;
4245 
4246 	if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
4247 		return 0;
4248 
4249 	if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
4250 	     transport_dev_end_lba(dev)) {
4251 		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
4252 			" transport_dev_end_lba(): %llu\n",
4253 			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
4254 			transport_dev_end_lba(dev));
4255 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4256 		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
4257 		return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
4258 	}
4259 
4260 	return 0;
4261 }
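
/*
 * Worked example for the bounds check in transport_get_sectors() above
 * (illustrative, not from the original source): assuming a 512-byte
 * block_size backstore whose ->get_blocks() reports a last LBA of 2047,
 * transport_dev_end_lba() returns 2048.  A CDB with t_task_lba = 2040 and
 * data_length = 8192 yields t_tasks_sectors = 16, and 2040 + 16 > 2048, so
 * the command is failed with TCM_SECTOR_COUNT_TOO_MANY.
 */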
4262 
4263 static int transport_new_cmd_obj(struct se_cmd *cmd)
4264 {
4265 	struct se_device *dev = SE_DEV(cmd);
4266 	u32 task_cdbs = 0, rc;
4267 
4268 	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
4269 		task_cdbs++;
4270 		T_TASK(cmd)->t_task_cdbs++;
4271 	} else {
4272 		int set_counts = 1;
4273 
4274 		/*
4275 		 * Setup any BIDI READ tasks and memory from
4276 		 * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
4277 		 * are queued first for the non pSCSI passthrough case.
4278 		 */
4279 		if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
4280 		    (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
4281 			rc = transport_generic_get_cdb_count(cmd,
4282 				T_TASK(cmd)->t_task_lba,
4283 				T_TASK(cmd)->t_tasks_sectors,
4284 				DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
4285 				set_counts);
4286 			if (!(rc)) {
4287 				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4288 				cmd->scsi_sense_reason =
4289 					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4290 				return PYX_TRANSPORT_LU_COMM_FAILURE;
4291 			}
4292 			set_counts = 0;
4293 		}
4294 		/*
4295 		 * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
4296 		 * Note for BIDI transfers this will contain the WRITE payload
4297 		 */
4298 		task_cdbs = transport_generic_get_cdb_count(cmd,
4299 				T_TASK(cmd)->t_task_lba,
4300 				T_TASK(cmd)->t_tasks_sectors,
4301 				cmd->data_direction, T_TASK(cmd)->t_mem_list,
4302 				set_counts);
4303 		if (!(task_cdbs)) {
4304 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4305 			cmd->scsi_sense_reason =
4306 					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4307 			return PYX_TRANSPORT_LU_COMM_FAILURE;
4308 		}
4309 		T_TASK(cmd)->t_task_cdbs += task_cdbs;
4310 
4311 #if 0
4312 		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
4313 			" %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
4314 			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
4315 			T_TASK(cmd)->t_task_cdbs);
4316 #endif
4317 	}
4318 
4319 	atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
4320 	atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
4321 	atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
4322 	return 0;
4323 }
4324 
4325 static struct list_head *transport_init_se_mem_list(void)
4326 {
4327 	struct list_head *se_mem_list;
4328 
4329 	se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
4330 	if (!(se_mem_list)) {
4331 		printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
4332 		return NULL;
4333 	}
4334 	INIT_LIST_HEAD(se_mem_list);
4335 
4336 	return se_mem_list;
4337 }
4338 
4339 static int
4340 transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
4341 {
4342 	unsigned char *buf;
4343 	struct se_mem *se_mem;
4344 
4345 	T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
4346 	if (!(T_TASK(cmd)->t_mem_list))
4347 		return -ENOMEM;
4348 
4349 	/*
4350 	 * If the device uses memory mapping this is enough.
4351 	 */
4352 	if (cmd->se_dev->transport->do_se_mem_map)
4353 		return 0;
4354 
4355 	/*
4356 	 * Setup BIDI-COMMAND READ list of struct se_mem elements
4357 	 */
4358 	if (T_TASK(cmd)->t_tasks_bidi) {
4359 		T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
4360 		if (!(T_TASK(cmd)->t_mem_bidi_list)) {
4361 			kfree(T_TASK(cmd)->t_mem_list);
4362 			return -ENOMEM;
4363 		}
4364 	}
4365 
4366 	while (length) {
4367 		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
4368 		if (!(se_mem)) {
4369 			printk(KERN_ERR "Unable to allocate struct se_mem\n");
4370 			goto out;
4371 		}
4372 
4373 /* #warning FIXME Allocate contiguous pages for struct se_mem elements */
4374 		se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
4375 		if (!(se_mem->se_page)) {
4376 			printk(KERN_ERR "alloc_pages() failed\n");
4377 			goto out;
4378 		}
4379 
4380 		buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
4381 		if (!(buf)) {
4382 			printk(KERN_ERR "kmap_atomic() failed\n");
4383 			goto out;
4384 		}
4385 		INIT_LIST_HEAD(&se_mem->se_list);
4386 		se_mem->se_len = (length > dma_size) ? dma_size : length;
4387 		memset(buf, 0, se_mem->se_len);
4388 		kunmap_atomic(buf, KM_IRQ0);
4389 
4390 		list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
4391 		T_TASK(cmd)->t_tasks_se_num++;
4392 
4393 		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
4394 			" Offset(%u)\n", se_mem->se_page, se_mem->se_len,
4395 			se_mem->se_off);
4396 
4397 		length -= se_mem->se_len;
4398 	}
4399 
4400 	DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
4401 			T_TASK(cmd)->t_tasks_se_num);
4402 
4403 	return 0;
4404 out:
4405 	if (se_mem)
4406 		__free_pages(se_mem->se_page, 0);
4407 	kmem_cache_free(se_mem_cache, se_mem);
4408 	return -1;
4409 }
4410 
4411 u32 transport_calc_sg_num(
4412 	struct se_task *task,
4413 	struct se_mem *in_se_mem,
4414 	u32 task_offset)
4415 {
4416 	struct se_cmd *se_cmd = task->task_se_cmd;
4417 	struct se_device *se_dev = SE_DEV(se_cmd);
4418 	struct se_mem *se_mem = in_se_mem;
4419 	struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
4420 	u32 sg_length, task_size = task->task_size, task_sg_num_padded;
4421 
4422 	while (task_size != 0) {
4423 		DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
4424 			" se_mem->se_off(%u) task_offset(%u)\n",
4425 			se_mem->se_page, se_mem->se_len,
4426 			se_mem->se_off, task_offset);
4427 
4428 		if (task_offset == 0) {
4429 			if (task_size >= se_mem->se_len) {
4430 				sg_length = se_mem->se_len;
4431 
4432 				if (!(list_is_last(&se_mem->se_list,
4433 						T_TASK(se_cmd)->t_mem_list)))
4434 					se_mem = list_entry(se_mem->se_list.next,
4435 							struct se_mem, se_list);
4436 			} else {
4437 				sg_length = task_size;
4438 				task_size -= sg_length;
4439 				goto next;
4440 			}
4441 
4442 			DEBUG_SC("sg_length(%u) task_size(%u)\n",
4443 					sg_length, task_size);
4444 		} else {
4445 			if ((se_mem->se_len - task_offset) > task_size) {
4446 				sg_length = task_size;
4447 				task_size -= sg_length;
4448 				goto next;
4449 			} else {
4450 				sg_length = (se_mem->se_len - task_offset);
4451 
4452 				if (!(list_is_last(&se_mem->se_list,
4453 						T_TASK(se_cmd)->t_mem_list)))
4454 					se_mem = list_entry(se_mem->se_list.next,
4455 							struct se_mem, se_list);
4456 			}
4457 
4458 			DEBUG_SC("sg_length(%u) task_size(%u)\n",
4459 					sg_length, task_size);
4460 
4461 			task_offset = 0;
4462 		}
4463 		task_size -= sg_length;
4464 next:
4465 		DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
4466 			task->task_no, task_size);
4467 
4468 		task->task_sg_num++;
4469 	}
4470 	/*
4471 	 * Check if the fabric module driver is requesting that all
4472 	 * struct se_task->task_sg[] be chained together.  If so,
4473 	 * then allocate an extra padding SG entry for linking and
4474 	 * marking the end of the chained SGL.
4475 	 */
4476 	if (tfo->task_sg_chaining) {
4477 		task_sg_num_padded = (task->task_sg_num + 1);
4478 		task->task_padded_sg = 1;
4479 	} else
4480 		task_sg_num_padded = task->task_sg_num;
4481 
4482 	task->task_sg = kzalloc(task_sg_num_padded *
4483 			sizeof(struct scatterlist), GFP_KERNEL);
4484 	if (!(task->task_sg)) {
4485 		printk(KERN_ERR "Unable to allocate memory for"
4486 				" task->task_sg\n");
4487 		return 0;
4488 	}
4489 	sg_init_table(&task->task_sg[0], task_sg_num_padded);
4490 	/*
4491 	 * Setup task->task_sg_bidi for SCSI READ payload for
4492 	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
4493 	 */
4494 	if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
4495 	    (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
4496 		task->task_sg_bidi = kzalloc(task_sg_num_padded *
4497 				sizeof(struct scatterlist), GFP_KERNEL);
4498 		if (!(task->task_sg_bidi)) {
4499 			printk(KERN_ERR "Unable to allocate memory for"
4500 				" task->task_sg_bidi\n");
4501 			return 0;
4502 		}
4503 		sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
4504 	}
4505 	/*
4506 	 * For the chaining case, setup the proper end of SGL for the
4507 	 * initial submission struct task into struct se_subsystem_api.
4508 	 * This will be cleared later by transport_do_task_sg_chain()
4509 	 */
4510 	if (task->task_padded_sg) {
4511 		sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
4512 		/*
4513 		 * Check task_sg_bidi before marking the end of the bi-directional
4514 		 * scatterlist, which gets created only in the case of a
4515 		 * bi-directional (RD + WR) request.
4516 		 */
4517 		if (task->task_sg_bidi)
4518 			sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
4519 	}
4520 
4521 	DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
4522 		" task_sg_num_padded(%u)\n", task->task_sg_num,
4523 		task_sg_num_padded);
4524 
4525 	return task->task_sg_num;
4526 }
4527 
4528 static inline int transport_set_tasks_sectors_disk(
4529 	struct se_task *task,
4530 	struct se_device *dev,
4531 	unsigned long long lba,
4532 	u32 sectors,
4533 	int *max_sectors_set)
4534 {
4535 	if ((lba + sectors) > transport_dev_end_lba(dev)) {
4536 		task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
4537 
4538 		if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
4539 			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
4540 			*max_sectors_set = 1;
4541 		}
4542 	} else {
4543 		if (sectors > DEV_ATTRIB(dev)->max_sectors) {
4544 			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
4545 			*max_sectors_set = 1;
4546 		} else
4547 			task->task_sectors = sectors;
4548 	}
4549 
4550 	return 0;
4551 }
4552 
4553 static inline int transport_set_tasks_sectors_non_disk(
4554 	struct se_task *task,
4555 	struct se_device *dev,
4556 	unsigned long long lba,
4557 	u32 sectors,
4558 	int *max_sectors_set)
4559 {
4560 	if (sectors > DEV_ATTRIB(dev)->max_sectors) {
4561 		task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
4562 		*max_sectors_set = 1;
4563 	} else
4564 		task->task_sectors = sectors;
4565 
4566 	return 0;
4567 }
4568 
4569 static inline int transport_set_tasks_sectors(
4570 	struct se_task *task,
4571 	struct se_device *dev,
4572 	unsigned long long lba,
4573 	u32 sectors,
4574 	int *max_sectors_set)
4575 {
4576 	return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
4577 		transport_set_tasks_sectors_disk(task, dev, lba, sectors,
4578 				max_sectors_set) :
4579 		transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
4580 				max_sectors_set);
4581 }
4582 
4583 static int transport_map_sg_to_mem(
4584 	struct se_cmd *cmd,
4585 	struct list_head *se_mem_list,
4586 	void *in_mem,
4587 	u32 *se_mem_cnt)
4588 {
4589 	struct se_mem *se_mem;
4590 	struct scatterlist *sg;
4591 	u32 sg_count = 1, cmd_size = cmd->data_length;
4592 
4593 	if (!in_mem) {
4594 		printk(KERN_ERR "No source scatterlist\n");
4595 		return -1;
4596 	}
4597 	sg = (struct scatterlist *)in_mem;
4598 
4599 	while (cmd_size) {
4600 		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
4601 		if (!(se_mem)) {
4602 			printk(KERN_ERR "Unable to allocate struct se_mem\n");
4603 			return -1;
4604 		}
4605 		INIT_LIST_HEAD(&se_mem->se_list);
4606 		DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
4607 			" sg_page: %p offset: %d length: %d\n", cmd_size,
4608 			sg_page(sg), sg->offset, sg->length);
4609 
4610 		se_mem->se_page = sg_page(sg);
4611 		se_mem->se_off = sg->offset;
4612 
4613 		if (cmd_size > sg->length) {
4614 			se_mem->se_len = sg->length;
4615 			sg = sg_next(sg);
4616 			sg_count++;
4617 		} else
4618 			se_mem->se_len = cmd_size;
4619 
4620 		cmd_size -= se_mem->se_len;
4621 
4622 		DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
4623 				*se_mem_cnt, cmd_size);
4624 		DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
4625 				se_mem->se_page, se_mem->se_off, se_mem->se_len);
4626 
4627 		list_add_tail(&se_mem->se_list, se_mem_list);
4628 		(*se_mem_cnt)++;
4629 	}
4630 
4631 	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
4632 		" struct se_mem\n", sg_count, *se_mem_cnt);
4633 
4634 	if (sg_count != *se_mem_cnt)
4635 		BUG();
4636 
4637 	return 0;
4638 }
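
/*
 * Illustrative note (not from the original source): for a cmd->data_length
 * of 10240 and an incoming scatterlist of three entries with lengths 4096,
 * 4096 and 2048, the loop in transport_map_sg_to_mem() above produces three
 * struct se_mem entries mirroring those pages/offsets/lengths, and
 * *se_mem_cnt ends up equal to sg_count (3), satisfying the final
 * consistency check.
 */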
4639 
4640 /*	transport_map_mem_to_sg():
4641  *
4642  *
4643  */
4644 int transport_map_mem_to_sg(
4645 	struct se_task *task,
4646 	struct list_head *se_mem_list,
4647 	void *in_mem,
4648 	struct se_mem *in_se_mem,
4649 	struct se_mem **out_se_mem,
4650 	u32 *se_mem_cnt,
4651 	u32 *task_offset)
4652 {
4653 	struct se_cmd *se_cmd = task->task_se_cmd;
4654 	struct se_mem *se_mem = in_se_mem;
4655 	struct scatterlist *sg = (struct scatterlist *)in_mem;
4656 	u32 task_size = task->task_size, sg_no = 0;
4657 
4658 	if (!sg) {
4659 		printk(KERN_ERR "Unable to locate valid struct"
4660 				" scatterlist pointer\n");
4661 		return -1;
4662 	}
4663 
4664 	while (task_size != 0) {
4665 		/*
4666 		 * Setup the contiguous array of scatterlists for
4667 		 * this struct se_task.
4668 		 */
4669 		sg_assign_page(sg, se_mem->se_page);
4670 
4671 		if (*task_offset == 0) {
4672 			sg->offset = se_mem->se_off;
4673 
4674 			if (task_size >= se_mem->se_len) {
4675 				sg->length = se_mem->se_len;
4676 
4677 				if (!(list_is_last(&se_mem->se_list,
4678 						T_TASK(se_cmd)->t_mem_list))) {
4679 					se_mem = list_entry(se_mem->se_list.next,
4680 							struct se_mem, se_list);
4681 					(*se_mem_cnt)++;
4682 				}
4683 			} else {
4684 				sg->length = task_size;
4685 				/*
4686 				 * Determine if we need to calculate an offset
4687 				 * into the struct se_mem on the next go around.
4688 				 */
4689 				task_size -= sg->length;
4690 				if (!(task_size))
4691 					*task_offset = sg->length;
4692 
4693 				goto next;
4694 			}
4695 
4696 		} else {
4697 			sg->offset = (*task_offset + se_mem->se_off);
4698 
4699 			if ((se_mem->se_len - *task_offset) > task_size) {
4700 				sg->length = task_size;
4701 				/*
4702 				 * Determine if we need to calculate an offset
4703 				 * into the struct se_mem on the next go around.
4704 				 */
4705 				task_size -= sg->length;
4706 				if (!(task_size))
4707 					*task_offset += sg->length;
4708 
4709 				goto next;
4710 			} else {
4711 				sg->length = (se_mem->se_len - *task_offset);
4712 
4713 				if (!(list_is_last(&se_mem->se_list,
4714 						T_TASK(se_cmd)->t_mem_list))) {
4715 					se_mem = list_entry(se_mem->se_list.next,
4716 							struct se_mem, se_list);
4717 					(*se_mem_cnt)++;
4718 				}
4719 			}
4720 
4721 			*task_offset = 0;
4722 		}
4723 		task_size -= sg->length;
4724 next:
4725 		DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
4726 			" task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
4727 			sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
4728 
4729 		sg_no++;
4730 		if (!(task_size))
4731 			break;
4732 
4733 		sg = sg_next(sg);
4734 
4735 		if (task_size > se_cmd->data_length)
4736 			BUG();
4737 	}
4738 	*out_se_mem = se_mem;
4739 
4740 	DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
4741 		" SGs\n", task->task_no, *se_mem_cnt, sg_no);
4742 
4743 	return 0;
4744 }
4745 
4746 /*
4747  * This function can be used by HW target mode drivers to create a linked
4748  * scatterlist from all contiguously allocated struct se_task->task_sg[].
4749  * This is intended to be called during the completion path by TCM Core
4750  * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
4751  */
4752 void transport_do_task_sg_chain(struct se_cmd *cmd)
4753 {
4754 	struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
4755 	struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
4756 	struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
4757 	struct se_task *task;
4758 	struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
4759 	u32 task_sg_num = 0, sg_count = 0;
4760 	int i;
4761 
4762 	if (tfo->task_sg_chaining == 0) {
4763 		printk(KERN_ERR "task_sg_chaining is diabled for fabric module:"
4764 				" %s\n", tfo->get_fabric_name());
4765 		dump_stack();
4766 		return;
4767 	}
4768 	/*
4769 	 * Walk the struct se_task list and setup scatterlist chains
4770 	 * for each contiguously allocated struct se_task->task_sg[].
4771 	 */
4772 	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
4773 		if (!(task->task_sg) || !(task->task_padded_sg))
4774 			continue;
4775 
4776 		if (sg_head && sg_link) {
4777 			sg_head_cur = &task->task_sg[0];
4778 			sg_link_cur = &task->task_sg[task->task_sg_num];
4779 			/*
4780 			 * Either add chain or mark end of scatterlist
4781 			 */
4782 			if (!(list_is_last(&task->t_list,
4783 					&T_TASK(cmd)->t_task_list))) {
4784 				/*
4785 				 * Clear existing SGL termination bit set in
4786 				 * transport_calc_sg_num(), see sg_mark_end()
4787 				 */
4788 				sg_end_cur = &task->task_sg[task->task_sg_num - 1];
4789 				sg_end_cur->page_link &= ~0x02;
4790 
4791 				sg_chain(sg_head, task_sg_num, sg_head_cur);
4792 				sg_count += task->task_sg_num;
4793 				task_sg_num = (task->task_sg_num + 1);
4794 			} else {
4795 				sg_chain(sg_head, task_sg_num, sg_head_cur);
4796 				sg_count += task->task_sg_num;
4797 				task_sg_num = task->task_sg_num;
4798 			}
4799 
4800 			sg_head = sg_head_cur;
4801 			sg_link = sg_link_cur;
4802 			continue;
4803 		}
4804 		sg_head = sg_first = &task->task_sg[0];
4805 		sg_link = &task->task_sg[task->task_sg_num];
4806 		/*
4807 		 * Check for single task..
4808 		 */
4809 		if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
4810 			/*
4811 			 * Clear existing SGL termination bit set in
4812 			 * transport_calc_sg_num(), see sg_mark_end()
4813 			 */
4814 			sg_end = &task->task_sg[task->task_sg_num - 1];
4815 			sg_end->page_link &= ~0x02;
4816 			sg_count += task->task_sg_num;
4817 			task_sg_num = (task->task_sg_num + 1);
4818 		} else {
4819 			sg_count += task->task_sg_num;
4820 			task_sg_num = task->task_sg_num;
4821 		}
4822 	}
4823 	/*
4824 	 * Setup the starting pointer and total t_tasks_sg_linked_no including
4825 	 * padding SGs for linking and to mark the end.
4826 	 */
4827 	T_TASK(cmd)->t_tasks_sg_chained = sg_first;
4828 	T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
4829 
4830 	DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
4831 		" t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
4832 		T_TASK(cmd)->t_tasks_sg_chained_no);
4833 
4834 	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
4835 			T_TASK(cmd)->t_tasks_sg_chained_no, i) {
4836 
4837 		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
4838 			i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
4839 		if (sg_is_chain(sg))
4840 			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
4841 		if (sg_is_last(sg))
4842 			DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
4843 	}
4844 }
4845 EXPORT_SYMBOL(transport_do_task_sg_chain);
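
/*
 * Illustrative sketch (not from the original source): once
 * transport_do_task_sg_chain() has run for a command, the single chained
 * scatterlist can be walked just like the debug loop above, e.g. by a HW
 * target mode driver programming its DMA segments (setup_hw_dma_segment()
 * is a hypothetical driver helper):
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
 *			T_TASK(cmd)->t_tasks_sg_chained_no, i)
 *		setup_hw_dma_segment(sg);
 */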
4846 
4847 static int transport_do_se_mem_map(
4848 	struct se_device *dev,
4849 	struct se_task *task,
4850 	struct list_head *se_mem_list,
4851 	void *in_mem,
4852 	struct se_mem *in_se_mem,
4853 	struct se_mem **out_se_mem,
4854 	u32 *se_mem_cnt,
4855 	u32 *task_offset_in)
4856 {
4857 	u32 task_offset = *task_offset_in;
4858 	int ret = 0;
4859 	/*
4860 	 * se_subsystem_api_t->do_se_mem_map is used when internal allocation
4861 	 * has been done by the transport plugin.
4862 	 */
4863 	if (TRANSPORT(dev)->do_se_mem_map) {
4864 		ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
4865 				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
4866 				task_offset_in);
4867 		if (ret == 0)
4868 			T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
4869 
4870 		return ret;
4871 	}
4872 
4873 	BUG_ON(list_empty(se_mem_list));
4874 	/*
4875 	 * This is the normal path for all non-BIDI and BIDI-COMMAND
4876 	 * WRITE payloads.  If we need to do BIDI READ passthrough for
4877 	 * TCM/pSCSI the first call to transport_do_se_mem_map ->
4878 	 * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
4879 	 * allocation for task->task_sg_bidi, and the subsequent call to
4880 	 * transport_do_se_mem_map() from transport_generic_get_cdb_count()
4881 	 */
4882 	if (!(task->task_sg_bidi)) {
4883 		/*
4884 		 * Assume by default that the transport plugin speaks
4885 		 * preallocated scatterlists.
4886 		 */
4887 		if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
4888 			return -1;
4889 		/*
4890 		 * struct se_task->task_sg now contains the struct scatterlist array.
4891 		 */
4892 		return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
4893 					in_se_mem, out_se_mem, se_mem_cnt,
4894 					task_offset_in);
4895 	}
4896 	/*
4897 	 * Handle the se_mem_list -> struct task->task_sg_bidi
4898 	 * memory map for the extra BIDI READ payload
4899 	 */
4900 	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
4901 				in_se_mem, out_se_mem, se_mem_cnt,
4902 				task_offset_in);
4903 }
4904 
4905 static u32 transport_generic_get_cdb_count(
4906 	struct se_cmd *cmd,
4907 	unsigned long long lba,
4908 	u32 sectors,
4909 	enum dma_data_direction data_direction,
4910 	struct list_head *mem_list,
4911 	int set_counts)
4912 {
4913 	unsigned char *cdb = NULL;
4914 	struct se_task *task;
4915 	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
4916 	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
4917 	struct se_device *dev = SE_DEV(cmd);
4918 	int max_sectors_set = 0, ret;
4919 	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
4920 
4921 	if (!mem_list) {
4922 		printk(KERN_ERR "mem_list is NULL in transport_generic_get"
4923 				"_cdb_count()\n");
4924 		return 0;
4925 	}
4926 	/*
4927 	 * Using RAMDISK_DR backstores is the only case where
4928 	 * mem_list will ever be empty at this point.
4929 	 */
4930 	if (!(list_empty(mem_list)))
4931 		se_mem = list_entry(mem_list->next, struct se_mem, se_list);
4932 	/*
4933 	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
4934 	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
4935 	 */
4936 	if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
4937 	    !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
4938 	    (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
4939 		se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
4940 					struct se_mem, se_list);
4941 
4942 	while (sectors) {
4943 		DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
4944 			CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
4945 			transport_dev_end_lba(dev));
4946 
4947 		task = transport_generic_get_task(cmd, data_direction);
4948 		if (!(task))
4949 			goto out;
4950 
4951 		transport_set_tasks_sectors(task, dev, lba, sectors,
4952 				&max_sectors_set);
4953 
4954 		task->task_lba = lba;
4955 		lba += task->task_sectors;
4956 		sectors -= task->task_sectors;
4957 		task->task_size = (task->task_sectors *
4958 				   DEV_ATTRIB(dev)->block_size);
4959 
4960 		cdb = TRANSPORT(dev)->get_cdb(task);
4961 		if ((cdb)) {
4962 			memcpy(cdb, T_TASK(cmd)->t_task_cdb,
4963 				scsi_command_size(T_TASK(cmd)->t_task_cdb));
4964 			cmd->transport_split_cdb(task->task_lba,
4965 					&task->task_sectors, cdb);
4966 		}
4967 
4968 		/*
4969 		 * Perform the SE OBJ plugin and/or Transport plugin specific
4970 		 * mapping for T_TASK(cmd)->t_mem_list. And setup the
4971 		 * task->task_sg and if necessary task->task_sg_bidi
4972 		 */
4973 		ret = transport_do_se_mem_map(dev, task, mem_list,
4974 				NULL, se_mem, &se_mem_lout, &se_mem_cnt,
4975 				&task_offset_in);
4976 		if (ret < 0)
4977 			goto out;
4978 
4979 		se_mem = se_mem_lout;
4980 		/*
4981 		 * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
4982 		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
4983 		 *
4984 		 * Note that the first call to transport_do_se_mem_map() above will
4985 		 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
4986 		 * -> transport_calc_sg_num(), and the second here will do the
4987 		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
4988 		 */
4989 		if (task->task_sg_bidi != NULL) {
4990 			ret = transport_do_se_mem_map(dev, task,
4991 				T_TASK(cmd)->t_mem_bidi_list, NULL,
4992 				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
4993 				&task_offset_in);
4994 			if (ret < 0)
4995 				goto out;
4996 
4997 			se_mem_bidi = se_mem_bidi_lout;
4998 		}
4999 		task_cdbs++;
5000 
5001 		DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
5002 				task_cdbs, task->task_sg_num);
5003 
5004 		if (max_sectors_set) {
5005 			max_sectors_set = 0;
5006 			continue;
5007 		}
5008 
5009 		if (!sectors)
5010 			break;
5011 	}
5012 
5013 	if (set_counts) {
5014 		atomic_inc(&T_TASK(cmd)->t_fe_count);
5015 		atomic_inc(&T_TASK(cmd)->t_se_count);
5016 	}
5017 
5018 	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
5019 		CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
5020 		? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
5021 
5022 	return task_cdbs;
5023 out:
5024 	return 0;
5025 }
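
/*
 * Illustrative note (not from the original source): assuming a 512-byte
 * block_size, DEV_ATTRIB(dev)->max_sectors of 1024, and a TYPE_DISK
 * backstore within its LBA range, a 1 MiB WRITE (2048 sectors) is split by
 * the loop in transport_generic_get_cdb_count() above into two struct
 * se_task descriptors of 1024 sectors (task_size 524288 bytes) each, and
 * the function returns a task_cdbs count of 2.
 */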
5026 
5027 static int
5028 transport_map_control_cmd_to_task(struct se_cmd *cmd)
5029 {
5030 	struct se_device *dev = SE_DEV(cmd);
5031 	unsigned char *cdb;
5032 	struct se_task *task;
5033 	int ret;
5034 
5035 	task = transport_generic_get_task(cmd, cmd->data_direction);
5036 	if (!task)
5037 		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5038 
5039 	cdb = TRANSPORT(dev)->get_cdb(task);
5040 	if (cdb)
5041 		memcpy(cdb, cmd->t_task->t_task_cdb,
5042 			scsi_command_size(cmd->t_task->t_task_cdb));
5043 
5044 	task->task_size = cmd->data_length;
5045 	task->task_sg_num =
5046 		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
5047 
5048 	atomic_inc(&cmd->t_task->t_fe_count);
5049 	atomic_inc(&cmd->t_task->t_se_count);
5050 
5051 	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
5052 		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
5053 		u32 se_mem_cnt = 0, task_offset = 0;
5054 
5055 		if (!list_empty(T_TASK(cmd)->t_mem_list))
5056 			se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
5057 					struct se_mem, se_list);
5058 
5059 		ret = transport_do_se_mem_map(dev, task,
5060 				cmd->t_task->t_mem_list, NULL, se_mem,
5061 				&se_mem_lout, &se_mem_cnt, &task_offset);
5062 		if (ret < 0)
5063 			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5064 
5065 		if (dev->transport->map_task_SG)
5066 			return dev->transport->map_task_SG(task);
5067 		return 0;
5068 	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
5069 		if (dev->transport->map_task_non_SG)
5070 			return dev->transport->map_task_non_SG(task);
5071 		return 0;
5072 	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
5073 		if (dev->transport->cdb_none)
5074 			return dev->transport->cdb_none(task);
5075 		return 0;
5076 	} else {
5077 		BUG();
5078 		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5079 	}
5080 }
5081 
5082 /*	 transport_generic_new_cmd(): Called from transport_processing_thread()
5083  *
5084  *	 Allocate storage transport resources from a set of values predefined
5085  *	 by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
5086  *	 Any nonzero return here is treated as an "out of resources" failure.
5087  */
5088 /*
5089  * Generate struct se_task(s) and/or their payloads for this CDB.
5090  */
5091 static int transport_generic_new_cmd(struct se_cmd *cmd)
5092 {
5093 	struct se_portal_group *se_tpg;
5094 	struct se_task *task;
5095 	struct se_device *dev = SE_DEV(cmd);
5096 	int ret = 0;
5097 
5098 	/*
5099 	 * Determine if the TCM fabric module has already allocated physical
5100 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
5101 	 * to setup beforehand the linked list of physical memory at
5102 	 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
5103 	 */
5104 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
5105 		ret = transport_allocate_resources(cmd);
5106 		if (ret < 0)
5107 			return ret;
5108 	}
5109 
5110 	ret = transport_get_sectors(cmd);
5111 	if (ret < 0)
5112 		return ret;
5113 
5114 	ret = transport_new_cmd_obj(cmd);
5115 	if (ret < 0)
5116 		return ret;
5117 
5118 	/*
5119 	 * Determine if the calling TCM fabric module is talking to
5120 	 * Linux/NET via kernel sockets and needs to allocate a
5121 	 * struct iovec array to complete the struct se_cmd
5122 	 */
5123 	se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
5124 	if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
5125 		ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
5126 		if (ret < 0)
5127 			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5128 	}
5129 
5130 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
5131 		list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
5132 			if (atomic_read(&task->task_sent))
5133 				continue;
5134 			if (!dev->transport->map_task_SG)
5135 				continue;
5136 
5137 			ret = dev->transport->map_task_SG(task);
5138 			if (ret < 0)
5139 				return ret;
5140 		}
5141 	} else {
5142 		ret = transport_map_control_cmd_to_task(cmd);
5143 		if (ret < 0)
5144 			return ret;
5145 	}
5146 
5147 	/*
5148 	 * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.
5149 	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
5150 	 * will be added to the struct se_device execution queue after its WRITE
5151 	 * data has arrived. (ie: It gets handled by the transport processing
5152 	 * thread a second time)
5153 	 */
5154 	if (cmd->data_direction == DMA_TO_DEVICE) {
5155 		transport_add_tasks_to_state_queue(cmd);
5156 		return transport_generic_write_pending(cmd);
5157 	}
5158 	/*
5159 	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
5160 	 * to the execution queue.
5161 	 */
5162 	transport_execute_tasks(cmd);
5163 	return 0;
5164 }
5165 
5166 /*	transport_generic_process_write():
5167  *
5168  *
5169  */
5170 void transport_generic_process_write(struct se_cmd *cmd)
5171 {
5172 #if 0
5173 	/*
5174 	 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
5175 	 * original EDTL
5176 	 */
5177 	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
5178 		if (!T_TASK(cmd)->t_tasks_se_num) {
5179 			unsigned char *dst, *buf =
5180 				(unsigned char *)T_TASK(cmd)->t_task_buf;
5181 
5182 			dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
5183 			if (!(dst)) {
5184 				printk(KERN_ERR "Unable to allocate memory for"
5185 						" WRITE underflow\n");
5186 				transport_generic_request_failure(cmd, NULL,
5187 					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
5188 				return;
5189 			}
5190 			memcpy(dst, buf, cmd->cmd_spdtl);
5191 
5192 			kfree(T_TASK(cmd)->t_task_buf);
5193 			T_TASK(cmd)->t_task_buf = dst;
5194 		} else {
5195 			struct scatterlist *sg =
5196 				(struct scatterlist *)T_TASK(cmd)->t_task_buf;
5197 			struct scatterlist *orig_sg;
5198 
5199 			orig_sg = kzalloc(sizeof(struct scatterlist) *
5200 					T_TASK(cmd)->t_tasks_se_num,
5201 					GFP_KERNEL);
5202 			if (!(orig_sg)) {
5203 				printk(KERN_ERR "Unable to allocate memory"
5204 						" for WRITE underflow\n");
5205 				transport_generic_request_failure(cmd, NULL,
5206 					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
5207 				return;
5208 			}
5209 
5210 			memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
5211 					sizeof(struct scatterlist) *
5212 					T_TASK(cmd)->t_tasks_se_num);
5213 
5214 			cmd->data_length = cmd->cmd_spdtl;
5215 			/*
5216 			 * FIXME, clear out original struct se_task and state
5217 			 * information.
5218 			 */
5219 			if (transport_generic_new_cmd(cmd) < 0) {
5220 				transport_generic_request_failure(cmd, NULL,
5221 					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
5222 				kfree(orig_sg);
5223 				return;
5224 			}
5225 
5226 			transport_memcpy_write_sg(cmd, orig_sg);
5227 		}
5228 	}
5229 #endif
5230 	transport_execute_tasks(cmd);
5231 }
5232 EXPORT_SYMBOL(transport_generic_process_write);
5233 
5234 /*	transport_generic_write_pending():
5235  *
5236  *
5237  */
5238 static int transport_generic_write_pending(struct se_cmd *cmd)
5239 {
5240 	unsigned long flags;
5241 	int ret;
5242 
5243 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5244 	cmd->t_state = TRANSPORT_WRITE_PENDING;
5245 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5246 	/*
5247 	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
5248 	 * from the passed Linux/SCSI struct scatterlist located at
5249 	 * T_TASK(se_cmd)->t_task_pt_sgl to the contiguous buffer at
5250 	 * T_TASK(se_cmd)->t_task_buf.
5251 	 */
5252 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
5253 		transport_memcpy_read_contig(cmd,
5254 				T_TASK(cmd)->t_task_buf,
5255 				T_TASK(cmd)->t_task_pt_sgl);
5256 	/*
5257 	 * Clear the se_cmd for WRITE_PENDING status in order to set
5258 	 * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
5259 	 * can be called from HW target mode interrupt code.  This is safe
5260 	 * to call with transport_off=1 before the CMD_TFO(cmd)->write_pending() call
5261 	 * because the se_cmd->se_lun pointer is not being cleared.
5262 	 */
5263 	transport_cmd_check_stop(cmd, 1, 0);
5264 
5265 	/*
5266 	 * Call the fabric write_pending function here to let the
5267 	 * frontend know that WRITE buffers are ready.
5268 	 */
5269 	ret = CMD_TFO(cmd)->write_pending(cmd);
5270 	if (ret < 0)
5271 		return ret;
5272 
5273 	return PYX_TRANSPORT_WRITE_PENDING;
5274 }
5275 
5276 /*	transport_release_cmd_to_pool():
5277  *	Releases struct se_cmd resources and returns the descriptor to the
5278  *	fabric module's command pool via ->release_cmd_to_pool().
5279  */
5280 void transport_release_cmd_to_pool(struct se_cmd *cmd)
5281 {
5282 	BUG_ON(!T_TASK(cmd));
5283 	BUG_ON(!CMD_TFO(cmd));
5284 
5285 	transport_free_se_cmd(cmd);
5286 	CMD_TFO(cmd)->release_cmd_to_pool(cmd);
5287 }
5288 EXPORT_SYMBOL(transport_release_cmd_to_pool);
5289 
5290 /*	transport_generic_free_cmd():
5291  *
5292  *	Called from processing frontend to release storage engine resources
5293  */
5294 void transport_generic_free_cmd(
5295 	struct se_cmd *cmd,
5296 	int wait_for_tasks,
5297 	int release_to_pool,
5298 	int session_reinstatement)
5299 {
5300 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
5301 		transport_release_cmd_to_pool(cmd);
5302 	else {
5303 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
5304 
5305 		if (SE_LUN(cmd)) {
5306 #if 0
5307 			printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
5308 				" SE_LUN(cmd)\n", cmd,
5309 				CMD_TFO(cmd)->get_task_tag(cmd));
5310 #endif
5311 			transport_lun_remove_cmd(cmd);
5312 		}
5313 
5314 		if (wait_for_tasks && cmd->transport_wait_for_tasks)
5315 			cmd->transport_wait_for_tasks(cmd, 0, 0);
5316 
5317 		transport_free_dev_tasks(cmd);
5318 
5319 		transport_generic_remove(cmd, release_to_pool,
5320 				session_reinstatement);
5321 	}
5322 }
5323 EXPORT_SYMBOL(transport_generic_free_cmd);
5324 
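/*
 * No-op ->transport_wait_for_tasks() handler: there is nothing to pause or
 * release for the struct se_cmd, so simply return.
 */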
5325 static void transport_nop_wait_for_tasks(
5326 	struct se_cmd *cmd,
5327 	int remove_cmd,
5328 	int session_reinstatement)
5329 {
5330 	return;
5331 }
5332 
5333 /*	transport_lun_wait_for_tasks():
5334  *
5335  *	Called from ConfigFS context to stop the passed struct se_cmd to allow
5336  *	a struct se_lun to be successfully shut down.
5337  */
5338 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
5339 {
5340 	unsigned long flags;
5341 	int ret;
5342 	/*
5343 	 * If the frontend has already requested this struct se_cmd to
5344 	 * be stopped, we can safely ignore this struct se_cmd.
5345 	 */
5346 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5347 	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
5348 		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
5349 		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
5350 			" TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
5351 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5352 		transport_cmd_check_stop(cmd, 1, 0);
5353 		return -1;
5354 	}
5355 	atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
5356 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5357 
5358 	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
5359 
5360 	ret = transport_stop_tasks_for_cmd(cmd);
5361 
5362 	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
5363 			" %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
5364 	if (!ret) {
5365 		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
5366 				CMD_TFO(cmd)->get_task_tag(cmd));
5367 		wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
5368 		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
5369 				CMD_TFO(cmd)->get_task_tag(cmd));
5370 	}
5371 	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
5372 
5373 	return 0;
5374 }
5375 
5376 /* #define DEBUG_CLEAR_LUN */
5377 #ifdef DEBUG_CLEAR_LUN
5378 #define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
5379 #else
5380 #define DEBUG_CLEAR_L(x...)
5381 #endif
5382 
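/*
 * Walk lun->lun_cmd_list and stop each outstanding struct se_cmd so the
 * struct se_lun can be shut down: mark the command with transport_lun_stop,
 * wait for its tasks via transport_lun_wait_for_tasks(), drop its device
 * state, and return CHECK_CONDITION (TCM_NON_EXISTENT_LUN) to the initiator.
 */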
5383 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
5384 {
5385 	struct se_cmd *cmd = NULL;
5386 	unsigned long lun_flags, cmd_flags;
5387 	/*
5388 	 * Do exception processing and return CHECK_CONDITION status to the
5389 	 * Initiator Port.
5390 	 */
5391 	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5392 	while (!list_empty_careful(&lun->lun_cmd_list)) {
5393 		cmd = list_entry(lun->lun_cmd_list.next,
5394 			struct se_cmd, se_lun_list);
5395 		list_del(&cmd->se_lun_list);
5396 
5397 		if (!(T_TASK(cmd))) {
5398 			printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
5399 				" [i,t]_state: %u/%u\n",
5400 				CMD_TFO(cmd)->get_task_tag(cmd),
5401 				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
5402 			BUG();
5403 		}
5404 		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
5405 		/*
5406 		 * This will notify target_core_transport.c:
5407 		 * transport_cmd_check_stop() that a LUN shutdown is in
5408 		 * progress for this struct se_cmd.
5409 		 */
5410 		spin_lock(&T_TASK(cmd)->t_state_lock);
5411 		DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
5412 			"_lun_stop for ITT: 0x%08x\n",
5413 			SE_LUN(cmd)->unpacked_lun,
5414 			CMD_TFO(cmd)->get_task_tag(cmd));
5415 		atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
5416 		spin_unlock(&T_TASK(cmd)->t_state_lock);
5417 
5418 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
5419 
5420 		if (!(SE_LUN(cmd))) {
5421 			printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
5422 				CMD_TFO(cmd)->get_task_tag(cmd),
5423 				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
5424 			BUG();
5425 		}
5426 		/*
5427 		 * If the Storage engine still owns this struct se_cmd, determine
5428 		 * and/or stop its context.
5429 		 */
5430 		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
5431 			"_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
5432 			CMD_TFO(cmd)->get_task_tag(cmd));
5433 
5434 		if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
5435 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5436 			continue;
5437 		}
5438 
5439 		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
5440 			"_wait_for_tasks(): SUCCESS\n",
5441 			SE_LUN(cmd)->unpacked_lun,
5442 			CMD_TFO(cmd)->get_task_tag(cmd));
5443 
5444 		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
5445 		if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
5446 			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
5447 			goto check_cond;
5448 		}
5449 		atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
5450 		transport_all_task_dev_remove_state(cmd);
5451 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
5452 
5453 		transport_free_dev_tasks(cmd);
5454 		/*
5455 		 * The Storage engine stopped this struct se_cmd before it was
5456 		 * sent to the fabric frontend for delivery back to the
5457 		 * Initiator Node.  Return this SCSI CDB back with a
5458 		 * CHECK_CONDITION status.
5459 		 */
5460 check_cond:
5461 		transport_send_check_condition_and_sense(cmd,
5462 				TCM_NON_EXISTENT_LUN, 0);
5463 		/*
5464 		 * If the fabric frontend is waiting for this struct se_cmd to
5465 		 * be released, notify the waiting thread now that the LU has
5466 		 * finished accessing it.
5467 		 */
5468 		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
5469 		if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
5470 			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
5471 				" struct se_cmd: %p ITT: 0x%08x\n",
5472 				lun->unpacked_lun,
5473 				cmd, CMD_TFO(cmd)->get_task_tag(cmd));
5474 
5475 			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
5476 					cmd_flags);
5477 			transport_cmd_check_stop(cmd, 1, 0);
5478 			complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
5479 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5480 			continue;
5481 		}
5482 		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
5483 			lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
5484 
5485 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
5486 		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5487 	}
5488 	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
5489 }
5490 
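/*
 * kthread entry point: run the LUN shutdown scan above and signal
 * lun->lun_shutdown_comp once every command on the LUN has been processed.
 */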
5491 static int transport_clear_lun_thread(void *p)
5492 {
5493 	struct se_lun *lun = (struct se_lun *)p;
5494 
5495 	__transport_clear_lun_from_sessions(lun);
5496 	complete(&lun->lun_shutdown_comp);
5497 
5498 	return 0;
5499 }
5500 
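/*
 * Spawn a "tcm_cl_%u" kthread to clear the LUN from all sessions and block
 * until it completes lun->lun_shutdown_comp.  Returns 0 on success, or -1
 * if the thread could not be started.
 */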
5501 int transport_clear_lun_from_sessions(struct se_lun *lun)
5502 {
5503 	struct task_struct *kt;
5504 
5505 	kt = kthread_run(transport_clear_lun_thread, (void *)lun,
5506 			"tcm_cl_%u", lun->unpacked_lun);
5507 	if (IS_ERR(kt)) {
5508 		printk(KERN_ERR "Unable to start clear_lun thread\n");
5509 		return -1;
5510 	}
5511 	wait_for_completion(&lun->lun_shutdown_comp);
5512 
5513 	return 0;
5514 }
5515 
5516 /*	transport_generic_wait_for_tasks():
5517  *
5518  *	Called from frontend or passthrough context to wait for storage engine
5519  *	to pause and/or release frontend generated struct se_cmd.
5520  */
5521 static void transport_generic_wait_for_tasks(
5522 	struct se_cmd *cmd,
5523 	int remove_cmd,
5524 	int session_reinstatement)
5525 {
5526 	unsigned long flags;
5527 
5528 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
5529 		return;
5530 
5531 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5532 	/*
5533 	 * If we are already stopped due to an external event (i.e. LUN shutdown),
5534 	 * sleep until the connection can have the passed struct se_cmd back.
5535 	 * T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by
5536 	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
5537 	 * has completed its operation on the struct se_cmd.
5538 	 */
5539 	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
5540 
5541 		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
5542 			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe"
5543 			"_stop_comp); for ITT: 0x%08x\n",
5544 			CMD_TFO(cmd)->get_task_tag(cmd));
5545 		/*
5546 		 * There is a special case for WRITEs where a FE exception +
5547 		 * LUN shutdown means the ConfigFS context is still sleeping on
5548 		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
5549 		 * We go ahead and complete transport_lun_stop_comp here just
5550 		 * to be sure.
5551 		 */
5552 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5553 		complete(&T_TASK(cmd)->transport_lun_stop_comp);
5554 		wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
5555 		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5556 
5557 		transport_all_task_dev_remove_state(cmd);
5558 		/*
5559 		 * At this point, the frontend who was the originator of this
5560 		 * struct se_cmd, now owns the structure and can be released through
5561 		 * normal means below.
5562 		 */
5563 		DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
5564 			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe_"
5565 			"stop_comp); for ITT: 0x%08x\n",
5566 			CMD_TFO(cmd)->get_task_tag(cmd));
5567 
5568 		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
5569 	}
5570 	if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
5571 	     atomic_read(&T_TASK(cmd)->t_transport_aborted))
5572 		goto remove;
5573 
5574 	atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
5575 
5576 	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
5577 		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
5578 		" = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
5579 		CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
5580 		cmd->deferred_t_state);
5581 
5582 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5583 
5584 	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
5585 
5586 	wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
5587 
5588 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5589 	atomic_set(&T_TASK(cmd)->t_transport_active, 0);
5590 	atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
5591 
5592 	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion("
5593 		"&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
5594 		CMD_TFO(cmd)->get_task_tag(cmd));
5595 remove:
5596 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5597 	if (!remove_cmd)
5598 		return;
5599 
5600 	transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
5601 }
5602 
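/*
 * Helpers to read and store the additional sense code (ASC) and additional
 * sense code qualifier (ASCQ) pair carried in the struct se_cmd.
 */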
5603 static int transport_get_sense_codes(
5604 	struct se_cmd *cmd,
5605 	u8 *asc,
5606 	u8 *ascq)
5607 {
5608 	*asc = cmd->scsi_asc;
5609 	*ascq = cmd->scsi_ascq;
5610 
5611 	return 0;
5612 }
5613 
5614 static int transport_set_sense_codes(
5615 	struct se_cmd *cmd,
5616 	u8 asc,
5617 	u8 ascq)
5618 {
5619 	cmd->scsi_asc = asc;
5620 	cmd->scsi_ascq = ascq;
5621 
5622 	return 0;
5623 }
5624 
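/*
 * Build fixed-format sense data (response code 0x70) for the given reason
 * code: fill in the SENSE KEY, ASC and ASCQ at their fixed-format offsets,
 * set SAM_STAT_CHECK_CONDITION, and hand the status back to the fabric via
 * CMD_TFO(cmd)->queue_status().  SCF_SENT_CHECK_CONDITION guards against
 * sending sense data for the same command twice.
 */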
5625 int transport_send_check_condition_and_sense(
5626 	struct se_cmd *cmd,
5627 	u8 reason,
5628 	int from_transport)
5629 {
5630 	unsigned char *buffer = cmd->sense_buffer;
5631 	unsigned long flags;
5632 	int offset;
5633 	u8 asc = 0, ascq = 0;
5634 
5635 	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5636 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
5637 		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5638 		return 0;
5639 	}
5640 	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
5641 	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5642 
5643 	if (!reason && from_transport)
5644 		goto after_reason;
5645 
5646 	if (!from_transport)
5647 		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
5648 	/*
5649 	 * Data Segment and SenseLength of the fabric response PDU.
5650 	 *
5651 	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
5652 	 * from include/scsi/scsi_cmnd.h
5653 	 */
5654 	offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
5655 				TRANSPORT_SENSE_BUFFER);
5656 	/*
5657 	 * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses
5658 	 * SENSE KEY values from include/scsi/scsi.h
5659 	 */
5660 	switch (reason) {
5661 	case TCM_NON_EXISTENT_LUN:
5662 	case TCM_UNSUPPORTED_SCSI_OPCODE:
5663 	case TCM_SECTOR_COUNT_TOO_MANY:
5664 		/* CURRENT ERROR */
5665 		buffer[offset] = 0x70;
5666 		/* ILLEGAL REQUEST */
5667 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
5668 		/* INVALID COMMAND OPERATION CODE */
5669 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
5670 		break;
5671 	case TCM_UNKNOWN_MODE_PAGE:
5672 		/* CURRENT ERROR */
5673 		buffer[offset] = 0x70;
5674 		/* ILLEGAL REQUEST */
5675 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
5676 		/* INVALID FIELD IN CDB */
5677 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
5678 		break;
5679 	case TCM_CHECK_CONDITION_ABORT_CMD:
5680 		/* CURRENT ERROR */
5681 		buffer[offset] = 0x70;
5682 		/* ABORTED COMMAND */
5683 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5684 		/* BUS DEVICE RESET FUNCTION OCCURRED */
5685 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
5686 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
5687 		break;
5688 	case TCM_INCORRECT_AMOUNT_OF_DATA:
5689 		/* CURRENT ERROR */
5690 		buffer[offset] = 0x70;
5691 		/* ABORTED COMMAND */
5692 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5693 		/* WRITE ERROR */
5694 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
5695 		/* NOT ENOUGH UNSOLICITED DATA */
5696 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
5697 		break;
5698 	case TCM_INVALID_CDB_FIELD:
5699 		/* CURRENT ERROR */
5700 		buffer[offset] = 0x70;
5701 		/* ABORTED COMMAND */
5702 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5703 		/* INVALID FIELD IN CDB */
5704 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
5705 		break;
5706 	case TCM_INVALID_PARAMETER_LIST:
5707 		/* CURRENT ERROR */
5708 		buffer[offset] = 0x70;
5709 		/* ABORTED COMMAND */
5710 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5711 		/* INVALID FIELD IN PARAMETER LIST */
5712 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
5713 		break;
5714 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
5715 		/* CURRENT ERROR */
5716 		buffer[offset] = 0x70;
5717 		/* ABORTED COMMAND */
5718 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5719 		/* WRITE ERROR */
5720 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
5721 		/* UNEXPECTED_UNSOLICITED_DATA */
5722 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
5723 		break;
5724 	case TCM_SERVICE_CRC_ERROR:
5725 		/* CURRENT ERROR */
5726 		buffer[offset] = 0x70;
5727 		/* ABORTED COMMAND */
5728 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5729 		/* PROTOCOL SERVICE CRC ERROR */
5730 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
5731 		/* N/A */
5732 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
5733 		break;
5734 	case TCM_SNACK_REJECTED:
5735 		/* CURRENT ERROR */
5736 		buffer[offset] = 0x70;
5737 		/* ABORTED COMMAND */
5738 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5739 		/* READ ERROR */
5740 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
5741 		/* FAILED RETRANSMISSION REQUEST */
5742 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
5743 		break;
5744 	case TCM_WRITE_PROTECTED:
5745 		/* CURRENT ERROR */
5746 		buffer[offset] = 0x70;
5747 		/* DATA PROTECT */
5748 		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
5749 		/* WRITE PROTECTED */
5750 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
5751 		break;
5752 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
5753 		/* CURRENT ERROR */
5754 		buffer[offset] = 0x70;
5755 		/* UNIT ATTENTION */
5756 		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
5757 		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
5758 		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
5759 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
5760 		break;
5761 	case TCM_CHECK_CONDITION_NOT_READY:
5762 		/* CURRENT ERROR */
5763 		buffer[offset] = 0x70;
5764 		/* Not Ready */
5765 		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
5766 		transport_get_sense_codes(cmd, &asc, &ascq);
5767 		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
5768 		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
5769 		break;
5770 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
5771 	default:
5772 		/* CURRENT ERROR */
5773 		buffer[offset] = 0x70;
5774 		/* ILLEGAL REQUEST */
5775 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
5776 		/* LOGICAL UNIT COMMUNICATION FAILURE */
5777 		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x08;
5778 		break;
5779 	}
5780 	/*
5781 	 * This code uses linux/include/scsi/scsi.h SAM status codes!
5782 	 */
5783 	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
5784 	/*
5785 	 * Automatically padded, this value is encoded in the fabric's
5786 	 * data_length response PDU containing the SCSI defined sense data.
5787 	 */
5788 	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
5789 
5790 after_reason:
5791 	CMD_TFO(cmd)->queue_status(cmd);
5792 	return 0;
5793 }
5794 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
5795 
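/*
 * If this command has been aborted (t_transport_aborted != 0), optionally
 * queue a delayed SAM_STAT_TASK_ABORTED status back to the fabric.  Returns
 * 1 when the command is aborted (status sent or suppressed), 0 otherwise.
 */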
5796 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
5797 {
5798 	int ret = 0;
5799 
5800 	if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
5801 		if (!(send_status) ||
5802 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
5803 			return 1;
5804 #if 0
5805 		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
5806 			" status for CDB: 0x%02x ITT: 0x%08x\n",
5807 			T_TASK(cmd)->t_task_cdb[0],
5808 			CMD_TFO(cmd)->get_task_tag(cmd));
5809 #endif
5810 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
5811 		CMD_TFO(cmd)->queue_status(cmd);
5812 		ret = 1;
5813 	}
5814 	return ret;
5815 }
5816 EXPORT_SYMBOL(transport_check_aborted_status);
5817 
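/*
 * Explicitly send SAM_STAT_TASK_ABORTED status for an aborted command,
 * deferring the response while fabric WRITE data is still expected.
 */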
5818 void transport_send_task_abort(struct se_cmd *cmd)
5819 {
5820 	/*
5821 	 * If there are still expected incoming fabric WRITEs, we wait
5822 	 * until they have completed before sending a TASK_ABORTED
5823 	 * response.  This response with TASK_ABORTED status will be
5824 	 * queued back to the fabric module by transport_check_aborted_status().
5825 	 */
5826 	if (cmd->data_direction == DMA_TO_DEVICE) {
5827 		if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
5828 			atomic_inc(&T_TASK(cmd)->t_transport_aborted);
5829 			smp_mb__after_atomic_inc();
5830 			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5831 			transport_new_cmd_failure(cmd);
5832 			return;
5833 		}
5834 	}
5835 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5836 #if 0
5837 	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
5838 		" ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
5839 		CMD_TFO(cmd)->get_task_tag(cmd));
5840 #endif
5841 	CMD_TFO(cmd)->queue_status(cmd);
5842 }
5843 
5844 /*	transport_generic_do_tmr():
5845  *	Executes the requested task management function and queues the
5846  *	TMR response back to the fabric module.
5847  */
5848 int transport_generic_do_tmr(struct se_cmd *cmd)
5849 {
5850 	struct se_cmd *ref_cmd;
5851 	struct se_device *dev = SE_DEV(cmd);
5852 	struct se_tmr_req *tmr = cmd->se_tmr_req;
5853 	int ret;
5854 
5855 	switch (tmr->function) {
5856 	case TMR_ABORT_TASK:
5857 		ref_cmd = tmr->ref_cmd;
5858 		tmr->response = TMR_FUNCTION_REJECTED;
5859 		break;
5860 	case TMR_ABORT_TASK_SET:
5861 	case TMR_CLEAR_ACA:
5862 	case TMR_CLEAR_TASK_SET:
5863 		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
5864 		break;
5865 	case TMR_LUN_RESET:
5866 		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
5867 		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
5868 					 TMR_FUNCTION_REJECTED;
5869 		break;
5870 	case TMR_TARGET_WARM_RESET:
5871 		tmr->response = TMR_FUNCTION_REJECTED;
5872 		break;
5873 	case TMR_TARGET_COLD_RESET:
5874 		tmr->response = TMR_FUNCTION_REJECTED;
5875 		break;
5876 	default:
5877 		printk(KERN_ERR "Unknown TMR function: 0x%02x.\n",
5878 				tmr->function);
5879 		tmr->response = TMR_FUNCTION_REJECTED;
5880 		break;
5881 	}
5882 
5883 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
5884 	CMD_TFO(cmd)->queue_tm_rsp(cmd);
5885 
5886 	transport_cmd_check_stop(cmd, 2, 0);
5887 	return 0;
5888 }
5889 
5890 /*
5891  *	Called with spin_lock_irq(&dev->execute_task_lock); held
5892  *
5893  */
5894 static struct se_task *
5895 transport_get_task_from_state_list(struct se_device *dev)
5896 {
5897 	struct se_task *task;
5898 
5899 	if (list_empty(&dev->state_task_list))
5900 		return NULL;
5901 
5902 	list_for_each_entry(task, &dev->state_task_list, t_state_list)
5903 		break;
5904 
5905 	list_del(&task->t_state_list);
5906 	atomic_set(&task->task_state_active, 0);
5907 
5908 	return task;
5909 }
5910 
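/*
 * Called when TRANSPORT_DEVICE_SHUTDOWN is set: drain the device's state
 * task list (stopping any active tasks), then flush the remaining
 * struct se_cmd entries off the device queue, failing each one with
 * TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE where a frontend reference exists.
 */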
5911 static void transport_processing_shutdown(struct se_device *dev)
5912 {
5913 	struct se_cmd *cmd;
5914 	struct se_queue_req *qr;
5915 	struct se_task *task;
5916 	u8 state;
5917 	unsigned long flags;
5918 	/*
5919 	 * Empty the struct se_device's struct se_task state list.
5920 	 */
5921 	spin_lock_irqsave(&dev->execute_task_lock, flags);
5922 	while ((task = transport_get_task_from_state_list(dev))) {
5923 		if (!(TASK_CMD(task))) {
5924 			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
5925 			continue;
5926 		}
5927 		cmd = TASK_CMD(task);
5928 
5929 		if (!T_TASK(cmd)) {
5930 			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
5931 				" %p ITT: 0x%08x\n", task, cmd,
5932 				CMD_TFO(cmd)->get_task_tag(cmd));
5933 			continue;
5934 		}
5935 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5936 
5937 		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5938 
5939 		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
5940 			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
5941 			" %d/%d cdb: 0x%02x\n", cmd, task,
5942 			CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
5943 			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
5944 			cmd->t_state, cmd->deferred_t_state,
5945 			T_TASK(cmd)->t_task_cdb[0]);
5946 		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
5947 			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
5948 			" t_transport_stop: %d t_transport_sent: %d\n",
5949 			CMD_TFO(cmd)->get_task_tag(cmd),
5950 			T_TASK(cmd)->t_task_cdbs,
5951 			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
5952 			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
5953 			atomic_read(&T_TASK(cmd)->t_transport_active),
5954 			atomic_read(&T_TASK(cmd)->t_transport_stop),
5955 			atomic_read(&T_TASK(cmd)->t_transport_sent));
5956 
5957 		if (atomic_read(&task->task_active)) {
5958 			atomic_set(&task->task_stop, 1);
5959 			spin_unlock_irqrestore(
5960 				&T_TASK(cmd)->t_state_lock, flags);
5961 
5962 			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
5963 				" %p\n", task, dev);
5964 			wait_for_completion(&task->task_stop_comp);
5965 			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
5966 				task, dev);
5967 
5968 			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5969 			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
5970 
5971 			atomic_set(&task->task_active, 0);
5972 			atomic_set(&task->task_stop, 0);
5973 		} else {
5974 			if (atomic_read(&task->task_execute_queue) != 0)
5975 				transport_remove_task_from_execute_queue(task, dev);
5976 		}
5977 		__transport_stop_task_timer(task, &flags);
5978 
5979 		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
5980 			spin_unlock_irqrestore(
5981 					&T_TASK(cmd)->t_state_lock, flags);
5982 
5983 			DEBUG_DO("Skipping task: %p, dev: %p for"
5984 				" t_task_cdbs_ex_left: %d\n", task, dev,
5985 				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
5986 
5987 			spin_lock_irqsave(&dev->execute_task_lock, flags);
5988 			continue;
5989 		}
5990 
5991 		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
5992 			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
5993 					" %p\n", task, dev);
5994 
5995 			if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
5996 				spin_unlock_irqrestore(
5997 					&T_TASK(cmd)->t_state_lock, flags);
5998 				transport_send_check_condition_and_sense(
5999 					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
6000 					0);
6001 				transport_remove_cmd_from_queue(cmd,
6002 					SE_DEV(cmd)->dev_queue_obj);
6003 
6004 				transport_lun_remove_cmd(cmd);
6005 				transport_cmd_check_stop(cmd, 1, 0);
6006 			} else {
6007 				spin_unlock_irqrestore(
6008 					&T_TASK(cmd)->t_state_lock, flags);
6009 
6010 				transport_remove_cmd_from_queue(cmd,
6011 					SE_DEV(cmd)->dev_queue_obj);
6012 
6013 				transport_lun_remove_cmd(cmd);
6014 
6015 				if (transport_cmd_check_stop(cmd, 1, 0))
6016 					transport_generic_remove(cmd, 0, 0);
6017 			}
6018 
6019 			spin_lock_irqsave(&dev->execute_task_lock, flags);
6020 			continue;
6021 		}
6022 		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
6023 				task, dev);
6024 
6025 		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
6026 			spin_unlock_irqrestore(
6027 				&T_TASK(cmd)->t_state_lock, flags);
6028 			transport_send_check_condition_and_sense(cmd,
6029 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
6030 			transport_remove_cmd_from_queue(cmd,
6031 				SE_DEV(cmd)->dev_queue_obj);
6032 
6033 			transport_lun_remove_cmd(cmd);
6034 			transport_cmd_check_stop(cmd, 1, 0);
6035 		} else {
6036 			spin_unlock_irqrestore(
6037 				&T_TASK(cmd)->t_state_lock, flags);
6038 
6039 			transport_remove_cmd_from_queue(cmd,
6040 				SE_DEV(cmd)->dev_queue_obj);
6041 			transport_lun_remove_cmd(cmd);
6042 
6043 			if (transport_cmd_check_stop(cmd, 1, 0))
6044 				transport_generic_remove(cmd, 0, 0);
6045 		}
6046 
6047 		spin_lock_irqsave(&dev->execute_task_lock, flags);
6048 	}
6049 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
6050 	/*
6051 	 * Empty the struct se_device's struct se_cmd list.
6052 	 */
6053 	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
6054 	while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
6055 		spin_unlock_irqrestore(
6056 				&dev->dev_queue_obj->cmd_queue_lock, flags);
6057 		cmd = (struct se_cmd *)qr->cmd;
6058 		state = qr->state;
6059 		kfree(qr);
6060 
6061 		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
6062 				cmd, state);
6063 
6064 		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
6065 			transport_send_check_condition_and_sense(cmd,
6066 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
6067 
6068 			transport_lun_remove_cmd(cmd);
6069 			transport_cmd_check_stop(cmd, 1, 0);
6070 		} else {
6071 			transport_lun_remove_cmd(cmd);
6072 			if (transport_cmd_check_stop(cmd, 1, 0))
6073 				transport_generic_remove(cmd, 0, 0);
6074 		}
6075 		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
6076 	}
6077 	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
6078 }
6079 
6080 /*	transport_processing_thread():
6081  *	Per struct se_device processing thread: executes queued tasks and
6082  *	dispatches struct se_cmd descriptors based on their t_state.
6083  */
6084 static int transport_processing_thread(void *param)
6085 {
6086 	int ret, t_state;
6087 	struct se_cmd *cmd;
6088 	struct se_device *dev = (struct se_device *) param;
6089 	struct se_queue_req *qr;
6090 
6091 	set_user_nice(current, -20);
6092 
6093 	while (!kthread_should_stop()) {
6094 		ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
6095 				atomic_read(&dev->dev_queue_obj->queue_cnt) ||
6096 				kthread_should_stop());
6097 		if (ret < 0)
6098 			goto out;
6099 
6100 		spin_lock_irq(&dev->dev_status_lock);
6101 		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
6102 			spin_unlock_irq(&dev->dev_status_lock);
6103 			transport_processing_shutdown(dev);
6104 			continue;
6105 		}
6106 		spin_unlock_irq(&dev->dev_status_lock);
6107 
6108 get_cmd:
6109 		__transport_execute_tasks(dev);
6110 
6111 		qr = transport_get_qr_from_queue(dev->dev_queue_obj);
6112 		if (!(qr))
6113 			continue;
6114 
6115 		cmd = (struct se_cmd *)qr->cmd;
6116 		t_state = qr->state;
6117 		kfree(qr);
6118 
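		/*
		 * Dispatch on the t_state that was recorded when this
		 * struct se_cmd was added to the device queue.
		 */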
6119 		switch (t_state) {
6120 		case TRANSPORT_NEW_CMD_MAP:
6121 			if (!(CMD_TFO(cmd)->new_cmd_map)) {
6122 				printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
6123 					" NULL for TRANSPORT_NEW_CMD_MAP\n");
6124 				BUG();
6125 			}
6126 			ret = CMD_TFO(cmd)->new_cmd_map(cmd);
6127 			if (ret < 0) {
6128 				cmd->transport_error_status = ret;
6129 				transport_generic_request_failure(cmd, NULL,
6130 						0, (cmd->data_direction !=
6131 						    DMA_TO_DEVICE));
6132 				break;
6133 			}
6134 			/* Fall through */
6135 		case TRANSPORT_NEW_CMD:
6136 			ret = transport_generic_new_cmd(cmd);
6137 			if (ret < 0) {
6138 				cmd->transport_error_status = ret;
6139 				transport_generic_request_failure(cmd, NULL,
6140 					0, (cmd->data_direction !=
6141 					 DMA_TO_DEVICE));
6142 			}
6143 			break;
6144 		case TRANSPORT_PROCESS_WRITE:
6145 			transport_generic_process_write(cmd);
6146 			break;
6147 		case TRANSPORT_COMPLETE_OK:
6148 			transport_stop_all_task_timers(cmd);
6149 			transport_generic_complete_ok(cmd);
6150 			break;
6151 		case TRANSPORT_REMOVE:
6152 			transport_generic_remove(cmd, 1, 0);
6153 			break;
6154 		case TRANSPORT_FREE_CMD_INTR:
6155 			transport_generic_free_cmd(cmd, 0, 1, 0);
6156 			break;
6157 		case TRANSPORT_PROCESS_TMR:
6158 			transport_generic_do_tmr(cmd);
6159 			break;
6160 		case TRANSPORT_COMPLETE_FAILURE:
6161 			transport_generic_request_failure(cmd, NULL, 1, 1);
6162 			break;
6163 		case TRANSPORT_COMPLETE_TIMEOUT:
6164 			transport_stop_all_task_timers(cmd);
6165 			transport_generic_request_timeout(cmd);
6166 			break;
6167 		default:
6168 			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
6169 				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
6170 				" %u\n", t_state, cmd->deferred_t_state,
6171 				CMD_TFO(cmd)->get_task_tag(cmd),
6172 				CMD_TFO(cmd)->get_cmd_state(cmd),
6173 				SE_LUN(cmd)->unpacked_lun);
6174 			BUG();
6175 		}
6176 
6177 		goto get_cmd;
6178 	}
6179 
6180 out:
6181 	transport_release_all_cmds(dev);
6182 	dev->process_thread = NULL;
6183 	return 0;
6184 }
6185