xref: /linux/drivers/target/target_core_device.c (revision 22fd411ac9853f4becb3db9860f6d0b8398cac44)
1 /*******************************************************************************
2  * Filename:  target_core_device.c (based on iscsi_target_device.c)
3  *
 * This file contains the iSCSI Virtual Device and Disk Transport-agnostic
 * functions.
6  *
7  * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8  * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
9  * Copyright (c) 2007-2010 Rising Tide Systems
10  * Copyright (c) 2008-2010 Linux-iSCSI.org
11  *
12  * Nicholas A. Bellinger <nab@kernel.org>
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2 of the License, or
17  * (at your option) any later version.
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with this program; if not, write to the Free Software
26  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27  *
28  ******************************************************************************/
29 
30 #include <linux/net.h>
31 #include <linux/string.h>
32 #include <linux/delay.h>
33 #include <linux/timer.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/smp_lock.h>
37 #include <linux/kthread.h>
38 #include <linux/in.h>
39 #include <net/sock.h>
40 #include <net/tcp.h>
41 #include <scsi/scsi.h>
42 
43 #include <target/target_core_base.h>
44 #include <target/target_core_device.h>
45 #include <target/target_core_tpg.h>
46 #include <target/target_core_transport.h>
47 #include <target/target_core_fabric_ops.h>
48 
49 #include "target_core_alua.h"
50 #include "target_core_hba.h"
51 #include "target_core_pr.h"
52 #include "target_core_ua.h"
53 
54 static void se_dev_start(struct se_device *dev);
55 static void se_dev_stop(struct se_device *dev);
56 
57 int transport_get_lun_for_cmd(
58 	struct se_cmd *se_cmd,
59 	unsigned char *cdb,
60 	u32 unpacked_lun)
61 {
62 	struct se_dev_entry *deve;
63 	struct se_lun *se_lun = NULL;
64 	struct se_session *se_sess = SE_SESS(se_cmd);
65 	unsigned long flags;
66 	int read_only = 0;
67 
68 	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
69 	deve = se_cmd->se_deve =
70 			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
71 	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
72 		if (se_cmd) {
73 			deve->total_cmds++;
74 			deve->total_bytes += se_cmd->data_length;
75 
76 			if (se_cmd->data_direction == DMA_TO_DEVICE) {
77 				if (deve->lun_flags &
78 						TRANSPORT_LUNFLAGS_READ_ONLY) {
79 					read_only = 1;
80 					goto out;
81 				}
82 				deve->write_bytes += se_cmd->data_length;
83 			} else if (se_cmd->data_direction ==
84 				   DMA_FROM_DEVICE) {
85 				deve->read_bytes += se_cmd->data_length;
86 			}
87 		}
88 		deve->deve_cmds++;
89 
90 		se_lun = se_cmd->se_lun = deve->se_lun;
91 		se_cmd->pr_res_key = deve->pr_res_key;
92 		se_cmd->orig_fe_lun = unpacked_lun;
93 		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
94 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
95 	}
96 out:
97 	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
98 
99 	if (!se_lun) {
100 		if (read_only) {
101 			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
102 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk(KERN_INFO "TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
104 				" Access for 0x%08x\n",
105 				CMD_TFO(se_cmd)->get_fabric_name(),
106 				unpacked_lun);
107 			return -1;
108 		} else {
109 			/*
110 			 * Use the se_portal_group->tpg_virt_lun0 to allow for
111 			 * REPORT_LUNS, et al to be returned when no active
112 			 * MappedLUN=0 exists for this Initiator Port.
113 			 */
114 			if (unpacked_lun != 0) {
115 				se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
116 				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
				printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
118 					" Access for 0x%08x\n",
119 					CMD_TFO(se_cmd)->get_fabric_name(),
120 					unpacked_lun);
121 				return -1;
122 			}
123 			/*
124 			 * Force WRITE PROTECT for virtual LUN 0
125 			 */
126 			if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
127 			    (se_cmd->data_direction != DMA_NONE)) {
128 				se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
129 				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
130 				return -1;
131 			}
132 #if 0
133 			printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
134 				CMD_TFO(se_cmd)->get_fabric_name());
135 #endif
136 			se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
137 			se_cmd->orig_fe_lun = 0;
138 			se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
139 			se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
140 		}
141 	}
142 	/*
143 	 * Determine if the struct se_lun is online.
144 	 */
145 /* #warning FIXME: Check for LUN_RESET + UNIT Attention */
146 	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
147 		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
148 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
149 		return -1;
150 	}
151 
152 	{
153 	struct se_device *dev = se_lun->lun_se_dev;
154 	spin_lock(&dev->stats_lock);
155 	dev->num_cmds++;
156 	if (se_cmd->data_direction == DMA_TO_DEVICE)
157 		dev->write_bytes += se_cmd->data_length;
158 	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
159 		dev->read_bytes += se_cmd->data_length;
160 	spin_unlock(&dev->stats_lock);
161 	}
162 
163 	/*
	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list is used
165 	 * for tracking state of struct se_cmds during LUN shutdown events.
166 	 */
167 	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
168 	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
169 	atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
170 #if 0
171 	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
172 		CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
173 #endif
174 	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
175 
176 	return 0;
177 }
178 EXPORT_SYMBOL(transport_get_lun_for_cmd);
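
/*
 * Usage sketch for fabric modules (illustrative only; my_fabric_queue_cmd()
 * and the surrounding error handling are hypothetical, not part of TCM):
 */
#if 0
static int my_fabric_queue_cmd(struct se_cmd *se_cmd, unsigned char *cdb,
				u32 unpacked_lun)
{
	if (transport_get_lun_for_cmd(se_cmd, cdb, unpacked_lun) < 0) {
		/*
		 * se_cmd->scsi_sense_reason has been set; the fabric would
		 * now send a CHECK_CONDITION response to the initiator.
		 */
		return -EINVAL;
	}
	/* LUN resolved; continue with generic command submission. */
	return 0;
}
#endif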
179 
180 int transport_get_lun_for_tmr(
181 	struct se_cmd *se_cmd,
182 	u32 unpacked_lun)
183 {
184 	struct se_device *dev = NULL;
185 	struct se_dev_entry *deve;
186 	struct se_lun *se_lun = NULL;
187 	struct se_session *se_sess = SE_SESS(se_cmd);
188 	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
189 
190 	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
191 	deve = se_cmd->se_deve =
192 			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
193 	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
194 		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
195 		dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
196 		se_cmd->pr_res_key = deve->pr_res_key;
197 		se_cmd->orig_fe_lun = unpacked_lun;
198 		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
199 /*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
200 	}
201 	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
202 
203 	if (!se_lun) {
204 		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
205 			" Access for 0x%08x\n",
206 			CMD_TFO(se_cmd)->get_fabric_name(),
207 			unpacked_lun);
208 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
209 		return -1;
210 	}
211 	/*
212 	 * Determine if the struct se_lun is online.
213 	 */
214 /* #warning FIXME: Check for LUN_RESET + UNIT Attention */
215 	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
216 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
217 		return -1;
218 	}
219 
220 	spin_lock(&dev->se_tmr_lock);
221 	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
222 	spin_unlock(&dev->se_tmr_lock);
223 
224 	return 0;
225 }
226 EXPORT_SYMBOL(transport_get_lun_for_tmr);
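
/*
 * Sketch of the TMR path (assumptions: the caller allocates the request via
 * core_tmr_alloc_req(), and fabric_tmr_ptr is a hypothetical fabric cookie):
 */
#if 0
	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr,
						TMR_LUN_RESET);
	if (transport_get_lun_for_tmr(se_cmd, unpacked_lun) < 0)
		/* Respond to the initiator with an incorrect LUN TMR status */
		goto fail;
#endif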
227 
228 /*
229  * This function is called from core_scsi3_emulate_pro_register_and_move()
230  * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
231  * when a matching rtpi is found.
232  */
233 struct se_dev_entry *core_get_se_deve_from_rtpi(
234 	struct se_node_acl *nacl,
235 	u16 rtpi)
236 {
237 	struct se_dev_entry *deve;
238 	struct se_lun *lun;
239 	struct se_port *port;
240 	struct se_portal_group *tpg = nacl->se_tpg;
241 	u32 i;
242 
243 	spin_lock_irq(&nacl->device_list_lock);
244 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
245 		deve = &nacl->device_list[i];
246 
247 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
248 			continue;
249 
250 		lun = deve->se_lun;
251 		if (!(lun)) {
			printk(KERN_ERR "%s device entry's se_lun pointer is"
				" NULL, but Initiator has access.\n",
254 				TPG_TFO(tpg)->get_fabric_name());
255 			continue;
256 		}
257 		port = lun->lun_sep;
258 		if (!(port)) {
			printk(KERN_ERR "%s device entry's se_lun->lun_sep port"
				" pointer is NULL, but Initiator has access.\n",
261 				TPG_TFO(tpg)->get_fabric_name());
262 			continue;
263 		}
264 		if (port->sep_rtpi != rtpi)
265 			continue;
266 
267 		atomic_inc(&deve->pr_ref_count);
268 		smp_mb__after_atomic_inc();
269 		spin_unlock_irq(&nacl->device_list_lock);
270 
271 		return deve;
272 	}
273 	spin_unlock_irq(&nacl->device_list_lock);
274 
275 	return NULL;
276 }
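
/*
 * Callers of core_get_se_deve_from_rtpi() must balance the pr_ref_count
 * taken above once they are done with the entry, e.g. (sketch only):
 */
#if 0
	deve = core_get_se_deve_from_rtpi(nacl, rtpi);
	if (deve) {
		/* ... use deve while holding the PR reference ... */
		atomic_dec(&deve->pr_ref_count);
		smp_mb__after_atomic_dec();
	}
#endif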
277 
278 int core_free_device_list_for_node(
279 	struct se_node_acl *nacl,
280 	struct se_portal_group *tpg)
281 {
282 	struct se_dev_entry *deve;
283 	struct se_lun *lun;
284 	u32 i;
285 
286 	if (!nacl->device_list)
287 		return 0;
288 
289 	spin_lock_irq(&nacl->device_list_lock);
290 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
291 		deve = &nacl->device_list[i];
292 
293 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
294 			continue;
295 
296 		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entry's se_lun pointer is"
				" NULL, but Initiator has access.\n",
299 				TPG_TFO(tpg)->get_fabric_name());
300 			continue;
301 		}
302 		lun = deve->se_lun;
303 
304 		spin_unlock_irq(&nacl->device_list_lock);
305 		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
306 			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
307 		spin_lock_irq(&nacl->device_list_lock);
308 	}
309 	spin_unlock_irq(&nacl->device_list_lock);
310 
311 	kfree(nacl->device_list);
312 	nacl->device_list = NULL;
313 
314 	return 0;
315 }
316 
317 void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
318 {
319 	struct se_dev_entry *deve;
320 
321 	spin_lock_irq(&se_nacl->device_list_lock);
322 	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
323 	deve->deve_cmds--;
324 	spin_unlock_irq(&se_nacl->device_list_lock);
325 
326 	return;
327 }
328 
329 void core_update_device_list_access(
330 	u32 mapped_lun,
331 	u32 lun_access,
332 	struct se_node_acl *nacl)
333 {
334 	struct se_dev_entry *deve;
335 
336 	spin_lock_irq(&nacl->device_list_lock);
337 	deve = &nacl->device_list[mapped_lun];
338 	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
339 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
340 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
341 	} else {
342 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
343 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
344 	}
345 	spin_unlock_irq(&nacl->device_list_lock);
346 
347 	return;
348 }
349 
350 /*      core_update_device_list_for_node():
351  *
352  *
353  */
354 int core_update_device_list_for_node(
355 	struct se_lun *lun,
356 	struct se_lun_acl *lun_acl,
357 	u32 mapped_lun,
358 	u32 lun_access,
359 	struct se_node_acl *nacl,
360 	struct se_portal_group *tpg,
361 	int enable)
362 {
363 	struct se_port *port = lun->lun_sep;
364 	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
365 	int trans = 0;
366 	/*
367 	 * If the MappedLUN entry is being disabled, the entry in
368 	 * port->sep_alua_list must be removed now before clearing the
369 	 * struct se_dev_entry pointers below as logic in
370 	 * core_alua_do_transition_tg_pt() depends on these being present.
371 	 */
372 	if (!(enable)) {
373 		/*
374 		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
376 		 * struct se_lun_acl.
377 		 */
378 		if (!(deve->se_lun_acl))
379 			return 0;
380 
381 		spin_lock_bh(&port->sep_alua_lock);
382 		list_del(&deve->alua_port_list);
383 		spin_unlock_bh(&port->sep_alua_lock);
384 	}
385 
386 	spin_lock_irq(&nacl->device_list_lock);
387 	if (enable) {
388 		/*
		 * Check if the call is handling a demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct se_lun
		 * + mapped_lun that was set up in demo mode.
392 		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -1;
			}
			if (deve->se_lun != lun) {
				printk(KERN_ERR "struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for demo"
					" mode -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -1;
			}
406 			deve->se_lun_acl = lun_acl;
407 			trans = 1;
408 		} else {
409 			deve->se_lun = lun;
410 			deve->se_lun_acl = lun_acl;
411 			deve->mapped_lun = mapped_lun;
412 			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
413 		}
414 
415 		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
416 			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
417 			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
418 		} else {
419 			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
420 			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
421 		}
422 
423 		if (trans) {
424 			spin_unlock_irq(&nacl->device_list_lock);
425 			return 0;
426 		}
427 		deve->creation_time = get_jiffies_64();
428 		deve->attach_count++;
429 		spin_unlock_irq(&nacl->device_list_lock);
430 
431 		spin_lock_bh(&port->sep_alua_lock);
432 		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
433 		spin_unlock_bh(&port->sep_alua_lock);
434 
435 		return 0;
436 	}
437 	/*
	 * Wait for any in-progress SPEC_I_PT=1 or REGISTER_AND_MOVE
439 	 * PR operation to complete.
440 	 */
441 	spin_unlock_irq(&nacl->device_list_lock);
442 	while (atomic_read(&deve->pr_ref_count) != 0)
443 		cpu_relax();
444 	spin_lock_irq(&nacl->device_list_lock);
445 	/*
446 	 * Disable struct se_dev_entry LUN ACL mapping
447 	 */
448 	core_scsi3_ua_release_all(deve);
449 	deve->se_lun = NULL;
450 	deve->se_lun_acl = NULL;
451 	deve->lun_flags = 0;
452 	deve->creation_time = 0;
453 	deve->attach_count--;
454 	spin_unlock_irq(&nacl->device_list_lock);
455 
456 	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
457 	return 0;
458 }
459 
460 /*      core_clear_lun_from_tpg():
461  *
462  *
463  */
464 void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
465 {
466 	struct se_node_acl *nacl;
467 	struct se_dev_entry *deve;
468 	u32 i;
469 
470 	spin_lock_bh(&tpg->acl_node_lock);
471 	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
472 		spin_unlock_bh(&tpg->acl_node_lock);
473 
474 		spin_lock_irq(&nacl->device_list_lock);
475 		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
476 			deve = &nacl->device_list[i];
477 			if (lun != deve->se_lun)
478 				continue;
479 			spin_unlock_irq(&nacl->device_list_lock);
480 
481 			core_update_device_list_for_node(lun, NULL,
482 				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
483 				nacl, tpg, 0);
484 
485 			spin_lock_irq(&nacl->device_list_lock);
486 		}
487 		spin_unlock_irq(&nacl->device_list_lock);
488 
489 		spin_lock_bh(&tpg->acl_node_lock);
490 	}
491 	spin_unlock_bh(&tpg->acl_node_lock);
492 
493 	return;
494 }
495 
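/*
 * Allocate a struct se_port and reserve a device-unique RELATIVE TARGET
 * PORT IDENTIFIER for it; see the spc4r17 table quoted in the body below.
 */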
496 static struct se_port *core_alloc_port(struct se_device *dev)
497 {
498 	struct se_port *port, *port_tmp;
499 
500 	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
501 	if (!(port)) {
502 		printk(KERN_ERR "Unable to allocate struct se_port\n");
503 		return NULL;
504 	}
505 	INIT_LIST_HEAD(&port->sep_alua_list);
506 	INIT_LIST_HEAD(&port->sep_list);
507 	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
508 	spin_lock_init(&port->sep_alua_lock);
509 	mutex_init(&port->sep_tg_pt_md_mutex);
510 
511 	spin_lock(&dev->se_port_lock);
512 	if (dev->dev_port_count == 0x0000ffff) {
513 		printk(KERN_WARNING "Reached dev->dev_port_count =="
514 				" 0x0000ffff\n");
515 		spin_unlock(&dev->se_port_lock);
516 		return NULL;
517 	}
518 again:
519 	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device.
521 	 * Here is the table from spc4r17 section 7.7.3.8.
522 	 *
523 	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
524 	 *
525 	 * Code      Description
526 	 * 0h        Reserved
527 	 * 1h        Relative port 1, historically known as port A
528 	 * 2h        Relative port 2, historically known as port B
529 	 * 3h to FFFFh    Relative port 3 through 65 535
530 	 */
531 	port->sep_rtpi = dev->dev_rpti_counter++;
532 	if (!(port->sep_rtpi))
533 		goto again;
534 
535 	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
536 		/*
		 * Make sure the RELATIVE TARGET PORT IDENTIFIER is unique
		 * across a 16-bit counter wrap.
539 		 */
540 		if (port->sep_rtpi == port_tmp->sep_rtpi)
541 			goto again;
542 	}
543 	spin_unlock(&dev->se_port_lock);
544 
545 	return port;
546 }
547 
548 static void core_export_port(
549 	struct se_device *dev,
550 	struct se_portal_group *tpg,
551 	struct se_port *port,
552 	struct se_lun *lun)
553 {
554 	struct se_subsystem_dev *su_dev = SU_DEV(dev);
555 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
556 
557 	spin_lock(&dev->se_port_lock);
558 	spin_lock(&lun->lun_sep_lock);
559 	port->sep_tpg = tpg;
560 	port->sep_lun = lun;
561 	lun->lun_sep = port;
562 	spin_unlock(&lun->lun_sep_lock);
563 
564 	list_add_tail(&port->sep_list, &dev->dev_sep_list);
565 	spin_unlock(&dev->se_port_lock);
566 
567 	if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
568 		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
569 		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
570 			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
571 					"_gp_member_t\n");
572 			return;
573 		}
574 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
575 		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
576 			T10_ALUA(su_dev)->default_tg_pt_gp);
577 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
578 		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
579 			" Group: alua/default_tg_pt_gp\n",
580 			TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
581 	}
582 
583 	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
585 }
586 
587 /*
588  *	Called with struct se_device->se_port_lock spinlock held.
589  */
590 static void core_release_port(struct se_device *dev, struct se_port *port)
591 {
592 	/*
593 	 * Wait for any port reference for PR ALL_TG_PT=1 operation
594 	 * to complete in __core_scsi3_alloc_registration()
595 	 */
596 	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
598 		cpu_relax();
599 	spin_lock(&dev->se_port_lock);
600 
601 	core_alua_free_tg_pt_gp_mem(port);
602 
603 	list_del(&port->sep_list);
604 	dev->dev_port_count--;
605 	kfree(port);
606 
607 	return;
608 }
609 
610 int core_dev_export(
611 	struct se_device *dev,
612 	struct se_portal_group *tpg,
613 	struct se_lun *lun)
614 {
615 	struct se_port *port;
616 
617 	port = core_alloc_port(dev);
618 	if (!(port))
619 		return -1;
620 
621 	lun->lun_se_dev = dev;
622 	se_dev_start(dev);
623 
624 	atomic_inc(&dev->dev_export_obj.obj_access_count);
625 	core_export_port(dev, tpg, port, lun);
626 	return 0;
627 }
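
/*
 * The export/unexport pairing as seen from the TPG LUN code (sketch; the
 * caller context in core_tpg_post_addlun()/core_tpg_post_dellun() is an
 * assumption here, not shown):
 */
#if 0
	if (core_dev_export(dev, tpg, lun) < 0)
		return -EINVAL;
	/* ... the LUN is now reachable through the new struct se_port ... */
	core_dev_unexport(dev, tpg, lun);
#endif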
628 
629 void core_dev_unexport(
630 	struct se_device *dev,
631 	struct se_portal_group *tpg,
632 	struct se_lun *lun)
633 {
634 	struct se_port *port = lun->lun_sep;
635 
636 	spin_lock(&lun->lun_sep_lock);
637 	if (lun->lun_se_dev == NULL) {
638 		spin_unlock(&lun->lun_sep_lock);
639 		return;
640 	}
641 	spin_unlock(&lun->lun_sep_lock);
642 
643 	spin_lock(&dev->se_port_lock);
644 	atomic_dec(&dev->dev_export_obj.obj_access_count);
645 	core_release_port(dev, port);
646 	spin_unlock(&dev->se_port_lock);
647 
648 	se_dev_stop(dev);
649 	lun->lun_se_dev = NULL;
650 }
651 
652 int transport_core_report_lun_response(struct se_cmd *se_cmd)
653 {
654 	struct se_dev_entry *deve;
655 	struct se_lun *se_lun;
656 	struct se_session *se_sess = SE_SESS(se_cmd);
657 	struct se_task *se_task;
658 	unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
659 	u32 cdb_offset = 0, lun_count = 0, offset = 8;
660 	u64 i, lun;
661 
	/*
	 * list_for_each_entry() never leaves the cursor NULL, so check for
	 * an empty task list explicitly before taking the first entry.
	 */
	if (list_empty(&T_TASK(se_cmd)->t_task_list)) {
		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	se_task = list_first_entry(&T_TASK(se_cmd)->t_task_list,
				struct se_task, t_list);
669 
670 	/*
671 	 * If no struct se_session pointer is present, this struct se_cmd is
672 	 * coming via a target_core_mod PASSTHROUGH op, and not through
673 	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
674 	 */
675 	if (!(se_sess)) {
676 		lun = 0;
677 		buf[offset++] = ((lun >> 56) & 0xff);
678 		buf[offset++] = ((lun >> 48) & 0xff);
679 		buf[offset++] = ((lun >> 40) & 0xff);
680 		buf[offset++] = ((lun >> 32) & 0xff);
681 		buf[offset++] = ((lun >> 24) & 0xff);
682 		buf[offset++] = ((lun >> 16) & 0xff);
683 		buf[offset++] = ((lun >> 8) & 0xff);
684 		buf[offset++] = (lun & 0xff);
685 		lun_count = 1;
686 		goto done;
687 	}
688 
689 	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
690 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
691 		deve = &SE_NODE_ACL(se_sess)->device_list[i];
692 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
693 			continue;
694 		se_lun = deve->se_lun;
695 		/*
		 * Keep counting so the returned LUN LIST LENGTH covers every
		 * mapped LUN, even once the initial ALLOCATION LENGTH has
		 * been exceeded.
698 		 * See SPC2-R20 7.19.
699 		 */
700 		lun_count++;
701 		if ((cdb_offset + 8) >= se_cmd->data_length)
702 			continue;
703 
704 		lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
705 		buf[offset++] = ((lun >> 56) & 0xff);
706 		buf[offset++] = ((lun >> 48) & 0xff);
707 		buf[offset++] = ((lun >> 40) & 0xff);
708 		buf[offset++] = ((lun >> 32) & 0xff);
709 		buf[offset++] = ((lun >> 24) & 0xff);
710 		buf[offset++] = ((lun >> 16) & 0xff);
711 		buf[offset++] = ((lun >> 8) & 0xff);
712 		buf[offset++] = (lun & 0xff);
713 		cdb_offset += 8;
714 	}
715 	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
716 
717 	/*
718 	 * See SPC3 r07, page 159.
719 	 */
720 done:
721 	lun_count *= 8;
722 	buf[0] = ((lun_count >> 24) & 0xff);
723 	buf[1] = ((lun_count >> 16) & 0xff);
724 	buf[2] = ((lun_count >> 8) & 0xff);
725 	buf[3] = (lun_count & 0xff);
726 
727 	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
728 }
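
/*
 * For reference, the REPORT LUNS parameter data assembled above:
 *
 *	Bytes 0-3: LUN LIST LENGTH (number of mapped LUNs * 8)
 *	Bytes 4-7: Reserved
 *	Bytes 8+ : one 8-byte LUN entry per mapped LUN
 */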
729 
730 /*	se_release_device_for_hba():
731  *
732  *
733  */
734 void se_release_device_for_hba(struct se_device *dev)
735 {
736 	struct se_hba *hba = dev->se_hba;
737 
738 	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
739 	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
740 	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
741 	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
742 	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
743 		se_dev_stop(dev);
744 
745 	if (dev->dev_ptr) {
746 		kthread_stop(dev->process_thread);
747 		if (dev->transport->free_device)
748 			dev->transport->free_device(dev->dev_ptr);
749 	}
750 
751 	spin_lock(&hba->device_lock);
752 	list_del(&dev->dev_list);
753 	hba->dev_count--;
754 	spin_unlock(&hba->device_lock);
755 
756 	core_scsi3_free_all_registrations(dev);
757 	se_release_vpd_for_dev(dev);
758 
759 	kfree(dev->dev_status_queue_obj);
760 	kfree(dev->dev_queue_obj);
761 	kfree(dev);
762 
763 	return;
764 }
765 
766 void se_release_vpd_for_dev(struct se_device *dev)
767 {
768 	struct t10_vpd *vpd, *vpd_tmp;
769 
770 	spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
771 	list_for_each_entry_safe(vpd, vpd_tmp,
772 			&DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
773 		list_del(&vpd->vpd_list);
774 		kfree(vpd);
775 	}
776 	spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
777 
778 	return;
779 }
780 
781 /*
782  * Called with struct se_hba->device_lock held.
783  */
784 void se_clear_dev_ports(struct se_device *dev)
785 {
786 	struct se_hba *hba = dev->se_hba;
787 	struct se_lun *lun;
788 	struct se_portal_group *tpg;
789 	struct se_port *sep, *sep_tmp;
790 
791 	spin_lock(&dev->se_port_lock);
792 	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
793 		spin_unlock(&dev->se_port_lock);
794 		spin_unlock(&hba->device_lock);
795 
796 		lun = sep->sep_lun;
797 		tpg = sep->sep_tpg;
798 		spin_lock(&lun->lun_sep_lock);
799 		if (lun->lun_se_dev == NULL) {
800 			spin_unlock(&lun->lun_sep_lock);
801 			continue;
802 		}
803 		spin_unlock(&lun->lun_sep_lock);
804 
805 		core_dev_del_lun(tpg, lun->unpacked_lun);
806 
807 		spin_lock(&hba->device_lock);
808 		spin_lock(&dev->se_port_lock);
809 	}
810 	spin_unlock(&dev->se_port_lock);
811 
812 	return;
813 }
814 
815 /*	se_free_virtual_device():
816  *
817  *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
818  */
819 int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
820 {
821 	spin_lock(&hba->device_lock);
822 	se_clear_dev_ports(dev);
823 	spin_unlock(&hba->device_lock);
824 
825 	core_alua_free_lu_gp_mem(dev);
826 	se_release_device_for_hba(dev);
827 
828 	return 0;
829 }
830 
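/*
 * se_dev_start()/se_dev_stop() below track exports of this device via
 * dev_obj.obj_access_count, moving the device between the
 * (OFFLINE_)DEACTIVATED and (OFFLINE_)ACTIVATED states on first export
 * and last unexport respectively.
 */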
831 static void se_dev_start(struct se_device *dev)
832 {
833 	struct se_hba *hba = dev->se_hba;
834 
835 	spin_lock(&hba->device_lock);
836 	atomic_inc(&dev->dev_obj.obj_access_count);
837 	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
838 		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
839 			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
840 			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
841 		} else if (dev->dev_status &
842 			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
843 			dev->dev_status &=
844 				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
845 			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
846 		}
847 	}
848 	spin_unlock(&hba->device_lock);
849 }
850 
851 static void se_dev_stop(struct se_device *dev)
852 {
853 	struct se_hba *hba = dev->se_hba;
854 
855 	spin_lock(&hba->device_lock);
856 	atomic_dec(&dev->dev_obj.obj_access_count);
857 	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
858 		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
859 			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
860 			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
861 		} else if (dev->dev_status &
862 			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
863 			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
864 			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
865 		}
866 	}
867 	spin_unlock(&hba->device_lock);
868 
869 	while (atomic_read(&hba->dev_mib_access_count))
870 		cpu_relax();
871 }
872 
873 int se_dev_check_online(struct se_device *dev)
874 {
875 	int ret;
876 
877 	spin_lock_irq(&dev->dev_status_lock);
878 	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
879 	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
880 	spin_unlock_irq(&dev->dev_status_lock);
881 
882 	return ret;
883 }
884 
885 int se_dev_check_shutdown(struct se_device *dev)
886 {
887 	int ret;
888 
889 	spin_lock_irq(&dev->dev_status_lock);
890 	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
891 	spin_unlock_irq(&dev->dev_status_lock);
892 
893 	return ret;
894 }
895 
896 void se_dev_set_default_attribs(
897 	struct se_device *dev,
898 	struct se_dev_limits *dev_limits)
899 {
900 	struct queue_limits *limits = &dev_limits->limits;
901 
902 	DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
903 	DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
904 	DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
905 	DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
906 	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
907 	DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
908 	DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
909 	DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
910 	DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
911 	DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
912 	DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
913 	/*
914 	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
915 	 * iblock_create_virtdevice() from struct queue_limits values
916 	 * if blk_queue_discard()==1
917 	 */
918 	DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
919 	DEV_ATTRIB(dev)->max_unmap_block_desc_count =
920 				DA_MAX_UNMAP_BLOCK_DESC_COUNT;
921 	DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
922 	DEV_ATTRIB(dev)->unmap_granularity_alignment =
923 				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
924 	/*
925 	 * block_size is based on subsystem plugin dependent requirements.
926 	 */
927 	DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
928 	DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
929 	/*
930 	 * max_sectors is based on subsystem plugin dependent requirements.
931 	 */
932 	DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
933 	DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
934 	/*
935 	 * Set optimal_sectors from max_sectors, which can be lowered via
936 	 * configfs.
937 	 */
938 	DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
939 	/*
940 	 * queue_depth is based on subsystem plugin dependent requirements.
941 	 */
942 	DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
943 	DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
944 }
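
/*
 * The defaults above can later be tuned per device through the configfs
 * attrib group (path illustrative, from the usual target configfs layout):
 *
 *	echo 4096 > /sys/kernel/config/target/core/$HBA/$DEV/attrib/block_size
 *
 * which lands in the matching se_dev_set_*() setter below.
 */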
945 
946 int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
947 {
948 	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
950 			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
951 		return -1;
952 	} else {
953 		DEV_ATTRIB(dev)->task_timeout = task_timeout;
954 		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
955 			dev, task_timeout);
956 	}
957 
958 	return 0;
959 }
960 
961 int se_dev_set_max_unmap_lba_count(
962 	struct se_device *dev,
963 	u32 max_unmap_lba_count)
964 {
965 	DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
966 	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
967 			dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
968 	return 0;
969 }
970 
971 int se_dev_set_max_unmap_block_desc_count(
972 	struct se_device *dev,
973 	u32 max_unmap_block_desc_count)
974 {
975 	DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
976 	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
977 			dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
978 	return 0;
979 }
980 
981 int se_dev_set_unmap_granularity(
982 	struct se_device *dev,
983 	u32 unmap_granularity)
984 {
985 	DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
986 	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
987 			dev, DEV_ATTRIB(dev)->unmap_granularity);
988 	return 0;
989 }
990 
991 int se_dev_set_unmap_granularity_alignment(
992 	struct se_device *dev,
993 	u32 unmap_granularity_alignment)
994 {
995 	DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
996 	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
997 			dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
998 	return 0;
999 }
1000 
1001 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
1002 {
1003 	if ((flag != 0) && (flag != 1)) {
1004 		printk(KERN_ERR "Illegal value %d\n", flag);
1005 		return -1;
1006 	}
1007 	if (TRANSPORT(dev)->dpo_emulated == NULL) {
1008 		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
1009 		return -1;
1010 	}
1011 	if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
1012 		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
1013 		return -1;
1014 	}
1015 	DEV_ATTRIB(dev)->emulate_dpo = flag;
1016 	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
1017 			" bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
1018 	return 0;
1019 }
1020 
1021 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
1022 {
1023 	if ((flag != 0) && (flag != 1)) {
1024 		printk(KERN_ERR "Illegal value %d\n", flag);
1025 		return -1;
1026 	}
1027 	if (TRANSPORT(dev)->fua_write_emulated == NULL) {
1028 		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
1029 		return -1;
1030 	}
1031 	if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
1032 		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
1033 		return -1;
1034 	}
1035 	DEV_ATTRIB(dev)->emulate_fua_write = flag;
1036 	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
1037 			dev, DEV_ATTRIB(dev)->emulate_fua_write);
1038 	return 0;
1039 }
1040 
1041 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
1042 {
1043 	if ((flag != 0) && (flag != 1)) {
1044 		printk(KERN_ERR "Illegal value %d\n", flag);
1045 		return -1;
1046 	}
1047 	if (TRANSPORT(dev)->fua_read_emulated == NULL) {
1048 		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
1049 		return -1;
1050 	}
1051 	if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
1052 		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
1053 		return -1;
1054 	}
1055 	DEV_ATTRIB(dev)->emulate_fua_read = flag;
1056 	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
1057 			dev, DEV_ATTRIB(dev)->emulate_fua_read);
1058 	return 0;
1059 }
1060 
1061 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1062 {
1063 	if ((flag != 0) && (flag != 1)) {
1064 		printk(KERN_ERR "Illegal value %d\n", flag);
1065 		return -1;
1066 	}
1067 	if (TRANSPORT(dev)->write_cache_emulated == NULL) {
1068 		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
1069 		return -1;
1070 	}
1071 	if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
1072 		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
1073 		return -1;
1074 	}
1075 	DEV_ATTRIB(dev)->emulate_write_cache = flag;
1076 	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1077 			dev, DEV_ATTRIB(dev)->emulate_write_cache);
1078 	return 0;
1079 }
1080 
1081 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1082 {
1083 	if ((flag != 0) && (flag != 1) && (flag != 2)) {
1084 		printk(KERN_ERR "Illegal value %d\n", flag);
1085 		return -1;
1086 	}
1087 
1088 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1089 		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
1090 			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
1091 			" exists\n", dev,
1092 			atomic_read(&dev->dev_export_obj.obj_access_count));
1093 		return -1;
1094 	}
1095 	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
1096 	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1097 		dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
1098 
1099 	return 0;
1100 }
1101 
1102 int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1103 {
1104 	if ((flag != 0) && (flag != 1)) {
1105 		printk(KERN_ERR "Illegal value %d\n", flag);
1106 		return -1;
1107 	}
1108 
1109 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1110 		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
1111 			" dev_export_obj: %d count exists\n", dev,
1112 			atomic_read(&dev->dev_export_obj.obj_access_count));
1113 		return -1;
1114 	}
1115 	DEV_ATTRIB(dev)->emulate_tas = flag;
1116 	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1117 		dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
1118 
1119 	return 0;
1120 }
1121 
1122 int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1123 {
1124 	if ((flag != 0) && (flag != 1)) {
1125 		printk(KERN_ERR "Illegal value %d\n", flag);
1126 		return -1;
1127 	}
1128 	/*
1129 	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
1131 	 */
1132 	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
1133 		printk(KERN_ERR "Generic Block Discard not supported\n");
1134 		return -ENOSYS;
1135 	}
1136 
1137 	DEV_ATTRIB(dev)->emulate_tpu = flag;
1138 	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1139 				dev, flag);
1140 	return 0;
1141 }
1142 
1143 int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1144 {
1145 	if ((flag != 0) && (flag != 1)) {
1146 		printk(KERN_ERR "Illegal value %d\n", flag);
1147 		return -1;
1148 	}
1149 	/*
1150 	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
1152 	 */
1153 	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
1154 		printk(KERN_ERR "Generic Block Discard not supported\n");
1155 		return -ENOSYS;
1156 	}
1157 
1158 	DEV_ATTRIB(dev)->emulate_tpws = flag;
1159 	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1160 				dev, flag);
1161 	return 0;
1162 }
1163 
1164 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1165 {
1166 	if ((flag != 0) && (flag != 1)) {
1167 		printk(KERN_ERR "Illegal value %d\n", flag);
1168 		return -1;
1169 	}
1170 	DEV_ATTRIB(dev)->enforce_pr_isids = flag;
1171 	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1172 		(DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
1173 	return 0;
1174 }
1175 
1176 /*
1177  * Note, this can only be called on unexported SE Device Object.
1178  */
1179 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1180 {
1181 	u32 orig_queue_depth = dev->queue_depth;
1182 
1183 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1184 		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
1185 			" dev_export_obj: %d count exists\n", dev,
1186 			atomic_read(&dev->dev_export_obj.obj_access_count));
1187 		return -1;
1188 	}
1189 	if (!(queue_depth)) {
1190 		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
1191 			"_depth\n", dev);
1192 		return -1;
1193 	}
1194 
1195 	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1196 		if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
1197 			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
1198 				" exceeds TCM/SE_Device TCQ: %u\n",
1199 				dev, queue_depth,
1200 				DEV_ATTRIB(dev)->hw_queue_depth);
1201 			return -1;
1202 		}
1203 	} else {
1204 		if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
1205 			if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
1206 				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
1207 					" %u exceeds TCM/SE_Device MAX"
1208 					" TCQ: %u\n", dev, queue_depth,
1209 					DEV_ATTRIB(dev)->hw_queue_depth);
1210 				return -1;
1211 			}
1212 		}
1213 	}
1214 
1215 	DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
1216 	if (queue_depth > orig_queue_depth)
1217 		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
1218 	else if (queue_depth < orig_queue_depth)
1219 		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
1220 
1221 	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
1222 			dev, queue_depth);
1223 	return 0;
1224 }
1225 
1226 int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1227 {
1228 	int force = 0; /* Force setting for VDEVS */
1229 
1230 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1231 		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
1232 			" max_sectors while dev_export_obj: %d count exists\n",
1233 			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1234 		return -1;
1235 	}
1236 	if (!(max_sectors)) {
1237 		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
1238 			" max_sectors\n", dev);
1239 		return -1;
1240 	}
1241 	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1242 		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
1243 			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
1244 				DA_STATUS_MAX_SECTORS_MIN);
1245 		return -1;
1246 	}
1247 	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1248 		if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
1249 			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
1250 				" greater than TCM/SE_Device max_sectors:"
1251 				" %u\n", dev, max_sectors,
1252 				DEV_ATTRIB(dev)->hw_max_sectors);
1253 			 return -1;
1254 		}
1255 	} else {
1256 		if (!(force) && (max_sectors >
1257 				 DEV_ATTRIB(dev)->hw_max_sectors)) {
1258 			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
1259 				" greater than TCM/SE_Device max_sectors"
1260 				": %u, use force=1 to override.\n", dev,
1261 				max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
1262 			return -1;
1263 		}
1264 		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1265 			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
1266 				" greater than DA_STATUS_MAX_SECTORS_MAX:"
1267 				" %u\n", dev, max_sectors,
1268 				DA_STATUS_MAX_SECTORS_MAX);
1269 			return -1;
1270 		}
1271 	}
1272 
1273 	DEV_ATTRIB(dev)->max_sectors = max_sectors;
	printk(KERN_INFO "dev[%p]: SE Device max_sectors changed to %u\n",
1275 			dev, max_sectors);
1276 	return 0;
1277 }
1278 
1279 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1280 {
1281 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1282 		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
1283 			" optimal_sectors while dev_export_obj: %d count exists\n",
1284 			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1285 		return -EINVAL;
1286 	}
1287 	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1288 		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
1289 				" changed for TCM/pSCSI\n", dev);
1290 		return -EINVAL;
1291 	}
1292 	if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
1293 		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
1294 			" greater than max_sectors: %u\n", dev,
1295 			optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
1296 		return -EINVAL;
1297 	}
1298 
1299 	DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
1300 	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
1301 			dev, optimal_sectors);
1302 	return 0;
1303 }
1304 
1305 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1306 {
1307 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1308 		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
1309 			" while dev_export_obj: %d count exists\n", dev,
1310 			atomic_read(&dev->dev_export_obj.obj_access_count));
1311 		return -1;
1312 	}
1313 
1314 	if ((block_size != 512) &&
1315 	    (block_size != 1024) &&
1316 	    (block_size != 2048) &&
1317 	    (block_size != 4096)) {
		printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
1319 			" for SE device, must be 512, 1024, 2048 or 4096\n",
1320 			dev, block_size);
1321 		return -1;
1322 	}
1323 
1324 	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1325 		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
			" Physical Device, use the Linux/SCSI layer to change"
			" the block_size of the underlying hardware\n", dev);
1328 		return -1;
1329 	}
1330 
1331 	DEV_ATTRIB(dev)->block_size = block_size;
1332 	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
1333 			dev, block_size);
1334 	return 0;
1335 }
1336 
1337 struct se_lun *core_dev_add_lun(
1338 	struct se_portal_group *tpg,
1339 	struct se_hba *hba,
1340 	struct se_device *dev,
1341 	u32 lun)
1342 {
1343 	struct se_lun *lun_p;
1344 	u32 lun_access = 0;
1345 
1346 	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1347 		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
1348 			atomic_read(&dev->dev_access_obj.obj_access_count));
1349 		return NULL;
1350 	}
1351 
1352 	lun_p = core_tpg_pre_addlun(tpg, lun);
1353 	if ((IS_ERR(lun_p)) || !(lun_p))
1354 		return NULL;
1355 
1356 	if (dev->dev_flags & DF_READ_ONLY)
1357 		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1358 	else
1359 		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
1360 
1361 	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
1362 		return NULL;
1363 
1364 	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1365 		" CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
1366 		TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
1367 		TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
1368 	/*
1369 	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acls is enabled.
1371 	 */
1372 	if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
1373 		struct se_node_acl *acl;
1374 		spin_lock_bh(&tpg->acl_node_lock);
1375 		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1376 			if (acl->dynamic_node_acl) {
1377 				spin_unlock_bh(&tpg->acl_node_lock);
1378 				core_tpg_add_node_to_devs(acl, tpg);
1379 				spin_lock_bh(&tpg->acl_node_lock);
1380 			}
1381 		}
1382 		spin_unlock_bh(&tpg->acl_node_lock);
1383 	}
1384 
1385 	return lun_p;
1386 }
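
/*
 * Userspace typically reaches core_dev_add_lun() by populating the fabric's
 * configfs LUN group, e.g. (fabric, IQN and backstore names are illustrative;
 * the symlink name inside lun_0/ is arbitrary):
 *
 *	mkdir -p /sys/kernel/config/target/iscsi/$IQN/tpgt_1/lun/lun_0
 *	ln -s /sys/kernel/config/target/core/$HBA/$DEV \
 *	      /sys/kernel/config/target/iscsi/$IQN/tpgt_1/lun/lun_0/virtual_device
 */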
1387 
1388 /*      core_dev_del_lun():
1389  *
1390  *
1391  */
1392 int core_dev_del_lun(
1393 	struct se_portal_group *tpg,
1394 	u32 unpacked_lun)
1395 {
1396 	struct se_lun *lun;
1397 	int ret = 0;
1398 
1399 	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
1400 	if (!(lun))
1401 		return ret;
1402 
1403 	core_tpg_post_dellun(tpg, lun);
1404 
1405 	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1406 		" device object\n", TPG_TFO(tpg)->get_fabric_name(),
1407 		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
1408 		TPG_TFO(tpg)->get_fabric_name());
1409 
1410 	return 0;
1411 }
1412 
1413 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1414 {
1415 	struct se_lun *lun;
1416 
1417 	spin_lock(&tpg->tpg_lun_lock);
1418 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1419 		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1420 			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
1421 			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1422 			TRANSPORT_MAX_LUNS_PER_TPG-1,
1423 			TPG_TFO(tpg)->tpg_get_tag(tpg));
1424 		spin_unlock(&tpg->tpg_lun_lock);
1425 		return NULL;
1426 	}
1427 	lun = &tpg->tpg_lun_list[unpacked_lun];
1428 
1429 	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1430 		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
1431 			" Target Portal Group: %hu, ignoring request.\n",
1432 			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1433 			TPG_TFO(tpg)->tpg_get_tag(tpg));
1434 		spin_unlock(&tpg->tpg_lun_lock);
1435 		return NULL;
1436 	}
1437 	spin_unlock(&tpg->tpg_lun_lock);
1438 
1439 	return lun;
1440 }
1441 
1442 /*      core_dev_get_lun():
1443  *
1444  *
1445  */
1446 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1447 {
1448 	struct se_lun *lun;
1449 
1450 	spin_lock(&tpg->tpg_lun_lock);
1451 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1452 		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1453 			"_TPG-1: %u for Target Portal Group: %hu\n",
1454 			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1455 			TRANSPORT_MAX_LUNS_PER_TPG-1,
1456 			TPG_TFO(tpg)->tpg_get_tag(tpg));
1457 		spin_unlock(&tpg->tpg_lun_lock);
1458 		return NULL;
1459 	}
1460 	lun = &tpg->tpg_lun_list[unpacked_lun];
1461 
1462 	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1463 		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
1464 			" Target Portal Group: %hu, ignoring request.\n",
1465 			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1466 			TPG_TFO(tpg)->tpg_get_tag(tpg));
1467 		spin_unlock(&tpg->tpg_lun_lock);
1468 		return NULL;
1469 	}
1470 	spin_unlock(&tpg->tpg_lun_lock);
1471 
1472 	return lun;
1473 }
1474 
1475 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1476 	struct se_portal_group *tpg,
1477 	u32 mapped_lun,
1478 	char *initiatorname,
1479 	int *ret)
1480 {
1481 	struct se_lun_acl *lacl;
1482 	struct se_node_acl *nacl;
1483 
1484 	if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
1485 		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
1486 			TPG_TFO(tpg)->get_fabric_name());
1487 		*ret = -EOVERFLOW;
1488 		return NULL;
1489 	}
1490 	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1491 	if (!(nacl)) {
1492 		*ret = -EINVAL;
1493 		return NULL;
1494 	}
1495 	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1496 	if (!(lacl)) {
1497 		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
1498 		*ret = -ENOMEM;
1499 		return NULL;
1500 	}
1501 
1502 	INIT_LIST_HEAD(&lacl->lacl_list);
1503 	lacl->mapped_lun = mapped_lun;
1504 	lacl->se_lun_nacl = nacl;
1505 	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
1506 
1507 	return lacl;
1508 }
1509 
1510 int core_dev_add_initiator_node_lun_acl(
1511 	struct se_portal_group *tpg,
1512 	struct se_lun_acl *lacl,
1513 	u32 unpacked_lun,
1514 	u32 lun_access)
1515 {
1516 	struct se_lun *lun;
1517 	struct se_node_acl *nacl;
1518 
1519 	lun = core_dev_get_lun(tpg, unpacked_lun);
1520 	if (!(lun)) {
1521 		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
1522 			" Target Portal Group: %hu, ignoring request.\n",
1523 			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1524 			TPG_TFO(tpg)->tpg_get_tag(tpg));
1525 		return -EINVAL;
1526 	}
1527 
1528 	nacl = lacl->se_lun_nacl;
1529 	if (!(nacl))
1530 		return -EINVAL;
1531 
1532 	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1533 	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1534 		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1535 
1536 	lacl->se_lun = lun;
1537 
1538 	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
1539 			lun_access, nacl, tpg, 1) < 0)
1540 		return -EINVAL;
1541 
1542 	spin_lock(&lun->lun_acl_lock);
1543 	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1544 	atomic_inc(&lun->lun_acl_count);
1545 	smp_mb__after_atomic_inc();
1546 	spin_unlock(&lun->lun_acl_lock);
1547 
	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
1549 		" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
1550 		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1551 		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1552 		lacl->initiatorname);
1553 	/*
1554 	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
1556 	 */
1557 	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
1558 	return 0;
1559 }
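
/*
 * Typical call sequence from the configfs MappedLUN layer (sketch; error
 * handling abbreviated):
 */
#if 0
	lacl = core_dev_init_initiator_node_lun_acl(tpg, mapped_lun,
				initiatorname, &ret);
	if (!lacl)
		return ret;
	ret = core_dev_add_initiator_node_lun_acl(tpg, lacl, unpacked_lun,
				TRANSPORT_LUNFLAGS_READ_WRITE);
#endif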
1560 
1561 /*      core_dev_del_initiator_node_lun_acl():
1562  *
1563  *
1564  */
1565 int core_dev_del_initiator_node_lun_acl(
1566 	struct se_portal_group *tpg,
1567 	struct se_lun *lun,
1568 	struct se_lun_acl *lacl)
1569 {
1570 	struct se_node_acl *nacl;
1571 
1572 	nacl = lacl->se_lun_nacl;
1573 	if (!(nacl))
1574 		return -EINVAL;
1575 
1576 	spin_lock(&lun->lun_acl_lock);
1577 	list_del(&lacl->lacl_list);
1578 	atomic_dec(&lun->lun_acl_count);
1579 	smp_mb__after_atomic_dec();
1580 	spin_unlock(&lun->lun_acl_lock);
1581 
1582 	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
1583 		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
1584 
1585 	lacl->se_lun = NULL;
1586 
1587 	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1588 		" InitiatorNode: %s Mapped LUN: %u\n",
1589 		TPG_TFO(tpg)->get_fabric_name(),
1590 		TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
1591 		lacl->initiatorname, lacl->mapped_lun);
1592 
1593 	return 0;
1594 }
1595 
1596 void core_dev_free_initiator_node_lun_acl(
1597 	struct se_portal_group *tpg,
1598 	struct se_lun_acl *lacl)
1599 {
	printk(KERN_INFO "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1601 		" Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
1602 		TPG_TFO(tpg)->tpg_get_tag(tpg),
1603 		TPG_TFO(tpg)->get_fabric_name(),
1604 		lacl->initiatorname, lacl->mapped_lun);
1605 
1606 	kfree(lacl);
1607 }
1608 
1609 int core_dev_setup_virtual_lun0(void)
1610 {
1611 	struct se_hba *hba;
1612 	struct se_device *dev;
1613 	struct se_subsystem_dev *se_dev = NULL;
1614 	struct se_subsystem_api *t;
1615 	char buf[16];
1616 	int ret;
1617 
1618 	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
1619 	if (IS_ERR(hba))
1620 		return PTR_ERR(hba);
1621 
1622 	se_global->g_lun0_hba = hba;
1623 	t = hba->transport;
1624 
1625 	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1626 	if (!(se_dev)) {
1627 		printk(KERN_ERR "Unable to allocate memory for"
1628 				" struct se_subsystem_dev\n");
1629 		ret = -ENOMEM;
1630 		goto out;
1631 	}
1632 	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
1633 	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1634 	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1635 	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
1636 	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
1637 	spin_lock_init(&se_dev->t10_reservation.registration_lock);
1638 	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
1639 	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
1640 	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
1641 	spin_lock_init(&se_dev->se_dev_lock);
1642 	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
1643 	se_dev->t10_wwn.t10_sub_dev = se_dev;
1644 	se_dev->t10_alua.t10_sub_dev = se_dev;
1645 	se_dev->se_dev_attrib.da_sub_dev = se_dev;
1646 	se_dev->se_dev_hba = hba;
1647 
1648 	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1649 	if (!(se_dev->se_dev_su_ptr)) {
1650 		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
1651 			" from allocate_virtdevice()\n");
1652 		ret = -ENOMEM;
1653 		goto out;
1654 	}
1655 	se_global->g_lun0_su_dev = se_dev;
1656 
1657 	memset(buf, 0, 16);
1658 	sprintf(buf, "rd_pages=8");
1659 	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
1660 
1661 	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1662 	if (!(dev) || IS_ERR(dev)) {
1663 		ret = -ENOMEM;
1664 		goto out;
1665 	}
1666 	se_dev->se_dev_ptr = dev;
1667 	se_global->g_lun0_dev = dev;
1668 
1669 	return 0;
1670 out:
1671 	se_global->g_lun0_su_dev = NULL;
1672 	kfree(se_dev);
1673 	if (se_global->g_lun0_hba) {
1674 		core_delete_hba(se_global->g_lun0_hba);
1675 		se_global->g_lun0_hba = NULL;
1676 	}
1677 	return ret;
1678 }
1679 
1680 
1681 void core_dev_release_virtual_lun0(void)
1682 {
1683 	struct se_hba *hba = se_global->g_lun0_hba;
1684 	struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
1685 
1686 	if (!(hba))
1687 		return;
1688 
1689 	if (se_global->g_lun0_dev)
1690 		se_free_virtual_device(se_global->g_lun0_dev, hba);
1691 
1692 	kfree(su_dev);
1693 	core_delete_hba(hba);
1694 }
1695