/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"
#include "target_core_stat.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	core_clear_initiator_node_from_tpg():
 *
 *	Clear all MappedLUN struct se_lun_acl entries for a Node ACL that
 *	is being removed from its Target Portal Group.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp, *match;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		/*
		 * Track the match in a separate pointer: the loop cursor is
		 * never NULL after a completed list walk, so testing the
		 * cursor itself cannot detect the not-found case.
		 */
		match = NULL;
		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun)) {
				match = acl;
				break;
			}
		}

		if (!match) {
			pr_err("Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&match->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(match);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	tpg->acl_node_lock must be held when calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
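
/*
 * Illustrative sketch (not part of the original file; 'tpg' and 'name'
 * are hypothetical locals): callers are expected to wrap the unlocked
 * lookup above in acl_node_lock themselves, as done elsewhere in this
 * file, e.g.:
 *
 *	spin_lock_irq(&tpg->acl_node_lock);
 *	acl = __core_tpg_get_initiator_node_acl(tpg, name);
 *	spin_unlock_irq(&tpg->acl_node_lock);
 */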

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locked lookup that only returns explicitly configured (non-dynamic)
 *	Node ACLs.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *	Create demo-mode MappedLUNs for a Node ACL from all active LUNs in
 *	the Target Portal Group.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. LUNs are READ-ONLY.
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
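
/*
 * Illustrative summary (not from the original source): the access granted
 * above works out to the following matrix, taking the fabric's
 * demo_mode_write_protect callback as authoritative:
 *
 *	write_protect off, DF_READ_ONLY clear  -> READ-WRITE
 *	write_protect off, DF_READ_ONLY set    -> READ-ONLY
 *	write_protect on,  TYPE_DISK           -> READ-ONLY
 *	write_protect on,  non-disk (optical)  -> READ-WRITE
 */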

/*	core_set_queue_depth_for_node():
 *
 *	Sanity check a Node ACL's queue depth, falling back to a depth of 1
 *	if an invalid value of 0 was configured.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

/*	core_create_device_list_for_node():
 *
 *	Allocate and initialize a Node ACL's per-TPG device list of
 *	TRANSPORT_MAX_LUNS_PER_TPG struct se_dev_entry slots.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}
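
/*
 * Illustrative sketch (not from the original source; 'nacl' and
 * 'mapped_lun' are hypothetical locals): the device list is indexed
 * directly by the initiator-visible mapped LUN, so a lookup is assumed
 * to be a bounds check plus an array access:
 *
 *	if (mapped_lun < TRANSPORT_MAX_LUNS_PER_TPG)
 *		deve = &nacl->device_list[mapped_lun];
 */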

/*	core_tpg_check_initiator_node_acl():
 *
 *	Locate an existing Node ACL for a fabric login, or create a dynamic
 *	(demo-mode) one if the fabric allows it.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
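
/*
 * Illustrative sketch (not from the original source; the surrounding
 * fabric login function is hypothetical): a fabric module typically calls
 * this from its login path and rejects the session when no ACL exists and
 * demo mode is disabled:
 *
 *	se_sess->se_node_acl =
 *		core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!se_sess->se_node_acl)
 *		return -EPERM;	// login rejected
 */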

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	/* Spin until all Persistent Reservation references have dropped. */
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i, ret;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *	Add an explicitly configured Node ACL to a Target Portal Group,
 *	replacing an existing dynamic (demo-mode) ACL when one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() is
			 * returning a pointer to an existing demo mode node
			 * ACL instead.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
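
/*
 * Illustrative sketch (not from the original source; the 'my_nacl'
 * container is hypothetical): a fabric's configfs ->fabric_make_nodeacl()
 * typically allocates its own ACL wrapper and then hands the embedded
 * struct se_node_acl here:
 *
 *	se_nacl = &my_nacl->se_node_acl;
 *	acl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl,
 *					name, queue_depth);
 *	if (IS_ERR(acl))
 *		return ERR_CAST(acl);
 */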

/*	core_tpg_del_initiator_node_acl():
 *
 *	Remove a Node ACL from its Target Portal Group, shutting down any
 *	active sessions for that initiator first.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		tpg->se_tpg_tfo->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for a Node ACL, optionally forcing session
 *	reinstatement when an active session exists.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
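
/*
 * Illustrative sketch (not from the original source; error handling
 * elided): a configfs store handler would pass force=1 to allow the
 * change while a session is live, at the cost of session reinstatement:
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *					acl->initiatorname, new_depth, 1);
 */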

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			/*
			 * se_tpg is owned by the fabric caller, so release
			 * only the tpg_lun_list allocated above.
			 */
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
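
/*
 * Illustrative sketch (not from the original source; 'my_tpg' and
 * 'my_fabric_ops' are hypothetical fabric structures): a fabric module's
 * configfs ->fabric_make_tpg() would register the embedded
 * struct se_portal_group like so:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *				my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */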

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
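
/*
 * Illustrative sketch (not from the original source; error handling
 * elided): LUN creation is a two-step sequence, with fabric/device
 * specific setup happening between the pre and post calls:
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	// ... fabric/device specific setup ...
 *	ret = core_tpg_post_addlun(tpg, lun, lun_access, dev);
 */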

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun,
	int *ret)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
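
/*
 * Illustrative sketch (not from the original source; error handling
 * elided): LUN removal mirrors the addition sequence above:
 *
 *	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	core_tpg_post_dellun(tpg, lun);
 */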