xref: /linux/drivers/target/target_core_tpg.c (revision 8ec3b8432e4fe8d452f88f1ed9a3450e715bb797)
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"

/*	core_clear_initiator_node_from_tpg():
 *
 *	Release the struct se_lun_acl mappings for each LUN the Initiator
 *	Node has access to, as part of removing the Node ACL from the TPG.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entry's device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}

		/*
		 * Drop device_list_lock across the call into
		 * core_update_device_list_for_node(), then reacquire it
		 * at the bottom of the loop.
		 */
		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		/*
		 * Locate the matching struct se_lun_acl.  A plain
		 * list_for_each_entry() cursor is never NULL after the
		 * loop, so track the match in a separate pointer.
		 */
		acl = NULL;
		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry(acl_tmp, &lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl_tmp->initiatorname,
					nacl->initiatorname) &&
			    (acl_tmp->mapped_lun == deve->mapped_lun)) {
				acl = acl_tmp;
				break;
			}
		}

		if (!acl) {
			printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&acl->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(acl);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	Caller must hold tpg->acl_node_lock (spin_lock_bh).
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
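
/*
 * Example (sketch) of the locking the unlocked lookup above requires;
 * core_tpg_add_initiator_node_acl() below follows this pattern:
 *
 *	spin_lock_bh(&tpg->acl_node_lock);
 *	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *	spin_unlock_bh(&tpg->acl_node_lock);
 */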

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locked lookup of an explicitly configured (non-dynamic) Node ACL
 *	by initiator name.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_bh(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *	Map all active LUNs in the TPG into the Node ACL's device list,
 *	applying the demo mode access rules.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. READ_ONLY.
		 */
		if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
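
/*
 * Summary of the demo mode access rules applied above:
 *
 *   - demo_mode_write_protect off: READ-WRITE, unless the device has
 *     DF_READ_ONLY set, in which case READ-ONLY.
 *   - demo_mode_write_protect on: READ-ONLY for TYPE_DISK devices,
 *     READ-WRITE for everything else (e.g. optical drives).
 */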

/*	core_set_queue_depth_for_node():
 *
 *	Sanity check the Node ACL queue depth, falling back to 1 if the
 *	fabric reported a depth of 0.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

/*	core_create_device_list_for_node():
 *
 *	Allocate and initialize the per-Node ACL device list of
 *	TRANSPORT_MAX_LUNS_PER_TPG struct se_dev_entry entries.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!(nacl->device_list)) {
		printk(KERN_ERR "Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -1;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}
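
/*
 * Note: every successful core_create_device_list_for_node() above is
 * paired with a core_free_device_list_for_node() call on the error and
 * teardown paths in this file.
 */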

/*	core_tpg_check_initiator_node_acl()
 *
 *	Look up an existing Node ACL for the initiator, or generate a
 *	dynamic one if the fabric allows demo mode.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
		return NULL;

	acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
	if (!(acl))
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	TPG_TFO(tpg)->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_bh(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

	printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d"
		" for %s Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
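
/*
 * Example (sketch, hypothetical fabric code): a fabric module typically
 * calls this while setting up a session for a new connection:
 *
 *	acl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!acl)
 *		fail the login: no explicit ACL and demo mode is disabled
 */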

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
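
/*
 * Note: core_tpg_wait_for_nacl_pr_ref() busy-waits with cpu_relax();
 * the ACL delete and TPG deregistration paths below spin here until
 * all persistent reservation references to the Node ACL are dropped.
 */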

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i, ret;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *	Add an explicitly configured Node ACL to the TPG, replacing an
 *	existing dynamic (demo mode generated) ACL if one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
				TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
			spin_unlock_bh(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because __core_tpg_get_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		printk(KERN_ERR "ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	if (!(se_nacl)) {
		printk(KERN_ERR "struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	TPG_TFO(tpg)->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

done:
	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
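
/*
 * Example (sketch, with hypothetical fabric names) of how an explicit
 * NodeACL is typically wired up from a fabric module's
 * fabric_make_nodeacl() callback:
 *
 *	nacl = kzalloc(sizeof(struct my_fabric_nacl), GFP_KERNEL);
 *	...
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *			&nacl->se_node_acl, name, queue_depth);
 *	if (IS_ERR(se_nacl)) {
 *		kfree(nacl);
 *		return ERR_CAST(se_nacl);
 *	}
 */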

/*	core_tpg_del_initiator_node_acl():
 *
 *	Remove a Node ACL from the TPG, shutting down any active sessions
 *	for the initiator and releasing its mapped LUN resources.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;

	spin_lock_bh(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		TPG_TFO(tpg)->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for a Node ACL, optionally forcing session
 *	reinstatement if the initiator currently has an active session.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(acl)) {
		printk(KERN_ERR "Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			printk(KERN_ERR "Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				TPG_TFO(tpg)->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_bh(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_bh(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call TPG_TFO(tpg)->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from TPG_TFO(tpg)->shutdown_session() called above.
		 */
		if (init_sess)
			TPG_TFO(tpg)->close_session(init_sess);

		spin_lock_bh(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_bh(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		TPG_TFO(tpg)->close_session(init_sess);

	printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg));

	spin_lock_bh(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_bh(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
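
/*
 * Return codes from core_tpg_set_initiator_node_queue_depth() above:
 *	-ENODEV: no matching Node ACL exists in the TPG
 *	-EEXIST: a session is active and force=1 was not given
 *	-EINVAL: core_set_queue_depth_for_node() failed
 *	0: queue depth updated (sessions may have been reinstated)
 */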

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = se_global->g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return -1;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}
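
/*
 * Note: the virtual LUN 0 handling above gives every Normal TPG a
 * read-only LUN 0 backed by the global g_lun0_dev, so a TPG with no
 * explicitly configured LUN 0 can still answer SCSI discovery (e.g.
 * REPORT LUNS) at LUN 0.
 */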

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!(se_tpg->tpg_lun_list)) {
		printk(KERN_ERR "Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			/*
			 * se_tpg is owned by the fabric caller (see
			 * core_tpg_deregister() below), so only release
			 * the LUN array allocated above.
			 */
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&se_global->se_tpg_lock);
	list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);

	printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
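
/*
 * Example (sketch, with hypothetical fabric names) of TPG registration
 * from a fabric module's configfs make_tpg() callback:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
 *			tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *
 * paired with core_tpg_deregister(&tpg->se_tpg) when the TPG is torn
 * down.
 */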

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		TPG_TFO(se_tpg)->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
		TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));

	spin_lock_bh(&se_global->se_tpg_lock);
	list_del(&se_tpg->se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_bh(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_bh(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_bh(&se_tpg->acl_node_lock);
	}
	spin_unlock_bh(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			TPG_TFO(tpg)->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
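
/*
 * Example (sketch) of the two-phase LUN add; core_tpg_setup_virtual_lun0()
 * above only performs the post step on its embedded LUN, while a full
 * LUN add (assumed to live in core_dev_add_lun()) would do both:
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	ret = core_tpg_post_addlun(tpg, lun, lun_access, dev);
 */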

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	if (core_dev_export(lun_ptr, tpg, lun) < 0)
		return -1;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun,
	int *ret)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
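
/*
 * The delete side mirrors the add side: core_tpg_pre_dellun() validates
 * and returns the active struct se_lun, and core_tpg_post_dellun() below
 * quiesces, unexports, and marks it TRANSPORT_LUN_STATUS_FREE.
 */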

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}