xref: /linux/drivers/target/target_core_tpg.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	Must be called with tpg->acl_node_mutex held.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locking wrapper around __core_tpg_get_initiator_node_acl() that
 *	acquires and releases tpg->acl_node_mutex around the lookup.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

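/*
 * Walk the node ACL's LUN mappings under RCU and queue a NEXUS LOSS
 * OCCURRED Unit Attention (ASC 0x29) on each mapped LUN, so the
 * initiator is notified on its next command after the nexus is lost.
 */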
void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*	core_tpg_add_node_to_devs():
 *
 *	Create demo-mode MappedLUNs for @acl, either for every LUN
 *	currently exported by @tpg, or only for @lun_orig when non-NULL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default a fabric module's demo_mode_write_protect is ON,
		 * so demo-mode LUNs are exported READ-ONLY.
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * In default read-only demo mode, only non-disk
			 * devices (e.g. optical drives) are allowed R/W
			 * access.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

/*	core_set_queue_depth_for_node():
 *
 *	Sanity-check the ACL's queue depth; a depth of zero is clamped
 *	to 1 so at least one command can be outstanding per initiator.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

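/*
 * Allocate and initialize an se_node_acl.  The allocation is sized to
 * max(sizeof(struct se_node_acl), tfo->node_acl_size) so that fabric
 * drivers may embed se_node_acl at the start of a larger private ACL
 * structure.
 */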
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);
	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		acl->queue_depth = 1;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_set_queue_depth_for_node(tpg, acl) < 0)
		goto out_free_acl;

	return acl;

out_free_acl:
	kfree(acl);
	return NULL;
}

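/*
 * Link a fully initialized ACL into the TPG's acl_node_list under
 * tpg->acl_node_mutex and log the addition.
 */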
static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

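/*
 * Called at fabric session login: return an existing explicit ACL for
 * @initiatorname, or, when the fabric permits demo mode, allocate a
 * dynamic node ACL on the fly.  Returns NULL if no ACL exists and demo
 * mode is disabled.
 */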
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

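/*
 * Busy-wait until all persistent reservation references to the node
 * ACL have been dropped before it is freed.
 */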
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

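/*
 * Configfs path for creating an explicit node ACL.  An existing dynamic
 * (demo-mode) ACL for the same initiator is promoted to an explicit one
 * in place; an existing explicit ACL is an error (-EEXIST).
 */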
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

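/*
 * Configfs path for deleting a node ACL: unlink it from the TPG, mark
 * it stopped, shut down any active sessions for the initiator, then
 * wait for the final ACL reference and all PR references to drop
 * before releasing the LUN mappings and the ACL itself.
 */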
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	mutex_unlock(&tpg->acl_node_mutex);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		/* Drop the reference taken under nacl_sess_lock above. */
		target_put_session(sess);
		if (!rc)
			continue;
		/*
		 * If ->shutdown_session() returned non-zero, this context
		 * also drops the session's remaining reference so release
		 * can proceed.
		 */
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for the final kref_put() on acl->acl_kref; target_complete_nacl()
	 * completes acl_free_comp once the last active fabric session has run
	 * transport_deregister_session().
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for an initiator node ACL.  If a session
 *	is active, force=1 is required and triggers session reinstatement.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			mutex_lock(&tpg->acl_node_mutex);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			mutex_unlock(&tpg->acl_node_mutex);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		mutex_lock(&tpg->acl_node_mutex);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		mutex_unlock(&tpg->acl_node_mutex);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	mutex_lock(&tpg->acl_node_mutex);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	mutex_unlock(&tpg->acl_node_mutex);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

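/*
 * percpu_ref release callback for se_lun->lun_ref; fires once the last
 * I/O reference is dropped so core_tpg_remove_lun() can finish waiting
 * in transport_clear_lun_ref().
 */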
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

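/*
 * Register a TPG with target-core.  A minimal sketch of the typical
 * caller, a fabric driver's ->fabric_make_tpg() configfs callback
 * (the xxx_* names below are illustrative, not from this file):
 *
 *	static struct se_portal_group *xxx_make_tpg(struct se_wwn *wwn,
 *						    const char *name)
 *	{
 *		struct xxx_tpg *tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *		int ret;
 *
 *		if (!tpg)
 *			return ERR_PTR(-ENOMEM);
 *		ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *		if (ret < 0) {
 *			kfree(tpg);
 *			return ERR_PTR(ret);
 *		}
 *		return &tpg->se_tpg;
 *	}
 *
 * Passing a negative proto_id skips virtual LUN0 setup, for TPGs that
 * do not export LUNs.
 */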
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

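/*
 * Undo core_tpg_register(): unlink the TPG, wait for PR references to
 * drop, release any remaining (demo-mode) node ACLs, and remove the
 * internal virtual LUN0 when one was set up.
 */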
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acls that have
	 * not been released because TFO->tpg_check_demo_mode_cache() == 1
	 * kept them cached past transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

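/*
 * Allocate and initialize an se_lun for @tpg.  The LUN is not visible
 * until core_tpg_add_lun() links it to a backend device and publishes
 * it on tpg->tpg_lun_hlist.
 */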
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

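/*
 * Activate a LUN: initialize its percpu I/O reference, assign a
 * relative target port identifier (RTPI), attach the default ALUA
 * target port group, link the LUN to @dev, and publish it RCU-safely
 * on tpg->tpg_lun_hlist.
 */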
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun->lun_access = lun_access;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

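/*
 * Tear down a LUN: hide it from new I/O, wait for outstanding percpu
 * lun_ref references to drain, detach the ALUA target port group, and
 * unlink the LUN from its backend device.
 */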
void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}
722