/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
/*	__core_tpg_get_initiator_node_acl():
 *
 *	The caller must hold tpg->acl_node_mutex.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

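/*
 * Example (illustrative sketch only, not part of this file): a caller
 * that needs the lookup plus follow-up work under the same lock holds
 * acl_node_mutex across both steps.  The helper my_mark_acl() is a
 * hypothetical name, and tpg/initiatorname are assumed in scope:
 *
 *	mutex_lock(&tpg->acl_node_mutex);
 *	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *	if (acl)
 *		my_mark_acl(acl);
 *	mutex_unlock(&tpg->acl_node_mutex);
 */
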
/*	core_tpg_get_initiator_node_acl():
 *
 *	Lookup wrapper that takes tpg->acl_node_mutex around
 *	__core_tpg_get_initiator_node_acl().
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

/*	core_tpg_add_node_to_devs():
 *
 *	Create demo-mode MappedLUNs for @acl across the TPG's active LUNs,
 *	or only for @lun_orig when it is non-NULL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in a LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. demo-mode LUNs are
		 * exported READ-ONLY.
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

/*      core_set_queue_depth_for_node():
 *
 *      Sanity-check the ACL's queue depth, defaulting to 1 when unset.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);
	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		acl->queue_depth = 1;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_set_queue_depth_for_node(tpg, acl) < 0)
		goto out_free_acl;

	return acl;

out_free_acl:
	kfree(acl);
	return NULL;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

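/*
 * Example (illustrative sketch only, not part of this file): a fabric
 * driver typically calls core_tpg_check_initiator_node_acl() from its
 * login/connection setup path, so that demo-mode initiators get a
 * dynamic ACL on first contact.  my_fabric_login and struct my_conn
 * are hypothetical names:
 *
 *	static int my_fabric_login(struct se_portal_group *se_tpg,
 *				   struct my_conn *conn,
 *				   unsigned char *initiatorname)
 *	{
 *		struct se_node_acl *nacl;
 *
 *		nacl = core_tpg_check_initiator_node_acl(se_tpg,
 *							 initiatorname);
 *		if (!nacl)
 *			return -EACCES;	(no ACL, and demo mode disabled)
 *		conn->nacl = nacl;
 *		return 0;
 *	}
 */
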
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	mutex_unlock(&tpg->acl_node_mutex);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		/* Drop the reference taken under nacl_sess_lock above. */
		target_put_session(sess);
		if (!rc)
			continue;
		/*
		 * shutdown_session() returned non-zero, so this context
		 * is also responsible for dropping the session's own
		 * reference.
		 */
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for the last target_put_nacl() to complete in
	 * target_complete_nacl(), as active fabric sessions drop their
	 * references from transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for an Initiator Node ACL, optionally
 *	forcing session reinstatement when a session is active.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			mutex_lock(&tpg->acl_node_mutex);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			mutex_unlock(&tpg->acl_node_mutex);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		mutex_lock(&tpg->acl_node_mutex);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		mutex_unlock(&tpg->acl_node_mutex);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	mutex_lock(&tpg->acl_node_mutex);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	mutex_unlock(&tpg->acl_node_mutex);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

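/*
 * Example (illustrative sketch only, not part of this file): a fabric
 * driver's configfs queue-depth attribute store handler might forward
 * to this helper roughly as follows; my_tpg_depth_store is a
 * hypothetical name:
 *
 *	static ssize_t my_tpg_depth_store(struct se_node_acl *se_nacl,
 *					  const char *page, size_t count)
 *	{
 *		u32 depth;
 *		int ret;
 *
 *		ret = kstrtou32(page, 0, &depth);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = core_tpg_set_initiator_node_queue_depth(
 *				se_nacl->se_tpg,
 *				(unsigned char *)se_nacl->initiatorname,
 *				depth, 1);
 *		return ret ? ret : count;
 *	}
 */
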
/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

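/*
 * Example (illustrative sketch only, not part of this file): userspace
 * clears a tag by writing the literal string "NULL".  A hypothetical
 * configfs store handler passes the buffer straight through, treating
 * a negative return as an error and anything else as the tag length:
 *
 *	ret = core_tpg_set_initiator_node_tag(se_tpg, se_nacl, page);
 *	if (ret < 0)
 *		return ret;
 */
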
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

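/*
 * Example (illustrative sketch only, not part of this file): the typical
 * caller is a fabric driver's ->fabric_make_tpg() configfs callback, as
 * described in the comment above.  struct my_tpg and my_make_tpg are
 * hypothetical names:
 *
 *	static struct se_portal_group *my_make_tpg(struct se_wwn *wwn,
 *						   struct config_group *group,
 *						   const char *name)
 *	{
 *		struct my_tpg *tpg;
 *		int ret;
 *
 *		tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *		if (!tpg)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = core_tpg_register(wwn, &tpg->se_tpg,
 *					SCSI_PROTOCOL_ISCSI);
 *		if (ret < 0) {
 *			kfree(tpg);
 *			return ERR_PTR(ret);
 *		}
 *		return &tpg->se_tpg;
 *	}
 */
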
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

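/*
 * Example (illustrative sketch only, not part of this file): the usual
 * counterpart to the my_make_tpg() sketch above is a ->fabric_drop_tpg()
 * callback; my_drop_tpg and struct my_tpg remain hypothetical:
 *
 *	static void my_drop_tpg(struct se_portal_group *se_tpg)
 *	{
 *		struct my_tpg *tpg = container_of(se_tpg,
 *					struct my_tpg, se_tpg);
 *
 *		core_tpg_deregister(se_tpg);
 *		kfree(tpg);
 *	}
 */
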
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	lun->lun_access = lun_access;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

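/*
 * Example (illustrative sketch only, not part of this file): the two
 * helpers above pair up the way core_tpg_register() uses them for the
 * virtual LUN 0.  A caller exporting @dev read-write at @unpacked_lun
 * would do roughly:
 *
 *	struct se_lun *lun;
 *	int ret;
 *
 *	lun = core_tpg_alloc_lun(se_tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *
 *	ret = core_tpg_add_lun(se_tpg, lun,
 *			       TRANSPORT_LUNFLAGS_READ_WRITE, dev);
 *	if (ret < 0)
 *		kfree(lun);
 */
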
void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}