xref: /linux/drivers/target/target_core_alua.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
1 /*******************************************************************************
2  * Filename:  target_core_alua.c
3  *
4  * This file contains SPC-3 compliant asymmetric logical unit access (ALUA)
5  *
6  * (c) Copyright 2009-2012 RisingTide Systems LLC.
7  *
8  * Nicholas A. Bellinger <nab@kernel.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  *
24  ******************************************************************************/
25 
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/configfs.h>
29 #include <linux/export.h>
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_cmnd.h>
32 #include <asm/unaligned.h>
33 
34 #include <target/target_core_base.h>
35 #include <target/target_core_backend.h>
36 #include <target/target_core_fabric.h>
37 #include <target/target_core_configfs.h>
38 
39 #include "target_core_internal.h"
40 #include "target_core_alua.h"
41 #include "target_core_ua.h"
42 
43 static sense_reason_t core_alua_check_transition(int state, int *primary);
44 static int core_alua_set_tg_pt_secondary_state(
45 		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
46 		struct se_port *port, int explict, int offline);
47 
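/*
 * Global ALUA Logical Unit group state: lu_gps_list holds every configured
 * LU group, alua_lu_gps_counter hands out new group IDs, alua_lu_gps_count
 * tracks how many groups exist, and all of it is protected by lu_gps_lock.
 */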
48 static u16 alua_lu_gps_counter;
49 static u32 alua_lu_gps_count;
50 
51 static DEFINE_SPINLOCK(lu_gps_lock);
52 static LIST_HEAD(lu_gps_list);
53 
54 struct t10_alua_lu_gp *default_lu_gp;
55 
56 /*
57  * REPORT_TARGET_PORT_GROUPS
58  *
59  * See spc4r17 section 6.27
60  */
61 sense_reason_t
62 target_emulate_report_target_port_groups(struct se_cmd *cmd)
63 {
64 	struct se_device *dev = cmd->se_dev;
65 	struct se_port *port;
66 	struct t10_alua_tg_pt_gp *tg_pt_gp;
67 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
68 	unsigned char *buf;
69 	u32 rd_len = 0, off;
70 	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
71 
72 	/*
73 	 * Skip over RESERVED area to the first Target port group descriptor
74 	 * depending on the PARAMETER DATA FORMAT type.
75 	 */
76 	if (ext_hdr != 0)
77 		off = 8;
78 	else
79 		off = 4;
80 
81 	if (cmd->data_length < off) {
82 		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
83 			" small for %s header\n", cmd->data_length,
84 			(ext_hdr) ? "extended" : "normal");
85 		return TCM_INVALID_CDB_FIELD;
86 	}
87 	buf = transport_kmap_data_sg(cmd);
88 	if (!buf)
89 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
90 
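	/*
	 * Each target port group descriptor built below is 8 bytes, followed
	 * by one 4 byte target port descriptor per member port (spc4r17
	 * section 6.27): byte 0 carries the PREF bit (0x80) plus the
	 * ASYMMETRIC ACCESS STATE in bits 3:0, byte 1 the supported state
	 * bits (T_SUP..AO_SUP), bytes 2-3 the TARGET PORT GROUP identifier,
	 * byte 5 the STATUS CODE, byte 6 a vendor specific byte, and byte 7
	 * the TARGET PORT COUNT.
	 */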
91 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
92 	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
93 			tg_pt_gp_list) {
94 		/*
95 		 * Check if the Target port group and Target port descriptor list
96 		 * based on tg_pt_gp_members count will fit into the response payload.
97 		 * Otherwise, bump rd_len to let the initiator know we have exceeded
98 		 * the allocation length and the response is truncated.
99 		 */
100 		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
101 		     cmd->data_length) {
102 			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
103 			continue;
104 		}
105 		/*
106 		 * PREF: Preferred target port bit, determine if this
107 		 * bit should be set for this port group.
108 		 */
109 		if (tg_pt_gp->tg_pt_gp_pref)
110 			buf[off] = 0x80;
111 		/*
112 		 * Set the ASYMMETRIC ACCESS State
113 		 */
114 		buf[off++] |= (atomic_read(
115 			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
116 		/*
117 		 * Set supported ASYMMETRIC ACCESS State bits
118 		 */
119 		buf[off] = 0x80; /* T_SUP */
120 		buf[off] |= 0x40; /* O_SUP */
121 		buf[off] |= 0x8; /* U_SUP */
122 		buf[off] |= 0x4; /* S_SUP */
123 		buf[off] |= 0x2; /* AN_SUP */
124 		buf[off++] |= 0x1; /* AO_SUP */
125 		/*
126 		 * TARGET PORT GROUP
127 		 */
128 		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
129 		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
130 
131 		off++; /* Skip over Reserved */
132 		/*
133 		 * STATUS CODE
134 		 */
135 		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
136 		/*
137 		 * Vendor Specific field
138 		 */
139 		buf[off++] = 0x00;
140 		/*
141 		 * TARGET PORT COUNT
142 		 */
143 		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
144 		rd_len += 8;
145 
146 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
147 		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
148 				tg_pt_gp_mem_list) {
149 			port = tg_pt_gp_mem->tg_pt;
150 			/*
151 			 * Start Target Port descriptor format
152 			 *
153 			 * See spc4r17 section 6.27 Table 247
154 			 */
155 			off += 2; /* Skip over Obsolete */
156 			/*
157 			 * Set RELATIVE TARGET PORT IDENTIFIER
158 			 */
159 			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
160 			buf[off++] = (port->sep_rtpi & 0xff);
161 			rd_len += 4;
162 		}
163 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
164 	}
165 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
166 	/*
167 	 * Set the RETURN DATA LENGTH in the header of the DataIN payload
168 	 */
169 	put_unaligned_be32(rd_len, &buf[0]);
170 
171 	/*
172 	 * Fill in the Extended header parameter data format if requested
173 	 */
174 	if (ext_hdr != 0) {
175 		buf[4] = 0x10;
176 		/*
177 		 * Set the implicit transition time (in seconds) for the application
178 		 * client to use as a base for its transition timeout value.
179 		 *
180 		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
181 		 * this CDB was received upon to determine this value individually
182 		 * for the ALUA target port group.
183 		 */
184 		port = cmd->se_lun->lun_sep;
185 		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
186 		if (tg_pt_gp_mem) {
187 			spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
188 			tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
189 			if (tg_pt_gp)
190 				buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
191 			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
192 		}
193 	}
194 	transport_kunmap_data_sg(cmd);
195 
196 	target_complete_cmd(cmd, GOOD);
197 	return 0;
198 }
199 
200 /*
201  * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
202  *
203  * See spc4r17 section 6.35
204  */
205 sense_reason_t
206 target_emulate_set_target_port_groups(struct se_cmd *cmd)
207 {
208 	struct se_device *dev = cmd->se_dev;
209 	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
210 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
211 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
212 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
213 	unsigned char *buf;
214 	unsigned char *ptr;
215 	sense_reason_t rc = 0; /* an empty descriptor list must not leave rc unset */
216 	u32 len = 4; /* Skip over RESERVED area in header */
217 	int alua_access_state, primary = 0;
218 	u16 tg_pt_id, rtpi;
219 
220 	if (!l_port)
221 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
222 
223 	if (cmd->data_length < 4) {
224 		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
225 			" small\n", cmd->data_length);
226 		return TCM_INVALID_PARAMETER_LIST;
227 	}
228 
229 	buf = transport_kmap_data_sg(cmd);
230 	if (!buf)
231 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
232 
233 	/*
234 	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
235 	 * for the local tg_pt_gp.
236 	 */
237 	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
238 	if (!l_tg_pt_gp_mem) {
239 		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
240 		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
241 		goto out;
242 	}
243 	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
244 	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
245 	if (!l_tg_pt_gp) {
246 		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
247 		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
248 		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
249 		goto out;
250 	}
251 	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
252 
253 	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
254 		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
255 				" while TPGS_EXPLICT_ALUA is disabled\n");
256 		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
257 		goto out;
258 	}
259 
260 	ptr = &buf[4]; /* Skip over RESERVED area in header */
261 
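	/*
	 * Each set target port group descriptor is 4 bytes: byte 0 bits 3:0
	 * carry the requested ASYMMETRIC ACCESS STATE, and bytes 2-3 carry
	 * either a TARGET PORT GROUP identifier (primary states) or a
	 * RELATIVE TARGET PORT IDENTIFIER (secondary OFFLINE state), which
	 * is why ptr and len advance by 4 per descriptor below.
	 */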
262 	while (len < cmd->data_length) {
263 		bool found = false;
264 		alua_access_state = (ptr[0] & 0x0f);
265 		/*
266 		 * Check the received ALUA access state, and determine if
267 		 * the state is a primary or secondary target port asymmetric
268 		 * access state.
269 		 */
270 		rc = core_alua_check_transition(alua_access_state, &primary);
271 		if (rc) {
272 			/*
273 			 * If the SET TARGET PORT GROUPS attempts to establish
274 			 * an invalid combination of target port asymmetric
275 			 * access states or attempts to establish an
276 			 * unsupported target port asymmetric access state,
277 			 * then the command shall be terminated with CHECK
278 			 * CONDITION status, with the sense key set to ILLEGAL
279 			 * REQUEST, and the additional sense code set to INVALID
280 			 * FIELD IN PARAMETER LIST.
281 			 */
282 			goto out;
283 		}
284 
285 		/*
286 		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
287 		 * specifies a primary target port asymmetric access state,
288 		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
289 		 * a primary target port group for which the primary target
290 		 * port asymmetric access state shall be changed. If the
291 		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
292 		 * port asymmetric access state, then the TARGET PORT GROUP OR
293 		 * TARGET PORT field specifies the relative target port
294 		 * identifier (see 3.1.120) of the target port for which the
295 		 * secondary target port asymmetric access state shall be
296 		 * changed.
297 		 */
298 		if (primary) {
299 			tg_pt_id = get_unaligned_be16(ptr + 2);
300 			/*
301 			 * Locate the matching target port group ID from
302 			 * the global tg_pt_gp list
303 			 */
304 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
305 			list_for_each_entry(tg_pt_gp,
306 					&dev->t10_alua.tg_pt_gps_list,
307 					tg_pt_gp_list) {
308 				if (!tg_pt_gp->tg_pt_gp_valid_id)
309 					continue;
310 
311 				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
312 					continue;
313 
314 				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
315 				smp_mb__after_atomic_inc();
316 
317 				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
318 
319 				if (!core_alua_do_port_transition(tg_pt_gp,
320 						dev, l_port, nacl,
321 						alua_access_state, 1))
322 					found = true;
323 
324 				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
325 				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
326 				smp_mb__after_atomic_dec();
327 				break;
328 			}
329 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
330 		} else {
331 			/*
332 			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
333 			 * the Target Port in question for the incoming
334 			 * SET_TARGET_PORT_GROUPS op.
335 			 */
336 			rtpi = get_unaligned_be16(ptr + 2);
337 			/*
338 			 * Locate the matching relative target port identifier
339 			 * for the struct se_device storage object.
340 			 */
341 			spin_lock(&dev->se_port_lock);
342 			list_for_each_entry(port, &dev->dev_sep_list,
343 							sep_list) {
344 				if (port->sep_rtpi != rtpi)
345 					continue;
346 
347 				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
348 
349 				spin_unlock(&dev->se_port_lock);
350 
351 				if (!core_alua_set_tg_pt_secondary_state(
352 						tg_pt_gp_mem, port, 1, 1))
353 					found = true;
354 
355 				spin_lock(&dev->se_port_lock);
356 				break;
357 			}
358 			spin_unlock(&dev->se_port_lock);
359 		}
360 
361 		if (!found) {
362 			rc = TCM_INVALID_PARAMETER_LIST;
363 			goto out;
364 		}
365 
366 		ptr += 4;
367 		len += 4;
368 	}
369 
370 out:
371 	transport_kunmap_data_sg(cmd);
372 	if (!rc)
373 		target_complete_cmd(cmd, GOOD);
374 	return rc;
375 }
376 
377 static inline int core_alua_state_nonoptimized(
378 	struct se_cmd *cmd,
379 	unsigned char *cdb,
380 	int nonop_delay_msecs,
381 	u8 *alua_ascq)
382 {
383 	/*
384 	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
385 	 * later to determine if processing of this cmd needs to be
386 	 * temporarily delayed for the Active/NonOptimized primary access state.
387 	 */
388 	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
389 	cmd->alua_nonop_delay = nonop_delay_msecs;
390 	return 0;
391 }
392 
393 static inline int core_alua_state_standby(
394 	struct se_cmd *cmd,
395 	unsigned char *cdb,
396 	u8 *alua_ascq)
397 {
398 	/*
399 	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
400 	 * spc4r17 section 5.9.2.4.4
401 	 */
402 	switch (cdb[0]) {
403 	case INQUIRY:
404 	case LOG_SELECT:
405 	case LOG_SENSE:
406 	case MODE_SELECT:
407 	case MODE_SENSE:
408 	case REPORT_LUNS:
409 	case RECEIVE_DIAGNOSTIC:
410 	case SEND_DIAGNOSTIC:
411 	case MAINTENANCE_IN:
412 		switch (cdb[1] & 0x1f) {
413 		case MI_REPORT_TARGET_PGS:
414 			return 0;
415 		default:
416 			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
417 			return 1;
418 		}
419 	case MAINTENANCE_OUT:
420 		switch (cdb[1]) {
421 		case MO_SET_TARGET_PGS:
422 			return 0;
423 		default:
424 			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
425 			return 1;
426 		}
427 	case REQUEST_SENSE:
428 	case PERSISTENT_RESERVE_IN:
429 	case PERSISTENT_RESERVE_OUT:
430 	case READ_BUFFER:
431 	case WRITE_BUFFER:
432 		return 0;
433 	default:
434 		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
435 		return 1;
436 	}
437 
438 	return 0;
439 }
440 
441 static inline int core_alua_state_unavailable(
442 	struct se_cmd *cmd,
443 	unsigned char *cdb,
444 	u8 *alua_ascq)
445 {
446 	/*
447 	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
448 	 * spc4r17 section 5.9.2.4.5
449 	 */
450 	switch (cdb[0]) {
451 	case INQUIRY:
452 	case REPORT_LUNS:
453 	case MAINTENANCE_IN:
454 		switch (cdb[1] & 0x1f) {
455 		case MI_REPORT_TARGET_PGS:
456 			return 0;
457 		default:
458 			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
459 			return 1;
460 		}
461 	case MAINTENANCE_OUT:
462 		switch (cdb[1]) {
463 		case MO_SET_TARGET_PGS:
464 			return 0;
465 		default:
466 			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
467 			return 1;
468 		}
469 	case REQUEST_SENSE:
470 	case READ_BUFFER:
471 	case WRITE_BUFFER:
472 		return 0;
473 	default:
474 		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
475 		return 1;
476 	}
477 
478 	return 0;
479 }
480 
481 static inline int core_alua_state_transition(
482 	struct se_cmd *cmd,
483 	unsigned char *cdb,
484 	u8 *alua_ascq)
485 {
486 	/*
487 	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
488 	 * spc4r17 section 5.9.2.5
489 	 */
490 	switch (cdb[0]) {
491 	case INQUIRY:
492 	case REPORT_LUNS:
493 	case MAINTENANCE_IN:
494 		switch (cdb[1] & 0x1f) {
495 		case MI_REPORT_TARGET_PGS:
496 			return 0;
497 		default:
498 			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
499 			return 1;
500 		}
501 	case REQUEST_SENSE:
502 	case READ_BUFFER:
503 	case WRITE_BUFFER:
504 		return 0;
505 	default:
506 		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
507 		return 1;
508 	}
509 
510 	return 0;
511 }
512 
513 /*
514  * Returns 0 when the command may proceed in the current ALUA state;
515  * TCM_CHECK_CONDITION_NOT_READY when the LUN is not accessible; or
516  * TCM_INVALID_CDB_FIELD for an unknown primary ALUA access state.
517  */
518 sense_reason_t
519 target_alua_state_check(struct se_cmd *cmd)
520 {
521 	struct se_device *dev = cmd->se_dev;
522 	unsigned char *cdb = cmd->t_task_cdb;
523 	struct se_lun *lun = cmd->se_lun;
524 	struct se_port *port = lun->lun_sep;
525 	struct t10_alua_tg_pt_gp *tg_pt_gp;
526 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
527 	int out_alua_state, nonop_delay_msecs;
528 	u8 alua_ascq;
529 	int ret;
530 
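	/*
	 * ALUA state checking only applies to virtual target_core_mod
	 * backends; internal-use HBAs and pSCSI passthrough devices
	 * (TRANSPORT_PLUGIN_PHBA_PDEV) are skipped here.
	 */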
531 	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
532 		return 0;
533 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
534 		return 0;
535 
536 	if (!port)
537 		return 0;
538 	/*
539 	 * First, check for a struct se_port specific secondary ALUA target port
540 	 * access state: OFFLINE
541 	 */
542 	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
543 		pr_debug("ALUA: Got secondary offline status for local"
544 				" target port\n");
545 		alua_ascq = ASCQ_04H_ALUA_OFFLINE;
546 		ret = 1;
547 		goto out;
548 	}
549 	/*
550 	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
551 	 * ALUA target port group, to obtain the current ALUA access state.
552 	 * Otherwise look for the underlying struct se_device association with
553 	 * an ALUA logical unit group.
554 	 */
555 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
556 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
557 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
558 	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
559 	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
560 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
561 	/*
562 	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
563 	 * statement so the common Active/Optimized case is checked first.
564 	 * For the Optimized ALUA access state case, we want to process the
565 	 * incoming fabric cmd ASAP.
566 	 */
567 	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
568 		return 0;
569 
570 	switch (out_alua_state) {
571 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
572 		ret = core_alua_state_nonoptimized(cmd, cdb,
573 					nonop_delay_msecs, &alua_ascq);
574 		break;
575 	case ALUA_ACCESS_STATE_STANDBY:
576 		ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
577 		break;
578 	case ALUA_ACCESS_STATE_UNAVAILABLE:
579 		ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
580 		break;
581 	case ALUA_ACCESS_STATE_TRANSITION:
582 		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
583 		break;
584 	/*
585 	 * OFFLINE is a secondary ALUA target port group access state that is
586 	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
587 	 */
588 	case ALUA_ACCESS_STATE_OFFLINE:
589 	default:
590 		pr_err("Unknown ALUA access state: 0x%02x\n",
591 				out_alua_state);
592 		return TCM_INVALID_CDB_FIELD;
593 	}
594 
595 out:
596 	if (ret > 0) {
597 		/*
598 		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
599 		 * the ALUA additional sense code qualifier (ASCQ) is determined
600 		 * by the ALUA primary or secondary access state.
601 		 */
602 		pr_debug("[%s]: ALUA TG Port not available, "
603 			"SenseKey: NOT_READY, ASC/ASCQ: "
604 			"0x04/0x%02x\n",
605 			cmd->se_tfo->get_fabric_name(), alua_ascq);
606 
607 		cmd->scsi_asc = 0x04;
608 		cmd->scsi_ascq = alua_ascq;
609 		return TCM_CHECK_CONDITION_NOT_READY;
610 	}
611 
612 	return 0;
613 }
614 
615 /*
616  * Check an implicit or explicit ALUA state change request.
617  */
618 static sense_reason_t
619 core_alua_check_transition(int state, int *primary)
620 {
621 	switch (state) {
622 	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
623 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
624 	case ALUA_ACCESS_STATE_STANDBY:
625 	case ALUA_ACCESS_STATE_UNAVAILABLE:
626 		/*
627 		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
628 		 * defined as primary target port asymmetric access states.
629 		 */
630 		*primary = 1;
631 		break;
632 	case ALUA_ACCESS_STATE_OFFLINE:
633 		/*
634 		 * OFFLINE state is defined as a secondary target port
635 		 * asymmetric access state.
636 		 */
637 		*primary = 0;
638 		break;
639 	default:
640 		pr_err("Unknown ALUA access state: 0x%02x\n", state);
641 		return TCM_INVALID_PARAMETER_LIST;
642 	}
643 
644 	return 0;
645 }
646 
647 static char *core_alua_dump_state(int state)
648 {
649 	switch (state) {
650 	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
651 		return "Active/Optimized";
652 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
653 		return "Active/NonOptimized";
654 	case ALUA_ACCESS_STATE_STANDBY:
655 		return "Standby";
656 	case ALUA_ACCESS_STATE_UNAVAILABLE:
657 		return "Unavailable";
658 	case ALUA_ACCESS_STATE_OFFLINE:
659 		return "Offline";
660 	default:
661 		return "Unknown";
662 	}
663 
664 	return NULL;
665 }
666 
667 char *core_alua_dump_status(int status)
668 {
669 	switch (status) {
670 	case ALUA_STATUS_NONE:
671 		return "None";
672 	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
673 		return "Altered by Explict STPG";
674 	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
675 		return "Altered by Implict ALUA";
676 	default:
677 		return "Unknown";
678 	}
679 
680 	return NULL;
681 }
682 
683 /*
684  * Used by fabric modules to determine when we need to delay processing
685  * for the Active/NonOptimized paths..
686  */
687 int core_alua_check_nonop_delay(
688 	struct se_cmd *cmd)
689 {
690 	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
691 		return 0;
692 	if (in_interrupt())
693 		return 0;
694 	/*
695 	 * The ALUA Active/NonOptimized access state delay can be disabled
696 	 * via configfs with a value of zero
697 	 */
698 	if (!cmd->alua_nonop_delay)
699 		return 0;
700 	/*
701 	 * struct se_cmd->alua_nonop_delay gets set by a target port group
702 	 * defined interval in core_alua_state_nonoptimized()
703 	 */
704 	msleep_interruptible(cmd->alua_nonop_delay);
705 	return 0;
706 }
707 EXPORT_SYMBOL(core_alua_check_nonop_delay);
708 
709 /*
710  * Called with tg_pt_gp->tg_pt_gp_md_mutex or port->sep_tg_pt_md_mutex held.
712  */
713 static int core_alua_write_tpg_metadata(
714 	const char *path,
715 	unsigned char *md_buf,
716 	u32 md_buf_len)
717 {
718 	mm_segment_t old_fs;
719 	struct file *file;
720 	struct iovec iov[1];
721 	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
722 
723 	memset(iov, 0, sizeof(struct iovec));
724 
725 	file = filp_open(path, flags, 0600);
726 	if (IS_ERR(file) || !file || !file->f_dentry) {
727 		pr_err("filp_open(%s) for ALUA metadata failed\n",
728 			path);
729 		return -ENODEV;
730 	}
731 
732 	iov[0].iov_base = &md_buf[0];
733 	iov[0].iov_len = md_buf_len;
734 
735 	old_fs = get_fs();
736 	set_fs(get_ds());
737 	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
738 	set_fs(old_fs);
739 
740 	if (ret < 0) {
741 		pr_err("Error writing ALUA metadata file: %s\n", path);
742 		filp_close(file, NULL);
743 		return -EIO;
744 	}
745 	filp_close(file, NULL);
746 
747 	return 0;
748 }
749 
750 /*
751  * Called with tg_pt_gp->tg_pt_gp_md_mutex held
752  */
753 static int core_alua_update_tpg_primary_metadata(
754 	struct t10_alua_tg_pt_gp *tg_pt_gp,
755 	int primary_state,
756 	unsigned char *md_buf)
757 {
758 	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
759 	char path[ALUA_METADATA_PATH_LEN];
760 	int len;
761 
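	/*
	 * The primary state metadata is written as key=value text under
	 * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp_name>, for example
	 * (illustrative values):
	 *
	 *   tg_pt_gp_id=0
	 *   alua_access_state=0x00
	 *   alua_access_status=0x01
	 */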
762 	memset(path, 0, ALUA_METADATA_PATH_LEN);
763 
764 	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
765 			"tg_pt_gp_id=%hu\n"
766 			"alua_access_state=0x%02x\n"
767 			"alua_access_status=0x%02x\n",
768 			tg_pt_gp->tg_pt_gp_id, primary_state,
769 			tg_pt_gp->tg_pt_gp_alua_access_status);
770 
771 	snprintf(path, ALUA_METADATA_PATH_LEN,
772 		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
773 		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
774 
775 	return core_alua_write_tpg_metadata(path, md_buf, len);
776 }
777 
778 static int core_alua_do_transition_tg_pt(
779 	struct t10_alua_tg_pt_gp *tg_pt_gp,
780 	struct se_port *l_port,
781 	struct se_node_acl *nacl,
782 	unsigned char *md_buf,
783 	int new_state,
784 	int explict)
785 {
786 	struct se_dev_entry *se_deve;
787 	struct se_lun_acl *lacl;
788 	struct se_port *port;
789 	struct t10_alua_tg_pt_gp_member *mem;
790 	int old_state = 0;
791 	/*
792 	 * Save the old primary ALUA access state, and set the current state
793 	 * to ALUA_ACCESS_STATE_TRANSITION.
794 	 */
795 	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
796 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
797 			ALUA_ACCESS_STATE_TRANSITION);
798 	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
799 				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
800 				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
801 	/*
802 	 * Check for the optional ALUA primary state transition delay
803 	 */
804 	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
805 		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
806 
807 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
808 	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
809 				tg_pt_gp_mem_list) {
810 		port = mem->tg_pt;
811 		/*
812 		 * After an implicit target port asymmetric access state
813 		 * change, a device server shall establish a unit attention
814 		 * condition for the initiator port associated with every I_T
815 		 * nexus with the additional sense code set to ASYMMETRIC
816 		 * ACCESS STATE CHANGED.
817 		 *
818 		 * After an explicit target port asymmetric access state
819 		 * change, a device server shall establish a unit attention
820 		 * condition with the additional sense code set to ASYMMETRIC
821 		 * ACCESS STATE CHANGED for the initiator port associated with
822 		 * every I_T nexus other than the I_T nexus on which the SET
823 		 * TARGET PORT GROUPS command was received.
824 		 */
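		/*
		 * Hold a member reference so it is safe to drop
		 * tg_pt_gp_lock while the unit attentions are allocated
		 * under the per-port sep_alua_lock below.
		 */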
825 		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
826 		smp_mb__after_atomic_inc();
827 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
828 
829 		spin_lock_bh(&port->sep_alua_lock);
830 		list_for_each_entry(se_deve, &port->sep_alua_list,
831 					alua_port_list) {
832 			lacl = se_deve->se_lun_acl;
833 			/*
834 			 * se_deve->se_lun_acl pointer may be NULL for an
835 			 * entry created without explicit Node+MappedLUN ACLs
836 			 */
837 			if (!lacl)
838 				continue;
839 
840 			if (explict &&
841 			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
842 			   (l_port != NULL) && (l_port == port))
843 				continue;
844 
845 			core_scsi3_ua_allocate(lacl->se_lun_nacl,
846 				se_deve->mapped_lun, 0x2A,
847 				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
848 		}
849 		spin_unlock_bh(&port->sep_alua_lock);
850 
851 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
852 		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
853 		smp_mb__after_atomic_dec();
854 	}
855 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
856 	/*
857 	 * Update the ALUA metadata buf that has been allocated in
858 	 * core_alua_do_port_transition(); this metadata will be written
859 	 * to struct file.
860 	 *
861 	 * Note that there is the case where we do not want to update the
862 	 * metadata when the saved metadata is being parsed in userspace
863 	 * when setting the existing port access state and access status.
864 	 *
865 	 * Also note that the failure to write out the ALUA metadata to
866 	 * struct file does NOT affect the actual ALUA transition.
867 	 */
868 	if (tg_pt_gp->tg_pt_gp_write_metadata) {
869 		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
870 		core_alua_update_tpg_primary_metadata(tg_pt_gp,
871 					new_state, md_buf);
872 		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
873 	}
874 	/*
875 	 * Set the current primary ALUA access state to the requested new state
876 	 */
877 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
878 
879 	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
880 		" from primary access state %s to %s\n", (explict) ? "explict" :
881 		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
882 		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
883 		core_alua_dump_state(new_state));
884 
885 	return 0;
886 }
887 
888 int core_alua_do_port_transition(
889 	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
890 	struct se_device *l_dev,
891 	struct se_port *l_port,
892 	struct se_node_acl *l_nacl,
893 	int new_state,
894 	int explict)
895 {
896 	struct se_device *dev;
897 	struct se_port *port;
898 	struct se_node_acl *nacl;
899 	struct t10_alua_lu_gp *lu_gp;
900 	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
901 	struct t10_alua_tg_pt_gp *tg_pt_gp;
902 	unsigned char *md_buf;
903 	int primary;
904 
905 	if (core_alua_check_transition(new_state, &primary) != 0)
906 		return -EINVAL;
907 
908 	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
909 	if (!md_buf) {
910 		pr_err("Unable to allocate buf for ALUA metadata\n");
911 		return -ENOMEM;
912 	}
913 
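	/*
	 * md_buf is sized by the local group's tg_pt_gp_md_buf_len and is
	 * reused for every matching target port group transitioned below.
	 */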
914 	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
915 	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
916 	lu_gp = local_lu_gp_mem->lu_gp;
917 	atomic_inc(&lu_gp->lu_gp_ref_cnt);
918 	smp_mb__after_atomic_inc();
919 	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
920 	/*
921 	 * For storage objects that are members of the 'default_lu_gp',
922 	 * we only do the transition on the passed *l_tg_pt_gp, and not
923 	 * on all of the matching target port group IDs in default_lu_gp.
924 	 */
925 	if (!lu_gp->lu_gp_id) {
926 		/*
927 		 * core_alua_do_transition_tg_pt() will always return
928 		 * success.
929 		 */
930 		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
931 					md_buf, new_state, explict);
932 		atomic_dec(&lu_gp->lu_gp_ref_cnt);
933 		smp_mb__after_atomic_dec();
934 		kfree(md_buf);
935 		return 0;
936 	}
937 	/*
938 	 * For all other LU groups aside from 'default_lu_gp', walk all of
939 	 * the associated storage objects looking for a matching target port
940 	 * group ID from the local target port group.
941 	 */
942 	spin_lock(&lu_gp->lu_gp_lock);
943 	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
944 				lu_gp_mem_list) {
945 
946 		dev = lu_gp_mem->lu_gp_mem_dev;
947 		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
948 		smp_mb__after_atomic_inc();
949 		spin_unlock(&lu_gp->lu_gp_lock);
950 
951 		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
952 		list_for_each_entry(tg_pt_gp,
953 				&dev->t10_alua.tg_pt_gps_list,
954 				tg_pt_gp_list) {
955 
956 			if (!tg_pt_gp->tg_pt_gp_valid_id)
957 				continue;
958 			/*
959 			 * If the target behavior port asymmetric access state
960 			 * is changed for any target port group accessiable via
961 			 * a logical unit within a LU group, the target port
962 			 * behavior group asymmetric access states for the same
963 			 * target port group accessible via other logical units
964 			 * in that LU group will also change.
965 			 */
966 			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
967 				continue;
968 
969 			if (l_tg_pt_gp == tg_pt_gp) {
970 				port = l_port;
971 				nacl = l_nacl;
972 			} else {
973 				port = NULL;
974 				nacl = NULL;
975 			}
976 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
977 			smp_mb__after_atomic_inc();
978 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
979 			/*
980 			 * core_alua_do_transition_tg_pt() will always return
981 			 * success.
982 			 */
983 			core_alua_do_transition_tg_pt(tg_pt_gp, port,
984 					nacl, md_buf, new_state, explict);
985 
986 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
987 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
988 			smp_mb__after_atomic_dec();
989 		}
990 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
991 
992 		spin_lock(&lu_gp->lu_gp_lock);
993 		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
994 		smp_mb__after_atomic_dec();
995 	}
996 	spin_unlock(&lu_gp->lu_gp_lock);
997 
998 	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
999 		" Group IDs: %hu %s transition to primary state: %s\n",
1000 		config_item_name(&lu_gp->lu_gp_group.cg_item),
1001 		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
1002 		core_alua_dump_state(new_state));
1003 
1004 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
1005 	smp_mb__after_atomic_dec();
1006 	kfree(md_buf);
1007 	return 0;
1008 }
1009 
1010 /*
1011  * Called with port->sep_tg_pt_md_mutex held
1012  */
1013 static int core_alua_update_tpg_secondary_metadata(
1014 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1015 	struct se_port *port,
1016 	unsigned char *md_buf,
1017 	u32 md_buf_len)
1018 {
1019 	struct se_portal_group *se_tpg = port->sep_tpg;
1020 	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
1021 	int len;
1022 
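	/*
	 * The secondary state metadata is written to
	 * /var/target/alua/<fabric>/<tpg_wwn>[+<tpgt>]/lun_<unpacked_lun>,
	 * containing the alua_tg_pt_offline and alua_tg_pt_status key=value
	 * pairs generated below.
	 */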
1023 	memset(path, 0, ALUA_METADATA_PATH_LEN);
1024 	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
1025 
1026 	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
1027 			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
1028 
1029 	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
1030 		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
1031 				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
1032 
1033 	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
1034 			"alua_tg_pt_status=0x%02x\n",
1035 			atomic_read(&port->sep_tg_pt_secondary_offline),
1036 			port->sep_tg_pt_secondary_stat);
1037 
1038 	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
1039 			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1040 			port->sep_lun->unpacked_lun);
1041 
1042 	return core_alua_write_tpg_metadata(path, md_buf, len);
1043 }
1044 
1045 static int core_alua_set_tg_pt_secondary_state(
1046 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1047 	struct se_port *port,
1048 	int explict,
1049 	int offline)
1050 {
1051 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1052 	unsigned char *md_buf;
1053 	u32 md_buf_len;
1054 	int trans_delay_msecs;
1055 
1056 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1057 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1058 	if (!tg_pt_gp) {
1059 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1060 		pr_err("Unable to complete secondary state"
1061 				" transition\n");
1062 		return -EINVAL;
1063 	}
1064 	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1065 	/*
1066 	 * Set the secondary ALUA target port access state to OFFLINE
1067 	 * or release the previously set secondary state for struct se_port
1068 	 */
1069 	if (offline)
1070 		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
1071 	else
1072 		atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1073 
1074 	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
1075 	port->sep_tg_pt_secondary_stat = (explict) ?
1076 			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
1077 			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
1078 
1079 	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1080 		" to secondary access state: %s\n", (explict) ? "explict" :
1081 		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1082 		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1083 
1084 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1085 	/*
1086 	 * Do the optional transition delay after we set the secondary
1087 	 * ALUA access state.
1088 	 */
1089 	if (trans_delay_msecs != 0)
1090 		msleep_interruptible(trans_delay_msecs);
1091 	/*
1092 	 * See if we need to update the ALUA fabric port metadata for
1093 	 * secondary state and status
1094 	 */
1095 	if (port->sep_tg_pt_secondary_write_md) {
1096 		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
1097 		if (!md_buf) {
1098 			pr_err("Unable to allocate md_buf for"
1099 				" secondary ALUA access metadata\n");
1100 			return -ENOMEM;
1101 		}
1102 		mutex_lock(&port->sep_tg_pt_md_mutex);
1103 		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
1104 				md_buf, md_buf_len);
1105 		mutex_unlock(&port->sep_tg_pt_md_mutex);
1106 
1107 		kfree(md_buf);
1108 	}
1109 
1110 	return 0;
1111 }
1112 
1113 struct t10_alua_lu_gp *
1114 core_alua_allocate_lu_gp(const char *name, int def_group)
1115 {
1116 	struct t10_alua_lu_gp *lu_gp;
1117 
1118 	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1119 	if (!lu_gp) {
1120 		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1121 		return ERR_PTR(-ENOMEM);
1122 	}
1123 	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1124 	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1125 	spin_lock_init(&lu_gp->lu_gp_lock);
1126 	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1127 
1128 	if (def_group) {
1129 		lu_gp->lu_gp_id = alua_lu_gps_counter++;
1130 		lu_gp->lu_gp_valid_id = 1;
1131 		alua_lu_gps_count++;
1132 	}
1133 
1134 	return lu_gp;
1135 }
1136 
1137 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1138 {
1139 	struct t10_alua_lu_gp *lu_gp_tmp;
1140 	u16 lu_gp_id_tmp;
1141 	/*
1142 	 * The lu_gp->lu_gp_id may only be set once..
1143 	 */
1144 	if (lu_gp->lu_gp_valid_id) {
1145 		pr_warn("ALUA LU Group already has a valid ID,"
1146 			" ignoring request\n");
1147 		return -EINVAL;
1148 	}
1149 
1150 	spin_lock(&lu_gps_lock);
1151 	if (alua_lu_gps_count == 0x0000ffff) {
1152 		pr_err("Maximum ALUA alua_lu_gps_count:"
1153 				" 0x0000ffff reached\n");
1154 		spin_unlock(&lu_gps_lock);
1155 		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1156 		return -ENOSPC;
1157 	}
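	/*
	 * When no explicit ID was requested, draw one from the monotonic
	 * counter and retry on collision with an existing group; a
	 * caller-requested duplicate ID fails instead of retrying.
	 */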
1158 again:
1159 	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1160 				alua_lu_gps_counter++;
1161 
1162 	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1163 		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1164 			if (!lu_gp_id)
1165 				goto again;
1166 
1167 			pr_warn("ALUA Logical Unit Group ID: %hu"
1168 				" already exists, ignoring request\n",
1169 				lu_gp_id);
1170 			spin_unlock(&lu_gps_lock);
1171 			return -EINVAL;
1172 		}
1173 	}
1174 
1175 	lu_gp->lu_gp_id = lu_gp_id_tmp;
1176 	lu_gp->lu_gp_valid_id = 1;
1177 	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1178 	alua_lu_gps_count++;
1179 	spin_unlock(&lu_gps_lock);
1180 
1181 	return 0;
1182 }
1183 
1184 static struct t10_alua_lu_gp_member *
1185 core_alua_allocate_lu_gp_mem(struct se_device *dev)
1186 {
1187 	struct t10_alua_lu_gp_member *lu_gp_mem;
1188 
1189 	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1190 	if (!lu_gp_mem) {
1191 		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1192 		return ERR_PTR(-ENOMEM);
1193 	}
1194 	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1195 	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1196 	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1197 
1198 	lu_gp_mem->lu_gp_mem_dev = dev;
1199 	dev->dev_alua_lu_gp_mem = lu_gp_mem;
1200 
1201 	return lu_gp_mem;
1202 }
1203 
1204 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1205 {
1206 	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1207 	/*
1208 	 * Once we have reached this point, config_item_put() has
1209 	 * already been called from target_core_alua_drop_lu_gp().
1210 	 *
1211 	 * Here, we remove the *lu_gp from the global list so that
1212 	 * no associations can be made while we are releasing
1213 	 * struct t10_alua_lu_gp.
1214 	 */
1215 	spin_lock(&lu_gps_lock);
1216 	list_del(&lu_gp->lu_gp_node);
1217 	alua_lu_gps_count--;
1218 	spin_unlock(&lu_gps_lock);
1219 	/*
1220 	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1221 	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1222 	 * released with core_alua_put_lu_gp_from_name()
1223 	 */
1224 	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1225 		cpu_relax();
1226 	/*
1227 	 * Release reference to struct t10_alua_lu_gp * from all associated
1228 	 * struct se_device.
1229 	 */
1230 	spin_lock(&lu_gp->lu_gp_lock);
1231 	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1232 				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1233 		if (lu_gp_mem->lu_gp_assoc) {
1234 			list_del(&lu_gp_mem->lu_gp_mem_list);
1235 			lu_gp->lu_gp_members--;
1236 			lu_gp_mem->lu_gp_assoc = 0;
1237 		}
1238 		spin_unlock(&lu_gp->lu_gp_lock);
1239 		/*
1241 		 * lu_gp_mem is associated with a single
1242 		 * struct se_device->dev_alua_lu_gp_mem, and is released when
1243 		 * struct se_device is released via core_alua_free_lu_gp_mem().
1244 		 *
1245 		 * If the passed lu_gp does NOT match the default_lu_gp, assume
1246 		 * we want to re-assocate a given lu_gp_mem with default_lu_gp.
1247 		 */
1248 		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1249 		if (lu_gp != default_lu_gp)
1250 			__core_alua_attach_lu_gp_mem(lu_gp_mem,
1251 					default_lu_gp);
1252 		else
1253 			lu_gp_mem->lu_gp = NULL;
1254 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1255 
1256 		spin_lock(&lu_gp->lu_gp_lock);
1257 	}
1258 	spin_unlock(&lu_gp->lu_gp_lock);
1259 
1260 	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1261 }
1262 
1263 void core_alua_free_lu_gp_mem(struct se_device *dev)
1264 {
1265 	struct t10_alua_lu_gp *lu_gp;
1266 	struct t10_alua_lu_gp_member *lu_gp_mem;
1267 
1268 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
1269 	if (!lu_gp_mem)
1270 		return;
1271 
1272 	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1273 		cpu_relax();
1274 
1275 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1276 	lu_gp = lu_gp_mem->lu_gp;
1277 	if (lu_gp) {
1278 		spin_lock(&lu_gp->lu_gp_lock);
1279 		if (lu_gp_mem->lu_gp_assoc) {
1280 			list_del(&lu_gp_mem->lu_gp_mem_list);
1281 			lu_gp->lu_gp_members--;
1282 			lu_gp_mem->lu_gp_assoc = 0;
1283 		}
1284 		spin_unlock(&lu_gp->lu_gp_lock);
1285 		lu_gp_mem->lu_gp = NULL;
1286 	}
1287 	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1288 
1289 	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1290 }
1291 
1292 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1293 {
1294 	struct t10_alua_lu_gp *lu_gp;
1295 	struct config_item *ci;
1296 
1297 	spin_lock(&lu_gps_lock);
1298 	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1299 		if (!lu_gp->lu_gp_valid_id)
1300 			continue;
1301 		ci = &lu_gp->lu_gp_group.cg_item;
1302 		if (!strcmp(config_item_name(ci), name)) {
1303 			atomic_inc(&lu_gp->lu_gp_ref_cnt);
1304 			spin_unlock(&lu_gps_lock);
1305 			return lu_gp;
1306 		}
1307 	}
1308 	spin_unlock(&lu_gps_lock);
1309 
1310 	return NULL;
1311 }
1312 
1313 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1314 {
1315 	spin_lock(&lu_gps_lock);
1316 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
1317 	spin_unlock(&lu_gps_lock);
1318 }
1319 
1320 /*
1321  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1322  */
1323 void __core_alua_attach_lu_gp_mem(
1324 	struct t10_alua_lu_gp_member *lu_gp_mem,
1325 	struct t10_alua_lu_gp *lu_gp)
1326 {
1327 	spin_lock(&lu_gp->lu_gp_lock);
1328 	lu_gp_mem->lu_gp = lu_gp;
1329 	lu_gp_mem->lu_gp_assoc = 1;
1330 	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1331 	lu_gp->lu_gp_members++;
1332 	spin_unlock(&lu_gp->lu_gp_lock);
1333 }
1334 
1335 /*
1336  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1337  */
1338 void __core_alua_drop_lu_gp_mem(
1339 	struct t10_alua_lu_gp_member *lu_gp_mem,
1340 	struct t10_alua_lu_gp *lu_gp)
1341 {
1342 	spin_lock(&lu_gp->lu_gp_lock);
1343 	list_del(&lu_gp_mem->lu_gp_mem_list);
1344 	lu_gp_mem->lu_gp = NULL;
1345 	lu_gp_mem->lu_gp_assoc = 0;
1346 	lu_gp->lu_gp_members--;
1347 	spin_unlock(&lu_gp->lu_gp_lock);
1348 }
1349 
1350 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1351 		const char *name, int def_group)
1352 {
1353 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1354 
1355 	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1356 	if (!tg_pt_gp) {
1357 		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1358 		return NULL;
1359 	}
1360 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1361 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1362 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1363 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1364 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1365 	tg_pt_gp->tg_pt_gp_dev = dev;
1366 	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1367 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1368 		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
1369 	/*
1370 	 * Enable both explicit and implicit ALUA support by default
1371 	 */
1372 	tg_pt_gp->tg_pt_gp_alua_access_type =
1373 			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
1374 	/*
1375 	 * Set the default Active/NonOptimized Delay in milliseconds
1376 	 */
1377 	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1378 	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1379 	tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
1380 
1381 	if (def_group) {
1382 		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1383 		tg_pt_gp->tg_pt_gp_id =
1384 				dev->t10_alua.alua_tg_pt_gps_counter++;
1385 		tg_pt_gp->tg_pt_gp_valid_id = 1;
1386 		dev->t10_alua.alua_tg_pt_gps_count++;
1387 		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1388 			      &dev->t10_alua.tg_pt_gps_list);
1389 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1390 	}
1391 
1392 	return tg_pt_gp;
1393 }
1394 
1395 int core_alua_set_tg_pt_gp_id(
1396 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1397 	u16 tg_pt_gp_id)
1398 {
1399 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1400 	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1401 	u16 tg_pt_gp_id_tmp;
1402 
1403 	/*
1404 	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
1405 	 */
1406 	if (tg_pt_gp->tg_pt_gp_valid_id) {
1407 		pr_warn("ALUA TG PT Group already has a valid ID,"
1408 			" ignoring request\n");
1409 		return -EINVAL;
1410 	}
1411 
1412 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1413 	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1414 		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1415 			" 0x0000ffff reached\n");
1416 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1417 		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1418 		return -ENOSPC;
1419 	}
1420 again:
1421 	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1422 			dev->t10_alua.alua_tg_pt_gps_counter++;
1423 
1424 	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1425 			tg_pt_gp_list) {
1426 		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1427 			if (!tg_pt_gp_id)
1428 				goto again;
1429 
1430 			pr_err("ALUA Target Port Group ID: %hu already"
1431 				" exists, ignoring request\n", tg_pt_gp_id);
1432 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1433 			return -EINVAL;
1434 		}
1435 	}
1436 
1437 	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1438 	tg_pt_gp->tg_pt_gp_valid_id = 1;
1439 	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1440 			&dev->t10_alua.tg_pt_gps_list);
1441 	dev->t10_alua.alua_tg_pt_gps_count++;
1442 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1443 
1444 	return 0;
1445 }
1446 
1447 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1448 	struct se_port *port)
1449 {
1450 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1451 
1452 	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1453 				GFP_KERNEL);
1454 	if (!tg_pt_gp_mem) {
1455 		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1456 		return ERR_PTR(-ENOMEM);
1457 	}
1458 	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1459 	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1460 	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1461 
1462 	tg_pt_gp_mem->tg_pt = port;
1463 	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1464 
1465 	return tg_pt_gp_mem;
1466 }
1467 
1468 void core_alua_free_tg_pt_gp(
1469 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1470 {
1471 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1472 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1473 
1474 	/*
1475 	 * Once we have reached this point, config_item_put() has already
1476 	 * been called from target_core_alua_drop_tg_pt_gp().
1477 	 *
1478 	 * Here we remove *tg_pt_gp from the global list so that
1479 	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1480 	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1481 	 */
1482 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1483 	list_del(&tg_pt_gp->tg_pt_gp_list);
1484 	dev->t10_alua.alua_tg_pt_gps_count--;
1485 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1486 
1487 	/*
1488 	 * Allow a struct t10_alua_tg_pt_gp * referenced by
1489 	 * core_alua_get_tg_pt_gp_by_name() in
1490 	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1491 	 * to be released with core_alua_put_tg_pt_gp_from_name().
1492 	 */
1493 	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1494 		cpu_relax();
1495 
1496 	/*
1497 	 * Release reference to struct t10_alua_tg_pt_gp from all associated
1498 	 * struct se_port.
1499 	 */
1500 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1501 	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1502 			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1503 		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1504 			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1505 			tg_pt_gp->tg_pt_gp_members--;
1506 			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1507 		}
1508 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1509 		/*
1510 		 * tg_pt_gp_mem is associated with a single
1511 		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
1512 		 * core_alua_free_tg_pt_gp_mem().
1513 		 *
1514 		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1515 		 * assume we want to re-associate a given tg_pt_gp_mem with
1516 		 * default_tg_pt_gp.
1517 		 */
1518 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1519 		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1520 			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1521 					dev->t10_alua.default_tg_pt_gp);
1522 		} else
1523 			tg_pt_gp_mem->tg_pt_gp = NULL;
1524 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1525 
1526 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1527 	}
1528 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1529 
1530 	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1531 }
1532 
1533 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1534 {
1535 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1536 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1537 
1538 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1539 	if (!tg_pt_gp_mem)
1540 		return;
1541 
1542 	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1543 		cpu_relax();
1544 
1545 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1546 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1547 	if (tg_pt_gp) {
1548 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1549 		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1550 			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1551 			tg_pt_gp->tg_pt_gp_members--;
1552 			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1553 		}
1554 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1555 		tg_pt_gp_mem->tg_pt_gp = NULL;
1556 	}
1557 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1558 
1559 	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1560 }
1561 
1562 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1563 		struct se_device *dev, const char *name)
1564 {
1565 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1566 	struct config_item *ci;
1567 
1568 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1569 	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1570 			tg_pt_gp_list) {
1571 		if (!tg_pt_gp->tg_pt_gp_valid_id)
1572 			continue;
1573 		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1574 		if (!strcmp(config_item_name(ci), name)) {
1575 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1576 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1577 			return tg_pt_gp;
1578 		}
1579 	}
1580 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1581 
1582 	return NULL;
1583 }
1584 
1585 static void core_alua_put_tg_pt_gp_from_name(
1586 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1587 {
1588 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1589 
1590 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1591 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1592 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1593 }
1594 
1595 /*
1596  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1597  */
1598 void __core_alua_attach_tg_pt_gp_mem(
1599 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1600 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1601 {
1602 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1603 	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1604 	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1605 	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1606 			&tg_pt_gp->tg_pt_gp_mem_list);
1607 	tg_pt_gp->tg_pt_gp_members++;
1608 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1609 }
1610 
1611 /*
1612  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1613  */
1614 static void __core_alua_drop_tg_pt_gp_mem(
1615 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1616 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1617 {
1618 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1619 	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1620 	tg_pt_gp_mem->tg_pt_gp = NULL;
1621 	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1622 	tg_pt_gp->tg_pt_gp_members--;
1623 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1624 }
1625 
1626 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
1627 {
1628 	struct config_item *tg_pt_ci;
1629 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1630 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1631 	ssize_t len = 0;
1632 
1633 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1634 	if (!tg_pt_gp_mem)
1635 		return len;
1636 
1637 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1638 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1639 	if (tg_pt_gp) {
1640 		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1641 		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1642 			" %hu\nTG Port Primary Access State: %s\nTG Port "
1643 			"Primary Access Status: %s\nTG Port Secondary Access"
1644 			" State: %s\nTG Port Secondary Access Status: %s\n",
1645 			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1646 			core_alua_dump_state(atomic_read(
1647 					&tg_pt_gp->tg_pt_gp_alua_access_state)),
1648 			core_alua_dump_status(
1649 				tg_pt_gp->tg_pt_gp_alua_access_status),
1650 			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
1651 			"Offline" : "None",
1652 			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
1653 	}
1654 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1655 
1656 	return len;
1657 }
1658 
1659 ssize_t core_alua_store_tg_pt_gp_info(
1660 	struct se_port *port,
1661 	const char *page,
1662 	size_t count)
1663 {
1664 	struct se_portal_group *tpg;
1665 	struct se_lun *lun;
1666 	struct se_device *dev = port->sep_lun->lun_se_dev;
1667 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1668 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1669 	unsigned char buf[TG_PT_GROUP_NAME_BUF];
1670 	int move = 0;
1671 
1672 	tpg = port->sep_tpg;
1673 	lun = port->sep_lun;
1674 
1675 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1676 	if (!tg_pt_gp_mem)
1677 		return 0;
1678 
1679 	if (count >= TG_PT_GROUP_NAME_BUF) { /* leave room for NUL termination */
1680 		pr_err("ALUA Target Port Group alias too large!\n");
1681 		return -EINVAL;
1682 	}
1683 	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1684 	memcpy(buf, page, count);
1685 	/*
1686 	 * Any ALUA target port group alias besides "NULL" means we will be
1687 	 * making a new group association.
1688 	 */
1689 	if (strcmp(strstrip(buf), "NULL")) {
1690 		/*
1691 		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1692 		 * struct t10_alua_tg_pt_gp.  This reference is released with
1693 		 * core_alua_put_tg_pt_gp_from_name() below.
1694 		 */
1695 		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1696 					strstrip(buf));
1697 		if (!tg_pt_gp_new)
1698 			return -ENODEV;
1699 	}
1700 
1701 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1702 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1703 	if (tg_pt_gp) {
1704 		/*
1705 		 * Clearing an existing tg_pt_gp association, and replacing
1706 		 * with the default_tg_pt_gp.
1707 		 */
1708 		if (!tg_pt_gp_new) {
1709 			pr_debug("Target_Core_ConfigFS: Moving"
1710 				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
1711 				" alua/%s, ID: %hu back to"
1712 				" default_tg_pt_gp\n",
1713 				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1714 				tpg->se_tpg_tfo->tpg_get_tag(tpg),
1715 				config_item_name(&lun->lun_group.cg_item),
1716 				config_item_name(
1717 					&tg_pt_gp->tg_pt_gp_group.cg_item),
1718 				tg_pt_gp->tg_pt_gp_id);
1719 
1720 			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1721 			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1722 					dev->t10_alua.default_tg_pt_gp);
1723 			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1724 
1725 			return count;
1726 		}
1727 		/*
1728 		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
1729 		 */
1730 		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1731 		move = 1;
1732 	}
1733 	/*
1734 	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
1735 	 */
1736 	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
1737 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1738 	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1739 		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
1740 		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1741 		tpg->se_tpg_tfo->tpg_get_tag(tpg),
1742 		config_item_name(&lun->lun_group.cg_item),
1743 		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1744 		tg_pt_gp_new->tg_pt_gp_id);
1745 
1746 	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1747 	return count;
1748 }
1749 
1750 ssize_t core_alua_show_access_type(
1751 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1752 	char *page)
1753 {
1754 	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
1755 	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
1756 		return sprintf(page, "Implicit and Explicit\n");
1757 	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
1758 		return sprintf(page, "Implicit\n");
1759 	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
1760 		return sprintf(page, "Explicit\n");
1761 	else
1762 		return sprintf(page, "None\n");
1763 }
1764 
1765 ssize_t core_alua_store_access_type(
1766 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1767 	const char *page,
1768 	size_t count)
1769 {
1770 	unsigned long tmp;
1771 	int ret;
1772 
1773 	ret = strict_strtoul(page, 0, &tmp);
1774 	if (ret < 0) {
1775 		pr_err("Unable to extract alua_access_type\n");
1776 		return -EINVAL;
1777 	}
1778 	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
1779 		pr_err("Illegal value for alua_access_type:"
1780 				" %lu\n", tmp);
1781 		return -EINVAL;
1782 	}
1783 	if (tmp == 3)
1784 		tg_pt_gp->tg_pt_gp_alua_access_type =
1785 			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
1786 	else if (tmp == 2)
1787 		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
1788 	else if (tmp == 1)
1789 		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
1790 	else
1791 		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1792 
1793 	return count;
1794 }
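
/*
 * The values accepted above map onto the TPGS bits as follows; a sketch
 * assuming this handler backs the per-group alua_access_type attribute
 * ("some_group" is a hypothetical group name):
 *
 *   echo 0 > .../alua/some_group/alua_access_type	# None
 *   echo 1 > .../alua/some_group/alua_access_type	# Implicit
 *   echo 2 > .../alua/some_group/alua_access_type	# Explicit
 *   echo 3 > .../alua/some_group/alua_access_type	# Implicit and Explicit
 */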
1795 
1796 ssize_t core_alua_show_nonop_delay_msecs(
1797 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1798 	char *page)
1799 {
1800 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
1801 }
1802 
1803 ssize_t core_alua_store_nonop_delay_msecs(
1804 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1805 	const char *page,
1806 	size_t count)
1807 {
1808 	unsigned long tmp;
1809 	int ret;
1810 
1811 	ret = strict_strtoul(page, 0, &tmp);
1812 	if (ret < 0) {
1813 		pr_err("Unable to extract nonop_delay_msecs\n");
1814 		return -EINVAL;
1815 	}
1816 	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
1817 		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
1818 			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
1819 			ALUA_MAX_NONOP_DELAY_MSECS);
1820 		return -EINVAL;
1821 	}
1822 	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
1823 
1824 	return count;
1825 }
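
/*
 * A non-zero nonop_delay_msecs is picked up by core_alua_state_nonoptimized()
 * elsewhere in this file and stashed in cmd->alua_nonop_delay, so that
 * command completion on an Active/NonOptimized port can be artificially
 * delayed.  Sketch (hypothetical group path, attribute name assumed):
 *
 *   echo 100 > .../alua/some_group/nonop_delay_msecs
 */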
1826 
1827 ssize_t core_alua_show_trans_delay_msecs(
1828 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1829 	char *page)
1830 {
1831 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1832 }
1833 
1834 ssize_t core_alua_store_trans_delay_msecs(
1835 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1836 	const char *page,
1837 	size_t count)
1838 {
1839 	unsigned long tmp;
1840 	int ret;
1841 
1842 	ret = strict_strtoul(page, 0, &tmp);
1843 	if (ret < 0) {
1844 		pr_err("Unable to extract trans_delay_msecs\n");
1845 		return -EINVAL;
1846 	}
1847 	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
1848 		pr_err("Passed trans_delay_msecs: %lu, exceeds"
1849 			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
1850 			ALUA_MAX_TRANS_DELAY_MSECS);
1851 		return -EINVAL;
1852 	}
1853 	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
1854 
1855 	return count;
1856 }
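
/*
 * trans_delay_msecs asks the core to sleep for the given number of
 * milliseconds while a primary access state transition is carried out,
 * giving initiators a window to observe the transition.  Sketch
 * (hypothetical group path, attribute name assumed):
 *
 *   echo 500 > .../alua/some_group/trans_delay_msecs
 */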
1857 
1858 ssize_t core_alua_show_implict_trans_secs(
1859 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1860 	char *page)
1861 {
1862 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
1863 }
1864 
1865 ssize_t core_alua_store_implict_trans_secs(
1866 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1867 	const char *page,
1868 	size_t count)
1869 {
1870 	unsigned long tmp;
1871 	int ret;
1872 
1873 	ret = strict_strtoul(page, 0, &tmp);
1874 	if (ret < 0) {
1875 		pr_err("Unable to extract implict_trans_secs\n");
1876 		return -EINVAL;
1877 	}
1878 	if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
1879 		pr_err("Passed implict_trans_secs: %lu, exceeds"
1880 			" ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
1881 			ALUA_MAX_IMPLICT_TRANS_SECS);
1882 		return  -EINVAL;
1883 	}
1884 	tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
1885 
1886 	return count;
1887 }
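
/*
 * implict_trans_secs is advertised back to initiators in the implicit
 * transition time field of the REPORT TARGET PORT GROUPS extended
 * parameter data header, as an upper bound on how long an implicit
 * state transition may take.  Sketch (hypothetical group path):
 *
 *   echo 30 > .../alua/some_group/implict_trans_secs
 */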
1888 
1889 ssize_t core_alua_show_preferred_bit(
1890 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1891 	char *page)
1892 {
1893 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
1894 }
1895 
1896 ssize_t core_alua_store_preferred_bit(
1897 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1898 	const char *page,
1899 	size_t count)
1900 {
1901 	unsigned long tmp;
1902 	int ret;
1903 
1904 	ret = strict_strtoul(page, 0, &tmp);
1905 	if (ret < 0) {
1906 		pr_err("Unable to extract preferred ALUA value\n");
1907 		return -EINVAL;
1908 	}
1909 	if ((tmp != 0) && (tmp != 1)) {
1910 		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
1911 		return -EINVAL;
1912 	}
1913 	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
1914 
1915 	return count;
1916 }
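
/*
 * The preferred bit is returned to initiators as the PREF bit in this
 * group's REPORT TARGET PORT GROUPS descriptor, marking its ports as
 * preferred paths.  Sketch (hypothetical group path, attribute name
 * assumed to be "preferred"):
 *
 *   echo 1 > .../alua/some_group/preferred
 */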
1917 
1918 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
1919 {
1920 	if (!lun->lun_sep)
1921 		return -ENODEV;
1922 
1923 	return sprintf(page, "%d\n",
1924 		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
1925 }
1926 
1927 ssize_t core_alua_store_offline_bit(
1928 	struct se_lun *lun,
1929 	const char *page,
1930 	size_t count)
1931 {
1932 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1933 	unsigned long tmp;
1934 	int ret;
1935 
1936 	if (!lun->lun_sep)
1937 		return -ENODEV;
1938 
1939 	ret = strict_strtoul(page, 0, &tmp);
1940 	if (ret < 0) {
1941 		pr_err("Unable to extract alua_tg_pt_offline value\n");
1942 		return -EINVAL;
1943 	}
1944 	if ((tmp != 0) && (tmp != 1)) {
1945 		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
1946 				tmp);
1947 		return -EINVAL;
1948 	}
1949 	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
1950 	if (!tg_pt_gp_mem) {
1951 		pr_err("Unable to locate *tg_pt_gp_mem\n");
1952 		return -EINVAL;
1953 	}
1954 
1955 	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
1956 			lun->lun_sep, 0, (int)tmp);
1957 	if (ret < 0)
1958 		return -EINVAL;
1959 
1960 	return count;
1961 }
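
/*
 * Writing the offline bit drives a secondary (per-port) ALUA state change
 * through core_alua_set_tg_pt_secondary_state() above.  Sketch, using the
 * alua_tg_pt_offline attribute name from the messages above (the LUN path
 * is a hypothetical placeholder):
 *
 *   echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline	# go offline
 *   echo 0 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline	# back online
 */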
1962 
1963 ssize_t core_alua_show_secondary_status(
1964 	struct se_lun *lun,
1965 	char *page)
1966 {
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
1968 }
1969 
1970 ssize_t core_alua_store_secondary_status(
1971 	struct se_lun *lun,
1972 	const char *page,
1973 	size_t count)
1974 {
1975 	unsigned long tmp;
1976 	int ret;
1977 
	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
1979 	if (ret < 0) {
1980 		pr_err("Unable to extract alua_tg_pt_status\n");
1981 		return -EINVAL;
1982 	}
1983 	if ((tmp != ALUA_STATUS_NONE) &&
1984 	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
1985 	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
1986 		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
1987 				tmp);
1988 		return -EINVAL;
1989 	}
1990 	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
1991 
1992 	return count;
1993 }
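
/*
 * The alua_tg_pt_status values accepted above follow the SPC-3 status
 * code encoding, which these symbolic names are assumed to carry:
 *
 *   0 - ALUA_STATUS_NONE
 *   1 - ALUA_STATUS_ALTERED_BY_EXPLICT_STPG
 *   2 - ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA
 */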
1994 
1995 ssize_t core_alua_show_secondary_write_metadata(
1996 	struct se_lun *lun,
1997 	char *page)
1998 {
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n",
			lun->lun_sep->sep_tg_pt_secondary_write_md);
2001 }
2002 
2003 ssize_t core_alua_store_secondary_write_metadata(
2004 	struct se_lun *lun,
2005 	const char *page,
2006 	size_t count)
2007 {
2008 	unsigned long tmp;
2009 	int ret;
2010 
	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
2012 	if (ret < 0) {
2013 		pr_err("Unable to extract alua_tg_pt_write_md\n");
2014 		return -EINVAL;
2015 	}
2016 	if ((tmp != 0) && (tmp != 1)) {
2017 		pr_err("Illegal value for alua_tg_pt_write_md:"
2018 				" %lu\n", tmp);
2019 		return -EINVAL;
2020 	}
2021 	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
2022 
2023 	return count;
2024 }
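
/*
 * With alua_tg_pt_write_md enabled, secondary state and status changes
 * are persisted to a per-port ALUA metadata file (see
 * core_alua_update_tg_pt_secondary_metadata() elsewhere in this file),
 * so they can survive a target restart.  Sketch (hypothetical LUN path):
 *
 *   echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_write_md
 */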
2025 
2026 int core_setup_alua(struct se_device *dev)
2027 {
2028 	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
2029 	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2030 		struct t10_alua_lu_gp_member *lu_gp_mem;
2031 
2032 		/*
2033 		 * Associate this struct se_device with the default ALUA
2034 		 * Logical Unit (LU) Group.
2035 		 */
2036 		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2037 		if (IS_ERR(lu_gp_mem))
2038 			return PTR_ERR(lu_gp_mem);
2039 
2040 		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2041 		__core_alua_attach_lu_gp_mem(lu_gp_mem,
2042 				default_lu_gp);
2043 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2044 
2045 		pr_debug("%s: Adding to default ALUA LU Group:"
2046 			" core/alua/lu_gps/default_lu_gp\n",
2047 			dev->transport->name);
2048 	}
2049 
2050 	return 0;
2051 }
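
/*
 * After core_setup_alua() has run for a backend device, its membership in
 * the default logical unit group can be checked from userspace; a sketch,
 * assuming the standard core configfs layout and a "members" attribute:
 *
 *   cat /sys/kernel/config/target/core/alua/lu_gps/default_lu_gp/members
 */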
2052