1 /*******************************************************************************
2  * Filename:  target_core_alua.c
3  *
4  * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
5  *
6  * Copyright (c) 2009-2010 Rising Tide Systems
7  * Copyright (c) 2009-2010 Linux-iSCSI.org
8  *
9  * Nicholas A. Bellinger <nab@kernel.org>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24  *
25  ******************************************************************************/
26 
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/configfs.h>
30 #include <linux/export.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h>
33 
34 #include <target/target_core_base.h>
35 #include <target/target_core_backend.h>
36 #include <target/target_core_fabric.h>
37 #include <target/target_core_configfs.h>
38 
39 #include "target_core_internal.h"
40 #include "target_core_alua.h"
41 #include "target_core_ua.h"
42 
43 static int core_alua_check_transition(int state, int *primary);
44 static int core_alua_set_tg_pt_secondary_state(
45 		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
46 		struct se_port *port, int explict, int offline);
47 
48 static u16 alua_lu_gps_counter;
49 static u32 alua_lu_gps_count;
50 
51 static DEFINE_SPINLOCK(lu_gps_lock);
52 static LIST_HEAD(lu_gps_list);
53 
54 struct t10_alua_lu_gp *default_lu_gp;
55 
56 /*
57  * REPORT_TARGET_PORT_GROUPS
58  *
59  * See spc4r17 section 6.27
60  */
61 int target_emulate_report_target_port_groups(struct se_task *task)
62 {
63 	struct se_cmd *cmd = task->task_se_cmd;
64 	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
65 	struct se_port *port;
66 	struct t10_alua_tg_pt_gp *tg_pt_gp;
67 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
68 	unsigned char *buf;
69 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
70 				    Target port group descriptor */
71 	/*
72 	 * Need at least 4 bytes of response data or else we can't
73 	 * even fit the return data length.
74 	 */
75 	if (cmd->data_length < 4) {
76 		pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
77 			" too small\n", cmd->data_length);
78 		return -EINVAL;
79 	}
80 
81 	buf = transport_kmap_first_data_page(cmd);
82 
83 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
84 	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
85 			tg_pt_gp_list) {
86 		/*
87 		 * Check if the Target port group and Target port descriptor list
88 		 * based on tg_pt_gp_members count will fit into the response payload.
89 		 * Otherwise, bump rd_len to let the initiator know we have exceeded
90 		 * the allocation length and the response is truncated.
91 		 */
92 		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
93 		     cmd->data_length) {
94 			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
95 			continue;
96 		}
97 		/*
98 		 * PREF: Preferred target port bit, determine if this
99 		 * bit should be set for this port group.
100 		 */
101 		if (tg_pt_gp->tg_pt_gp_pref)
102 			buf[off] = 0x80;
103 		/*
104 		 * Set the ASYMMETRIC ACCESS State
105 		 */
106 		buf[off++] |= (atomic_read(
107 			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
108 		/*
109 		 * Set supported ASYMMETRIC ACCESS State bits
110 		 */
111 		buf[off] = 0x80; /* T_SUP */
112 		buf[off] |= 0x40; /* O_SUP */
113 		buf[off] |= 0x8; /* U_SUP */
114 		buf[off] |= 0x4; /* S_SUP */
115 		buf[off] |= 0x2; /* AN_SUP */
116 		buf[off++] |= 0x1; /* AO_SUP */
117 		/*
118 		 * TARGET PORT GROUP
119 		 */
120 		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
121 		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
122 
123 		off++; /* Skip over Reserved */
124 		/*
125 		 * STATUS CODE
126 		 */
127 		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
128 		/*
129 		 * Vendor Specific field
130 		 */
131 		buf[off++] = 0x00;
132 		/*
133 		 * TARGET PORT COUNT
134 		 */
135 		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
136 		rd_len += 8;
137 
138 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
139 		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
140 				tg_pt_gp_mem_list) {
141 			port = tg_pt_gp_mem->tg_pt;
142 			/*
143 			 * Start Target Port descriptor format
144 			 *
145 			 * See spc4r17 section 6.27 Table 247
146 			 */
147 			off += 2; /* Skip over Obsolete */
148 			/*
149 			 * Set RELATIVE TARGET PORT IDENTIFIER
150 			 */
151 			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
152 			buf[off++] = (port->sep_rtpi & 0xff);
153 			rd_len += 4;
154 		}
155 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
156 	}
157 	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
158 	/*
159 	 * Set the RETURN DATA LENGTH in the header of the DataIN payload
160 	 */
161 	buf[0] = ((rd_len >> 24) & 0xff);
162 	buf[1] = ((rd_len >> 16) & 0xff);
163 	buf[2] = ((rd_len >> 8) & 0xff);
164 	buf[3] = (rd_len & 0xff);
165 
166 	transport_kunmap_first_data_page(cmd);
167 
168 	task->task_scsi_status = GOOD;
169 	transport_complete_task(task, 1);
170 	return 0;
171 }
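
/*
 * Illustrative sketch, not part of the driver proper: the open-coded
 * buf[0]..buf[3] stores above perform a big-endian 32-bit encode of the
 * RETURN DATA LENGTH; the hypothetical helper below shows the same
 * operation in isolation.
 */
static inline void alua_put_be32_sketch(unsigned char *p, u32 val)
{
	p[0] = (val >> 24) & 0xff;
	p[1] = (val >> 16) & 0xff;
	p[2] = (val >> 8) & 0xff;
	p[3] = val & 0xff;
}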
172 
173 /*
174  * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
175  *
176  * See spc4r17 section 6.35
177  */
178 int target_emulate_set_target_port_groups(struct se_task *task)
179 {
180 	struct se_cmd *cmd = task->task_se_cmd;
181 	struct se_device *dev = cmd->se_dev;
182 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
183 	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
184 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
185 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
186 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
187 	unsigned char *buf;
188 	unsigned char *ptr;
189 	u32 len = 4; /* Skip over RESERVED area in header */
190 	int alua_access_state, primary = 0, rc;
191 	u16 tg_pt_id, rtpi;
192 
193 	if (!l_port) {
194 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
195 		return -EINVAL;
196 	}
197 	buf = transport_kmap_first_data_page(cmd);
198 
199 	/*
200 	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
201 	 * for the local tg_pt_gp.
202 	 */
203 	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
204 	if (!l_tg_pt_gp_mem) {
205 		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
206 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
207 		rc = -EINVAL;
208 		goto out;
209 	}
210 	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
211 	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
212 	if (!l_tg_pt_gp) {
213 		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
214 		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
215 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
216 		rc = -EINVAL;
217 		goto out;
218 	}
219 	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
220 	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
221 
222 	if (!rc) {
223 		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
224 				" while TPGS_EXPLICT_ALUA is disabled\n");
225 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
226 		rc = -EINVAL;
227 		goto out;
228 	}
229 
230 	ptr = &buf[4]; /* Skip over RESERVED area in header */
231 
232 	while (len < cmd->data_length) {
233 		alua_access_state = (ptr[0] & 0x0f);
234 		/*
235 		 * Check the received ALUA access state, and determine if
236 		 * the state is a primary or secondary target port asymmetric
237 		 * access state.
238 		 */
239 		rc = core_alua_check_transition(alua_access_state, &primary);
240 		if (rc != 0) {
241 			/*
242 			 * If the SET TARGET PORT GROUPS attempts to establish
243 			 * an invalid combination of target port asymmetric
244 			 * access states or attempts to establish an
245 			 * unsupported target port asymmetric access state,
246 			 * then the command shall be terminated with CHECK
247 			 * CONDITION status, with the sense key set to ILLEGAL
248 			 * REQUEST, and the additional sense code set to INVALID
249 			 * FIELD IN PARAMETER LIST.
250 			 */
251 			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
252 			rc = -EINVAL;
253 			goto out;
254 		}
255 		rc = -1;
256 		/*
257 		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
258 		 * specifies a primary target port asymmetric access state,
259 		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
260 		 * a primary target port group for which the primary target
261 		 * port asymmetric access state shall be changed. If the
262 		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
263 		 * port asymmetric access state, then the TARGET PORT GROUP OR
264 		 * TARGET PORT field specifies the relative target port
265 		 * identifier (see 3.1.120) of the target port for which the
266 		 * secondary target port asymmetric access state shall be
267 		 * changed.
268 		 */
269 		if (primary) {
270 			tg_pt_id = ((ptr[2] << 8) & 0xff00);
271 			tg_pt_id |= (ptr[3] & 0xff);
272 			/*
273 			 * Locate the matching target port group ID from
274 			 * the global tg_pt_gp list
275 			 */
276 			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
277 			list_for_each_entry(tg_pt_gp,
278 					&su_dev->t10_alua.tg_pt_gps_list,
279 					tg_pt_gp_list) {
280 				if (!tg_pt_gp->tg_pt_gp_valid_id)
281 					continue;
282 
283 				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
284 					continue;
285 
286 				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
287 				smp_mb__after_atomic_inc();
288 				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
289 
290 				rc = core_alua_do_port_transition(tg_pt_gp,
291 						dev, l_port, nacl,
292 						alua_access_state, 1);
293 
294 				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
295 				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
296 				smp_mb__after_atomic_dec();
297 				break;
298 			}
299 			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
300 			/*
301 			 * If no matching target port group ID can be located,
302 			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
303 			 */
304 			if (rc != 0) {
305 				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
306 				rc = -EINVAL;
307 				goto out;
308 			}
309 		} else {
310 			/*
311 			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
312 			 * the Target Port in question for the incoming
313 			 * SET_TARGET_PORT_GROUPS op.
314 			 */
315 			rtpi = ((ptr[2] << 8) & 0xff00);
316 			rtpi |= (ptr[3] & 0xff);
317 			/*
318 			 * Locate the matching relative target port identifier
319 			 * for the struct se_device storage object.
320 			 */
321 			spin_lock(&dev->se_port_lock);
322 			list_for_each_entry(port, &dev->dev_sep_list,
323 							sep_list) {
324 				if (port->sep_rtpi != rtpi)
325 					continue;
326 
327 				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
328 				spin_unlock(&dev->se_port_lock);
329 
330 				rc = core_alua_set_tg_pt_secondary_state(
331 						tg_pt_gp_mem, port, 1, 1);
332 
333 				spin_lock(&dev->se_port_lock);
334 				break;
335 			}
336 			spin_unlock(&dev->se_port_lock);
337 			/*
338 			 * If no matching relative target port identifier can
339 			 * be located, throw an exception with ASCQ:
340 			 * INVALID_PARAMETER_LIST
341 			 */
342 			if (rc != 0) {
343 				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
344 				rc = -EINVAL;
345 				goto out;
346 			}
347 		}
348 
349 		ptr += 4;
350 		len += 4;
351 	}
352 
353 out:
354 	transport_kunmap_first_data_page(cmd);
355 	task->task_scsi_status = GOOD;
356 	transport_complete_task(task, 1);
357 	return 0;
358 }
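
/*
 * Illustrative sketch, not part of the driver proper: each 4-byte STPG
 * descriptor parsed above carries the ASYMMETRIC ACCESS STATE in the low
 * nibble of byte 0 and a big-endian 16-bit TARGET PORT GROUP or RELATIVE
 * TARGET PORT IDENTIFIER in bytes 2-3, which a hypothetical helper would
 * decode as:
 */
static inline u16 alua_get_be16_sketch(const unsigned char *p)
{
	return (u16)((p[0] << 8) | p[1]);
}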
359 
360 static inline int core_alua_state_nonoptimized(
361 	struct se_cmd *cmd,
362 	unsigned char *cdb,
363 	int nonop_delay_msecs,
364 	u8 *alua_ascq)
365 {
366 	/*
367 	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
368 	 * later to determine if processing of this cmd needs to be
369 	 * temporarily delayed for the Active/NonOptimized primary access state.
370 	 */
371 	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
372 	cmd->alua_nonop_delay = nonop_delay_msecs;
373 	return 0;
374 }
375 
376 static inline int core_alua_state_standby(
377 	struct se_cmd *cmd,
378 	unsigned char *cdb,
379 	u8 *alua_ascq)
380 {
381 	/*
382 	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
383 	 * spc4r17 section 5.9.2.4.4
384 	 */
385 	switch (cdb[0]) {
386 	case INQUIRY:
387 	case LOG_SELECT:
388 	case LOG_SENSE:
389 	case MODE_SELECT:
390 	case MODE_SENSE:
391 	case REPORT_LUNS:
392 	case RECEIVE_DIAGNOSTIC:
393 	case SEND_DIAGNOSTIC:
394 	case MAINTENANCE_IN:
395 		switch (cdb[1]) {
396 		case MI_REPORT_TARGET_PGS:
397 			return 0;
398 		default:
399 			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
400 			return 1;
401 		}
402 	case MAINTENANCE_OUT:
403 		switch (cdb[1]) {
404 		case MO_SET_TARGET_PGS:
405 			return 0;
406 		default:
407 			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
408 			return 1;
409 		}
410 	case REQUEST_SENSE:
411 	case PERSISTENT_RESERVE_IN:
412 	case PERSISTENT_RESERVE_OUT:
413 	case READ_BUFFER:
414 	case WRITE_BUFFER:
415 		return 0;
416 	default:
417 		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
418 		return 1;
419 	}
420 
421 	return 0;
422 }
423 
424 static inline int core_alua_state_unavailable(
425 	struct se_cmd *cmd,
426 	unsigned char *cdb,
427 	u8 *alua_ascq)
428 {
429 	/*
430 	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
431 	 * spc4r17 section 5.9.2.4.5
432 	 */
433 	switch (cdb[0]) {
434 	case INQUIRY:
435 	case REPORT_LUNS:
436 	case MAINTENANCE_IN:
437 		switch (cdb[1]) {
438 		case MI_REPORT_TARGET_PGS:
439 			return 0;
440 		default:
441 			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
442 			return 1;
443 		}
444 	case MAINTENANCE_OUT:
445 		switch (cdb[1]) {
446 		case MO_SET_TARGET_PGS:
447 			return 0;
448 		default:
449 			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
450 			return 1;
451 		}
452 	case REQUEST_SENSE:
453 	case READ_BUFFER:
454 	case WRITE_BUFFER:
455 		return 0;
456 	default:
457 		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
458 		return 1;
459 	}
460 
461 	return 0;
462 }
463 
464 static inline int core_alua_state_transition(
465 	struct se_cmd *cmd,
466 	unsigned char *cdb,
467 	u8 *alua_ascq)
468 {
469 	/*
470 	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
471 	 * spc4r17 section 5.9.2.5
472 	 */
473 	switch (cdb[0]) {
474 	case INQUIRY:
475 	case REPORT_LUNS:
476 	case MAINTENANCE_IN:
477 		switch (cdb[1]) {
478 		case MI_REPORT_TARGET_PGS:
479 			return 0;
480 		default:
481 			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
482 			return 1;
483 		}
484 	case REQUEST_SENSE:
485 	case READ_BUFFER:
486 	case WRITE_BUFFER:
487 		return 0;
488 	default:
489 		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
490 		return 1;
491 	}
492 
493 	return 0;
494 }
495 
496 /*
497  * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
498  * in transport_cmd_sequencer().  This function is assigned to
499  * struct t10_alua *->state_check() in core_setup_alua()
500  */
501 static int core_alua_state_check_nop(
502 	struct se_cmd *cmd,
503 	unsigned char *cdb,
504 	u8 *alua_ascq)
505 {
506 	return 0;
507 }
508 
509 /*
510  * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
511  * This function is assigned to struct t10_alua *->state_check() in
512  * core_setup_alua()
513  *
514  * Also, this function can return three different return codes to
515  * signal transport_generic_cmd_sequencer()
516  *
517  * return 1: Used to signal LUN not accessible, and check condition/not ready
518  * return 0: Used to signal success
519  * return -1: Used to signal failure, and invalid cdb field
520  */
521 static int core_alua_state_check(
522 	struct se_cmd *cmd,
523 	unsigned char *cdb,
524 	u8 *alua_ascq)
525 {
526 	struct se_lun *lun = cmd->se_lun;
527 	struct se_port *port = lun->lun_sep;
528 	struct t10_alua_tg_pt_gp *tg_pt_gp;
529 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
530 	int out_alua_state, nonop_delay_msecs;
531 
532 	if (!port)
533 		return 0;
534 	/*
535 	 * First, check for a struct se_port specific secondary ALUA target port
536 	 * access state: OFFLINE
537 	 */
538 	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
539 		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
540 		pr_debug("ALUA: Got secondary offline status for local"
541 				" target port\n");
543 		return 1;
544 	}
545 	/*
546 	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
547 	 * ALUA target port group, to obtain the current ALUA access state.
548 	 * Otherwise look for the underlying struct se_device association with
549 	 * an ALUA logical unit group.
550 	 */
551 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
552 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
553 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
554 	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
555 	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
556 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
557 	/*
558 	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
559 	 * statement so the compiler knows explicitly to check this case first.
560 	 * For the Optimized ALUA access state case, we want to process the
561 	 * incoming fabric cmd ASAP.
562 	 */
563 	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
564 		return 0;
565 
566 	switch (out_alua_state) {
567 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
568 		return core_alua_state_nonoptimized(cmd, cdb,
569 					nonop_delay_msecs, alua_ascq);
570 	case ALUA_ACCESS_STATE_STANDBY:
571 		return core_alua_state_standby(cmd, cdb, alua_ascq);
572 	case ALUA_ACCESS_STATE_UNAVAILABLE:
573 		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
574 	case ALUA_ACCESS_STATE_TRANSITION:
575 		return core_alua_state_transition(cmd, cdb, alua_ascq);
576 	/*
577 	 * OFFLINE is a secondary ALUA target port group access state that is
578 	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
579 	 */
580 	case ALUA_ACCESS_STATE_OFFLINE:
581 	default:
582 		pr_err("Unknown ALUA access state: 0x%02x\n",
583 				out_alua_state);
584 		return -EINVAL;
585 	}
586 
587 	return 0;
588 }
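
/*
 * Illustrative sketch, not part of the driver proper: how a caller is
 * expected to act on the tri-state return documented above.  The real
 * dispatch lives in transport_generic_cmd_sequencer(); the errno
 * mapping below is purely hypothetical.
 */
static inline int alua_state_check_usage_sketch(struct se_cmd *cmd,
		unsigned char *cdb, u8 *alua_ascq)
{
	int ret = core_alua_state_check(cmd, cdb, alua_ascq);

	if (ret > 0)		/* LUN not accessible: CHECK CONDITION/NOT READY */
		return -EAGAIN;
	if (ret < 0)		/* invalid field in CDB */
		return -EINVAL;
	return 0;		/* proceed with normal command processing */
}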
589 
590 /*
591  * Check implicit and explicit ALUA state change requests.
592  */
593 static int core_alua_check_transition(int state, int *primary)
594 {
595 	switch (state) {
596 	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
597 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
598 	case ALUA_ACCESS_STATE_STANDBY:
599 	case ALUA_ACCESS_STATE_UNAVAILABLE:
600 		/*
601 		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
602 		 * defined as primary target port asymmetric access states.
603 		 */
604 		*primary = 1;
605 		break;
606 	case ALUA_ACCESS_STATE_OFFLINE:
607 		/*
608 		 * OFFLINE state is defined as a secondary target port
609 		 * asymmetric access state.
610 		 */
611 		*primary = 0;
612 		break;
613 	default:
614 		pr_err("Unknown ALUA access state: 0x%02x\n", state);
615 		return -EINVAL;
616 	}
617 
618 	return 0;
619 }
620 
621 static char *core_alua_dump_state(int state)
622 {
623 	switch (state) {
624 	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
625 		return "Active/Optimized";
626 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
627 		return "Active/NonOptimized";
628 	case ALUA_ACCESS_STATE_STANDBY:
629 		return "Standby";
630 	case ALUA_ACCESS_STATE_UNAVAILABLE:
631 		return "Unavailable";
632 	case ALUA_ACCESS_STATE_OFFLINE:
633 		return "Offline";
634 	default:
635 		return "Unknown";
636 	}
637 
638 	return NULL;
639 }
640 
641 char *core_alua_dump_status(int status)
642 {
643 	switch (status) {
644 	case ALUA_STATUS_NONE:
645 		return "None";
646 	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
647 		return "Altered by Explicit STPG";
648 	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
649 		return "Altered by Implicit ALUA";
650 	default:
651 		return "Unknown";
652 	}
653 
654 	return NULL;
655 }
656 
657 /*
658  * Used by fabric modules to determine when we need to delay processing
659  * for the Active/NonOptimized paths.
660  */
661 int core_alua_check_nonop_delay(
662 	struct se_cmd *cmd)
663 {
664 	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
665 		return 0;
666 	if (in_interrupt())
667 		return 0;
668 	/*
669 	 * The ALUA Active/NonOptimized access state delay can be disabled
670 	 * via configfs with a value of zero
671 	 */
672 	if (!cmd->alua_nonop_delay)
673 		return 0;
674 	/*
675 	 * struct se_cmd->alua_nonop_delay gets set by a target port group
676 	 * defined interval in core_alua_state_nonoptimized()
677 	 */
678 	msleep_interruptible(cmd->alua_nonop_delay);
679 	return 0;
680 }
681 EXPORT_SYMBOL(core_alua_check_nonop_delay);
682 
683 /*
684  * Called with tg_pt_gp->tg_pt_gp_md_mutex or port->sep_tg_pt_md_mutex held
686  */
687 static int core_alua_write_tpg_metadata(
688 	const char *path,
689 	unsigned char *md_buf,
690 	u32 md_buf_len)
691 {
692 	mm_segment_t old_fs;
693 	struct file *file;
694 	struct iovec iov[1];
695 	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
696 
697 	memset(iov, 0, sizeof(struct iovec));
698 
699 	file = filp_open(path, flags, 0600);
700 	if (IS_ERR(file) || !file || !file->f_dentry) {
701 		pr_err("filp_open(%s) for ALUA metadata failed\n",
702 			path);
703 		return -ENODEV;
704 	}
705 
706 	iov[0].iov_base = &md_buf[0];
707 	iov[0].iov_len = md_buf_len;
708 
709 	old_fs = get_fs();
710 	set_fs(get_ds());
711 	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
712 	set_fs(old_fs);
713 
714 	if (ret < 0) {
715 		pr_err("Error writing ALUA metadata file: %s\n", path);
716 		filp_close(file, NULL);
717 		return -EIO;
718 	}
719 	filp_close(file, NULL);
720 
721 	return 0;
722 }
723 
724 /*
725  * Called with tg_pt_gp->tg_pt_gp_md_mutex held
726  */
727 static int core_alua_update_tpg_primary_metadata(
728 	struct t10_alua_tg_pt_gp *tg_pt_gp,
729 	int primary_state,
730 	unsigned char *md_buf)
731 {
732 	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
733 	struct t10_wwn *wwn = &su_dev->t10_wwn;
734 	char path[ALUA_METADATA_PATH_LEN];
735 	int len;
736 
737 	memset(path, 0, ALUA_METADATA_PATH_LEN);
738 
739 	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
740 			"tg_pt_gp_id=%hu\n"
741 			"alua_access_state=0x%02x\n"
742 			"alua_access_status=0x%02x\n",
743 			tg_pt_gp->tg_pt_gp_id, primary_state,
744 			tg_pt_gp->tg_pt_gp_alua_access_status);
745 
746 	snprintf(path, ALUA_METADATA_PATH_LEN,
747 		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
748 		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
749 
750 	return core_alua_write_tpg_metadata(path, md_buf, len);
751 }
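
/*
 * For reference, with the format strings above the resulting file is
 * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp_name> containing, for
 * example (values are illustrative):
 *
 *	tg_pt_gp_id=1
 *	alua_access_state=0x02
 *	alua_access_status=0x01
 */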
752 
753 static int core_alua_do_transition_tg_pt(
754 	struct t10_alua_tg_pt_gp *tg_pt_gp,
755 	struct se_port *l_port,
756 	struct se_node_acl *nacl,
757 	unsigned char *md_buf,
758 	int new_state,
759 	int explict)
760 {
761 	struct se_dev_entry *se_deve;
762 	struct se_lun_acl *lacl;
763 	struct se_port *port;
764 	struct t10_alua_tg_pt_gp_member *mem;
765 	int old_state = 0;
766 	/*
767 	 * Save the old primary ALUA access state, and set the current state
768 	 * to ALUA_ACCESS_STATE_TRANSITION.
769 	 */
770 	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
771 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
772 			ALUA_ACCESS_STATE_TRANSITION);
773 	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
774 				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
775 				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
776 	/*
777 	 * Check for the optional ALUA primary state transition delay
778 	 */
779 	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
780 		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
781 
782 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
783 	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
784 				tg_pt_gp_mem_list) {
785 		port = mem->tg_pt;
786 		/*
787 		 * After an implicit target port asymmetric access state
788 		 * change, a device server shall establish a unit attention
789 		 * condition for the initiator port associated with every I_T
790 		 * nexus with the additional sense code set to ASYMMETRIC
791 		 * ACCESS STATE CHANGED.
792 		 *
793 		 * After an explicit target port asymmetric access state
794 		 * change, a device server shall establish a unit attention
795 		 * condition with the additional sense code set to ASYMMETRIC
796 		 * ACCESS STATE CHANGED for the initiator port associated with
797 		 * every I_T nexus other than the I_T nexus on which the SET
798 		 * TARGET PORT GROUPS command was received.
799 		 */
800 		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
801 		smp_mb__after_atomic_inc();
802 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
803 
804 		spin_lock_bh(&port->sep_alua_lock);
805 		list_for_each_entry(se_deve, &port->sep_alua_list,
806 					alua_port_list) {
807 			lacl = se_deve->se_lun_acl;
808 			/*
809 			 * se_deve->se_lun_acl pointer may be NULL for an
810 			 * entry created without explicit Node+MappedLUN ACLs
811 			 */
812 			if (!lacl)
813 				continue;
814 
815 			if (explict &&
816 			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
817 			   (l_port != NULL) && (l_port == port))
818 				continue;
819 
820 			core_scsi3_ua_allocate(lacl->se_lun_nacl,
821 				se_deve->mapped_lun, 0x2A,
822 				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
823 		}
824 		spin_unlock_bh(&port->sep_alua_lock);
825 
826 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
827 		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
828 		smp_mb__after_atomic_dec();
829 	}
830 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
831 	/*
832 	 * Update the ALUA metadata buf that has been allocated in
833 	 * core_alua_do_port_transition(), this metadata will be written
834 	 * to struct file.
835 	 *
836 	 * Note that there is the case where we do not want to update the
837 	 * metadata when the saved metadata is being parsed in userspace
838 	 * when setting the existing port access state and access status.
839 	 *
840 	 * Also note that the failure to write out the ALUA metadata to
841 	 * struct file does NOT affect the actual ALUA transition.
842 	 */
843 	if (tg_pt_gp->tg_pt_gp_write_metadata) {
844 		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
845 		core_alua_update_tpg_primary_metadata(tg_pt_gp,
846 					new_state, md_buf);
847 		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
848 	}
849 	/*
850 	 * Set the current primary ALUA access state to the requested new state
851 	 */
852 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
853 
854 	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
855 		" from primary access state %s to %s\n", (explict) ? "explicit" :
856 		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
857 		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
858 		core_alua_dump_state(new_state));
859 
860 	return 0;
861 }
862 
863 int core_alua_do_port_transition(
864 	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
865 	struct se_device *l_dev,
866 	struct se_port *l_port,
867 	struct se_node_acl *l_nacl,
868 	int new_state,
869 	int explict)
870 {
871 	struct se_device *dev;
872 	struct se_port *port;
873 	struct se_subsystem_dev *su_dev;
874 	struct se_node_acl *nacl;
875 	struct t10_alua_lu_gp *lu_gp;
876 	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
877 	struct t10_alua_tg_pt_gp *tg_pt_gp;
878 	unsigned char *md_buf;
879 	int primary;
880 
881 	if (core_alua_check_transition(new_state, &primary) != 0)
882 		return -EINVAL;
883 
884 	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
885 	if (!md_buf) {
886 		pr_err("Unable to allocate buf for ALUA metadata\n");
887 		return -ENOMEM;
888 	}
889 
890 	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
891 	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
892 	lu_gp = local_lu_gp_mem->lu_gp;
893 	atomic_inc(&lu_gp->lu_gp_ref_cnt);
894 	smp_mb__after_atomic_inc();
895 	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
896 	/*
897 	 * For storage objects that are members of the 'default_lu_gp',
898 	 * we only do transition on the passed *l_tg_pt_gp, and not
899 	 * on all of the matching target port groups IDs in default_lu_gp.
900 	 */
901 	if (!lu_gp->lu_gp_id) {
902 		/*
903 		 * core_alua_do_transition_tg_pt() will always return
904 		 * success.
905 		 */
906 		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
907 					md_buf, new_state, explict);
908 		atomic_dec(&lu_gp->lu_gp_ref_cnt);
909 		smp_mb__after_atomic_dec();
910 		kfree(md_buf);
911 		return 0;
912 	}
913 	/*
914 	 * For all other LU groups aside from 'default_lu_gp', walk all of
915 	 * the associated storage objects looking for a matching target port
916 	 * group ID from the local target port group.
917 	 */
918 	spin_lock(&lu_gp->lu_gp_lock);
919 	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
920 				lu_gp_mem_list) {
921 
922 		dev = lu_gp_mem->lu_gp_mem_dev;
923 		su_dev = dev->se_sub_dev;
924 		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
925 		smp_mb__after_atomic_inc();
926 		spin_unlock(&lu_gp->lu_gp_lock);
927 
928 		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
929 		list_for_each_entry(tg_pt_gp,
930 				&su_dev->t10_alua.tg_pt_gps_list,
931 				tg_pt_gp_list) {
932 
933 			if (!tg_pt_gp->tg_pt_gp_valid_id)
934 				continue;
935 			 * If the target port asymmetric access state is
936 			 * changed for any target port group accessible via
937 			 * a logical unit within a LU group, the target port
938 			 * group asymmetric access states for the same
939 			 * behavior group asymmetric access states for the same
940 			 * target port group accessible via other logical units
941 			 * in that LU group will also change.
942 			 */
943 			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
944 				continue;
945 
946 			if (l_tg_pt_gp == tg_pt_gp) {
947 				port = l_port;
948 				nacl = l_nacl;
949 			} else {
950 				port = NULL;
951 				nacl = NULL;
952 			}
953 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
954 			smp_mb__after_atomic_inc();
955 			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
956 			/*
957 			 * core_alua_do_transition_tg_pt() will always return
958 			 * success.
959 			 */
960 			core_alua_do_transition_tg_pt(tg_pt_gp, port,
961 					nacl, md_buf, new_state, explict);
962 
963 			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
964 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
965 			smp_mb__after_atomic_dec();
966 		}
967 		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
968 
969 		spin_lock(&lu_gp->lu_gp_lock);
970 		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
971 		smp_mb__after_atomic_dec();
972 	}
973 	spin_unlock(&lu_gp->lu_gp_lock);
974 
975 	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
976 		" Group IDs: %hu %s transition to primary state: %s\n",
977 		config_item_name(&lu_gp->lu_gp_group.cg_item),
978 		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explicit" : "implicit",
979 		core_alua_dump_state(new_state));
980 
981 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
982 	smp_mb__after_atomic_dec();
983 	kfree(md_buf);
984 	return 0;
985 }
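
/*
 * Illustrative sketch, not part of the driver proper: an explicit
 * (STPG-style) request to move a group to Standby, mirroring the call
 * site in target_emulate_set_target_port_groups(); all pointer arguments
 * are assumed to be valid references obtained as in that caller.
 */
static inline int alua_explicit_standby_sketch(struct t10_alua_tg_pt_gp *gp,
		struct se_device *dev, struct se_port *port,
		struct se_node_acl *nacl)
{
	return core_alua_do_port_transition(gp, dev, port, nacl,
			ALUA_ACCESS_STATE_STANDBY, 1);
}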
986 
987 /*
988  * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
989  */
990 static int core_alua_update_tpg_secondary_metadata(
991 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
992 	struct se_port *port,
993 	unsigned char *md_buf,
994 	u32 md_buf_len)
995 {
996 	struct se_portal_group *se_tpg = port->sep_tpg;
997 	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
998 	int len;
999 
1000 	memset(path, 0, ALUA_METADATA_PATH_LEN);
1001 	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
1002 
1003 	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
1004 			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
1005 
1006 	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
1007 		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
1008 				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
1009 
1010 	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
1011 			"alua_tg_pt_status=0x%02x\n",
1012 			atomic_read(&port->sep_tg_pt_secondary_offline),
1013 			port->sep_tg_pt_secondary_stat);
1014 
1015 	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
1016 			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1017 			port->sep_lun->unpacked_lun);
1018 
1019 	return core_alua_write_tpg_metadata(path, md_buf, len);
1020 }
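
/*
 * For reference, with the format strings above the per-port file is
 * /var/target/alua/<fabric>/<wwn>[+<tpgt>]/lun_<unpacked_lun> containing,
 * for example (values are illustrative):
 *
 *	alua_tg_pt_offline=1
 *	alua_tg_pt_status=0x02
 */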
1021 
1022 static int core_alua_set_tg_pt_secondary_state(
1023 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1024 	struct se_port *port,
1025 	int explict,
1026 	int offline)
1027 {
1028 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1029 	unsigned char *md_buf;
1030 	u32 md_buf_len;
1031 	int trans_delay_msecs;
1032 
1033 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1034 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1035 	if (!tg_pt_gp) {
1036 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1037 		pr_err("Unable to complete secondary state"
1038 				" transition\n");
1039 		return -EINVAL;
1040 	}
1041 	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1042 	/*
1043 	 * Set the secondary ALUA target port access state to OFFLINE
1044 	 * or release the previously secondary state for struct se_port
1045 	 */
1046 	if (offline)
1047 		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
1048 	else
1049 		atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1050 
1051 	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
1052 	port->sep_tg_pt_secondary_stat = (explict) ?
1053 			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
1054 			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
1055 
1056 	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1057 		" to secondary access state: %s\n", (explict) ? "explicit" :
1058 		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1059 		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1060 
1061 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1062 	/*
1063 	 * Do the optional transition delay after we set the secondary
1064 	 * ALUA access state.
1065 	 */
1066 	if (trans_delay_msecs != 0)
1067 		msleep_interruptible(trans_delay_msecs);
1068 	/*
1069 	 * See if we need to update the ALUA fabric port metadata for
1070 	 * secondary state and status
1071 	 */
1072 	if (port->sep_tg_pt_secondary_write_md) {
1073 		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
1074 		if (!md_buf) {
1075 			pr_err("Unable to allocate md_buf for"
1076 				" secondary ALUA access metadata\n");
1077 			return -ENOMEM;
1078 		}
1079 		mutex_lock(&port->sep_tg_pt_md_mutex);
1080 		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
1081 				md_buf, md_buf_len);
1082 		mutex_unlock(&port->sep_tg_pt_md_mutex);
1083 
1084 		kfree(md_buf);
1085 	}
1086 
1087 	return 0;
1088 }
1089 
1090 struct t10_alua_lu_gp *
1091 core_alua_allocate_lu_gp(const char *name, int def_group)
1092 {
1093 	struct t10_alua_lu_gp *lu_gp;
1094 
1095 	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1096 	if (!lu_gp) {
1097 		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1098 		return ERR_PTR(-ENOMEM);
1099 	}
1100 	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1101 	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1102 	spin_lock_init(&lu_gp->lu_gp_lock);
1103 	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1104 
1105 	if (def_group) {
1106 		lu_gp->lu_gp_id = alua_lu_gps_counter++;
1107 		lu_gp->lu_gp_valid_id = 1;
1108 		alua_lu_gps_count++;
1109 	}
1110 
1111 	return lu_gp;
1112 }
1113 
1114 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1115 {
1116 	struct t10_alua_lu_gp *lu_gp_tmp;
1117 	u16 lu_gp_id_tmp;
1118 	/*
1119 	 * The lu_gp->lu_gp_id may only be set once.
1120 	 */
1121 	if (lu_gp->lu_gp_valid_id) {
1122 		pr_warn("ALUA LU Group already has a valid ID,"
1123 			" ignoring request\n");
1124 		return -EINVAL;
1125 	}
1126 
1127 	spin_lock(&lu_gps_lock);
1128 	if (alua_lu_gps_count == 0x0000ffff) {
1129 		pr_err("Maximum ALUA alua_lu_gps_count:"
1130 				" 0x0000ffff reached\n");
1131 		spin_unlock(&lu_gps_lock);
1132 		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1133 		return -ENOSPC;
1134 	}
1135 again:
1136 	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1137 				alua_lu_gps_counter++;
1138 
1139 	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1140 		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1141 			if (!lu_gp_id)
1142 				goto again;
1143 
1144 			pr_warn("ALUA Logical Unit Group ID: %hu"
1145 				" already exists, ignoring request\n",
1146 				lu_gp_id);
1147 			spin_unlock(&lu_gps_lock);
1148 			return -EINVAL;
1149 		}
1150 	}
1151 
1152 	lu_gp->lu_gp_id = lu_gp_id_tmp;
1153 	lu_gp->lu_gp_valid_id = 1;
1154 	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1155 	alua_lu_gps_count++;
1156 	spin_unlock(&lu_gps_lock);
1157 
1158 	return 0;
1159 }
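
/*
 * Illustrative sketch, not part of the driver proper: configfs normally
 * drives LU group creation, but the allocate/set-ID pairing looks like
 * this.  Passing lu_gp_id=0 asks core_alua_set_lu_gp_id() to auto-assign
 * the next free ID; on failure it frees lu_gp itself, so no kfree here.
 */
static inline struct t10_alua_lu_gp *alua_lu_gp_create_sketch(const char *name)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = core_alua_allocate_lu_gp(name, 0);
	if (IS_ERR(lu_gp))
		return lu_gp;

	if (core_alua_set_lu_gp_id(lu_gp, 0) < 0)
		return ERR_PTR(-EINVAL);

	return lu_gp;
}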
1160 
1161 static struct t10_alua_lu_gp_member *
1162 core_alua_allocate_lu_gp_mem(struct se_device *dev)
1163 {
1164 	struct t10_alua_lu_gp_member *lu_gp_mem;
1165 
1166 	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1167 	if (!lu_gp_mem) {
1168 		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1169 		return ERR_PTR(-ENOMEM);
1170 	}
1171 	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1172 	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1173 	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1174 
1175 	lu_gp_mem->lu_gp_mem_dev = dev;
1176 	dev->dev_alua_lu_gp_mem = lu_gp_mem;
1177 
1178 	return lu_gp_mem;
1179 }
1180 
1181 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1182 {
1183 	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1184 	/*
1185 	 * Once we have reached this point, config_item_put() has
1186 	 * already been called from target_core_alua_drop_lu_gp().
1187 	 *
1188 	 * Here, we remove the *lu_gp from the global list so that
1189 	 * no associations can be made while we are releasing
1190 	 * struct t10_alua_lu_gp.
1191 	 */
1192 	spin_lock(&lu_gps_lock);
1193 	list_del(&lu_gp->lu_gp_node);
1194 	alua_lu_gps_count--;
1195 	spin_unlock(&lu_gps_lock);
1196 	/*
1197 	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1198 	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1199 	 * released with core_alua_put_lu_gp_from_name()
1200 	 */
1201 	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1202 		cpu_relax();
1203 	/*
1204 	 * Release reference to struct t10_alua_lu_gp * from all associated
1205 	 * struct se_device.
1206 	 */
1207 	spin_lock(&lu_gp->lu_gp_lock);
1208 	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1209 				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1210 		if (lu_gp_mem->lu_gp_assoc) {
1211 			list_del(&lu_gp_mem->lu_gp_mem_list);
1212 			lu_gp->lu_gp_members--;
1213 			lu_gp_mem->lu_gp_assoc = 0;
1214 		}
1215 		spin_unlock(&lu_gp->lu_gp_lock);
1216 		/*
1218 		 * lu_gp_mem is associated with a single
1219 		 * struct se_device->dev_alua_lu_gp_mem, and is released when
1220 		 * struct se_device is released via core_alua_free_lu_gp_mem().
1221 		 *
1222 		 * If the passed lu_gp does NOT match the default_lu_gp, assume
1223 		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1224 		 */
1225 		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1226 		if (lu_gp != default_lu_gp)
1227 			__core_alua_attach_lu_gp_mem(lu_gp_mem,
1228 					default_lu_gp);
1229 		else
1230 			lu_gp_mem->lu_gp = NULL;
1231 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1232 
1233 		spin_lock(&lu_gp->lu_gp_lock);
1234 	}
1235 	spin_unlock(&lu_gp->lu_gp_lock);
1236 
1237 	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1238 }
1239 
1240 void core_alua_free_lu_gp_mem(struct se_device *dev)
1241 {
1242 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1243 	struct t10_alua *alua = &su_dev->t10_alua;
1244 	struct t10_alua_lu_gp *lu_gp;
1245 	struct t10_alua_lu_gp_member *lu_gp_mem;
1246 
1247 	if (alua->alua_type != SPC3_ALUA_EMULATED)
1248 		return;
1249 
1250 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
1251 	if (!lu_gp_mem)
1252 		return;
1253 
1254 	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1255 		cpu_relax();
1256 
1257 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1258 	lu_gp = lu_gp_mem->lu_gp;
1259 	if (lu_gp) {
1260 		spin_lock(&lu_gp->lu_gp_lock);
1261 		if (lu_gp_mem->lu_gp_assoc) {
1262 			list_del(&lu_gp_mem->lu_gp_mem_list);
1263 			lu_gp->lu_gp_members--;
1264 			lu_gp_mem->lu_gp_assoc = 0;
1265 		}
1266 		spin_unlock(&lu_gp->lu_gp_lock);
1267 		lu_gp_mem->lu_gp = NULL;
1268 	}
1269 	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1270 
1271 	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1272 }
1273 
1274 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1275 {
1276 	struct t10_alua_lu_gp *lu_gp;
1277 	struct config_item *ci;
1278 
1279 	spin_lock(&lu_gps_lock);
1280 	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1281 		if (!lu_gp->lu_gp_valid_id)
1282 			continue;
1283 		ci = &lu_gp->lu_gp_group.cg_item;
1284 		if (!strcmp(config_item_name(ci), name)) {
1285 			atomic_inc(&lu_gp->lu_gp_ref_cnt);
1286 			spin_unlock(&lu_gps_lock);
1287 			return lu_gp;
1288 		}
1289 	}
1290 	spin_unlock(&lu_gps_lock);
1291 
1292 	return NULL;
1293 }
1294 
1295 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1296 {
1297 	spin_lock(&lu_gps_lock);
1298 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
1299 	spin_unlock(&lu_gps_lock);
1300 }
1301 
1302 /*
1303  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1304  */
1305 void __core_alua_attach_lu_gp_mem(
1306 	struct t10_alua_lu_gp_member *lu_gp_mem,
1307 	struct t10_alua_lu_gp *lu_gp)
1308 {
1309 	spin_lock(&lu_gp->lu_gp_lock);
1310 	lu_gp_mem->lu_gp = lu_gp;
1311 	lu_gp_mem->lu_gp_assoc = 1;
1312 	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1313 	lu_gp->lu_gp_members++;
1314 	spin_unlock(&lu_gp->lu_gp_lock);
1315 }
1316 
1317 /*
1318  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1319  */
1320 void __core_alua_drop_lu_gp_mem(
1321 	struct t10_alua_lu_gp_member *lu_gp_mem,
1322 	struct t10_alua_lu_gp *lu_gp)
1323 {
1324 	spin_lock(&lu_gp->lu_gp_lock);
1325 	list_del(&lu_gp_mem->lu_gp_mem_list);
1326 	lu_gp_mem->lu_gp = NULL;
1327 	lu_gp_mem->lu_gp_assoc = 0;
1328 	lu_gp->lu_gp_members--;
1329 	spin_unlock(&lu_gp->lu_gp_lock);
1330 }
1331 
1332 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
1333 	struct se_subsystem_dev *su_dev,
1334 	const char *name,
1335 	int def_group)
1336 {
1337 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1338 
1339 	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1340 	if (!tg_pt_gp) {
1341 		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1342 		return NULL;
1343 	}
1344 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1345 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1346 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1347 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1348 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1349 	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
1350 	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1351 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1352 		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
1353 	/*
1354 	 * Enable both explict and implict ALUA support by default
1355 	 */
1356 	tg_pt_gp->tg_pt_gp_alua_access_type =
1357 			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
1358 	/*
1359 	 * Set the default Active/NonOptimized Delay in milliseconds
1360 	 */
1361 	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1362 	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1363 
1364 	if (def_group) {
1365 		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1366 		tg_pt_gp->tg_pt_gp_id =
1367 				su_dev->t10_alua.alua_tg_pt_gps_counter++;
1368 		tg_pt_gp->tg_pt_gp_valid_id = 1;
1369 		su_dev->t10_alua.alua_tg_pt_gps_count++;
1370 		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1371 			      &su_dev->t10_alua.tg_pt_gps_list);
1372 		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1373 	}
1374 
1375 	return tg_pt_gp;
1376 }
1377 
1378 int core_alua_set_tg_pt_gp_id(
1379 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1380 	u16 tg_pt_gp_id)
1381 {
1382 	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1383 	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1384 	u16 tg_pt_gp_id_tmp;
1385 	/*
1386 	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
1387 	 */
1388 	if (tg_pt_gp->tg_pt_gp_valid_id) {
1389 		pr_warn("ALUA TG PT Group already has a valid ID,"
1390 			" ignoring request\n");
1391 		return -EINVAL;
1392 	}
1393 
1394 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1395 	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1396 		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1397 			" 0x0000ffff reached\n");
1398 		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1399 		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1400 		return -ENOSPC;
1401 	}
1402 again:
1403 	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1404 			su_dev->t10_alua.alua_tg_pt_gps_counter++;
1405 
1406 	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
1407 			tg_pt_gp_list) {
1408 		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1409 			if (!tg_pt_gp_id)
1410 				goto again;
1411 
1412 			pr_err("ALUA Target Port Group ID: %hu already"
1413 				" exists, ignoring request\n", tg_pt_gp_id);
1414 			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1415 			return -EINVAL;
1416 		}
1417 	}
1418 
1419 	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1420 	tg_pt_gp->tg_pt_gp_valid_id = 1;
1421 	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1422 			&su_dev->t10_alua.tg_pt_gps_list);
1423 	su_dev->t10_alua.alua_tg_pt_gps_count++;
1424 	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1425 
1426 	return 0;
1427 }
1428 
1429 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1430 	struct se_port *port)
1431 {
1432 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1433 
1434 	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1435 				GFP_KERNEL);
1436 	if (!tg_pt_gp_mem) {
1437 		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1438 		return ERR_PTR(-ENOMEM);
1439 	}
1440 	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1441 	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1442 	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1443 
1444 	tg_pt_gp_mem->tg_pt = port;
1445 	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1446 
1447 	return tg_pt_gp_mem;
1448 }
1449 
1450 void core_alua_free_tg_pt_gp(
1451 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1452 {
1453 	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1454 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1455 	/*
1456 	 * Once we have reached this point, config_item_put() has already
1457 	 * been called from target_core_alua_drop_tg_pt_gp().
1458 	 *
1459 	 * Here we remove *tg_pt_gp from the global list so that
1460 	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1461 	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1462 	 */
1463 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1464 	list_del(&tg_pt_gp->tg_pt_gp_list);
1465 	su_dev->t10_alua.alua_tg_pt_gps_count--;
1466 	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1467 	/*
1468 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1469 	 * core_alua_get_tg_pt_gp_by_name() in
1470 	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1471 	 * to be released with core_alua_put_tg_pt_gp_from_name().
1472 	 */
1473 	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1474 		cpu_relax();
1475 	/*
1476 	 * Release reference to struct t10_alua_tg_pt_gp from all associated
1477 	 * struct se_port.
1478 	 */
1479 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1480 	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1481 			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1482 		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1483 			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1484 			tg_pt_gp->tg_pt_gp_members--;
1485 			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1486 		}
1487 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1488 		/*
1489 		 * tg_pt_gp_mem is associated with a single
1490 		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
1491 		 * core_alua_free_tg_pt_gp_mem().
1492 		 *
1493 		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1494 		 * assume we want to re-associate a given tg_pt_gp_mem with
1495 		 * default_tg_pt_gp.
1496 		 */
1497 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1498 		if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
1499 			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1500 					su_dev->t10_alua.default_tg_pt_gp);
1501 		} else
1502 			tg_pt_gp_mem->tg_pt_gp = NULL;
1503 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1504 
1505 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1506 	}
1507 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1508 
1509 	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1510 }
1511 
1512 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1513 {
1514 	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1515 	struct t10_alua *alua = &su_dev->t10_alua;
1516 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1517 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1518 
1519 	if (alua->alua_type != SPC3_ALUA_EMULATED)
1520 		return;
1521 
1522 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1523 	if (!tg_pt_gp_mem)
1524 		return;
1525 
1526 	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1527 		cpu_relax();
1528 
1529 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1530 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1531 	if (tg_pt_gp) {
1532 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1533 		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1534 			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1535 			tg_pt_gp->tg_pt_gp_members--;
1536 			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1537 		}
1538 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1539 		tg_pt_gp_mem->tg_pt_gp = NULL;
1540 	}
1541 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1542 
1543 	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1544 }
1545 
1546 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1547 	struct se_subsystem_dev *su_dev,
1548 	const char *name)
1549 {
1550 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1551 	struct config_item *ci;
1552 
1553 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1554 	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
1555 			tg_pt_gp_list) {
1556 		if (!tg_pt_gp->tg_pt_gp_valid_id)
1557 			continue;
1558 		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1559 		if (!strcmp(config_item_name(ci), name)) {
1560 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1561 			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1562 			return tg_pt_gp;
1563 		}
1564 	}
1565 	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1566 
1567 	return NULL;
1568 }
1569 
1570 static void core_alua_put_tg_pt_gp_from_name(
1571 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1572 {
1573 	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1574 
1575 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1576 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1577 	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1578 }
1579 
1580 /*
1581  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1582  */
1583 void __core_alua_attach_tg_pt_gp_mem(
1584 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1585 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1586 {
1587 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1588 	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1589 	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1590 	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1591 			&tg_pt_gp->tg_pt_gp_mem_list);
1592 	tg_pt_gp->tg_pt_gp_members++;
1593 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1594 }
1595 
1596 /*
1597  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1598  */
1599 static void __core_alua_drop_tg_pt_gp_mem(
1600 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1601 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1602 {
1603 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1604 	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1605 	tg_pt_gp_mem->tg_pt_gp = NULL;
1606 	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1607 	tg_pt_gp->tg_pt_gp_members--;
1608 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1609 }
1610 
1611 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
1612 {
1613 	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1614 	struct config_item *tg_pt_ci;
1615 	struct t10_alua *alua = &su_dev->t10_alua;
1616 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1617 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1618 	ssize_t len = 0;
1619 
1620 	if (alua->alua_type != SPC3_ALUA_EMULATED)
1621 		return len;
1622 
1623 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1624 	if (!tg_pt_gp_mem)
1625 		return len;
1626 
1627 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1628 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1629 	if (tg_pt_gp) {
1630 		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1631 		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1632 			" %hu\nTG Port Primary Access State: %s\nTG Port "
1633 			"Primary Access Status: %s\nTG Port Secondary Access"
1634 			" State: %s\nTG Port Secondary Access Status: %s\n",
1635 			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1636 			core_alua_dump_state(atomic_read(
1637 					&tg_pt_gp->tg_pt_gp_alua_access_state)),
1638 			core_alua_dump_status(
1639 				tg_pt_gp->tg_pt_gp_alua_access_status),
1640 			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
1641 			"Offline" : "None",
1642 			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
1643 	}
1644 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1645 
1646 	return len;
1647 }
1648 
1649 ssize_t core_alua_store_tg_pt_gp_info(
1650 	struct se_port *port,
1651 	const char *page,
1652 	size_t count)
1653 {
1654 	struct se_portal_group *tpg;
1655 	struct se_lun *lun;
1656 	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1657 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1658 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1659 	unsigned char buf[TG_PT_GROUP_NAME_BUF];
1660 	int move = 0;
1661 
1662 	tpg = port->sep_tpg;
1663 	lun = port->sep_lun;
1664 
1665 	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1666 		pr_warn("SPC3_ALUA_EMULATED not enabled for"
1667 			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1668 			tpg->se_tpg_tfo->tpg_get_tag(tpg),
1669 			config_item_name(&lun->lun_group.cg_item));
1670 		return -EINVAL;
1671 	}
1672 
1673 	if (count > TG_PT_GROUP_NAME_BUF) {
1674 		pr_err("ALUA Target Port Group alias too large!\n");
1675 		return -EINVAL;
1676 	}
1677 	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1678 	memcpy(buf, page, count);
	/*
	 * Any ALUA target port group alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
		 * struct t10_alua_tg_pt_gp.  This reference is released with
		 * core_alua_put_tg_pt_gp_from_name() below.
		 */
		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
					strstrip(buf));
		if (!tg_pt_gp_new)
			return -ENODEV;
	}
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem) {
		if (tg_pt_gp_new)
			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
		pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
		return -EINVAL;
	}

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		/*
		 * Clearing an existing tg_pt_gp association, and replacing
		 * with the default_tg_pt_gp.
		 */
		if (!tg_pt_gp_new) {
			pr_debug("Target_Core_ConfigFS: Moving"
				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
				" alua/%s, ID: %hu back to"
				" default_tg_pt_gp\n",
				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
				tpg->se_tpg_tfo->tpg_get_tag(tpg),
				config_item_name(&lun->lun_group.cg_item),
				config_item_name(
					&tg_pt_gp->tg_pt_gp_group.cg_item),
				tg_pt_gp->tg_pt_gp_id);

			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
					su_dev->t10_alua.default_tg_pt_gp);
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
		 */
		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
		move = 1;
	}
	/*
	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
	 */
	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		config_item_name(&lun->lun_group.cg_item),
		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
		tg_pt_gp_new->tg_pt_gp_id);

	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
	return count;
}

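/*
 * alua_access_type reports which ALUA management styles this target
 * port group advertises: implicit (state changes initiated by the
 * target itself), explicit (state changes requested via SET TARGET
 * PORT GROUPS), both, or none.  The TPGS_*_ALUA flag spelling follows
 * target_core_alua.h.
 */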
ssize_t core_alua_show_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
		return sprintf(page, "Implicit and Explicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
		return sprintf(page, "Implicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
		return sprintf(page, "Explicit\n");
	else
		return sprintf(page, "None\n");
}

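/*
 * The store side takes the raw TPGS field encoding from spc4r17:
 * 0 = none, 1 = implicit, 2 = explicit, 3 = implicit and explicit.
 * E.g. "echo 3 > alua_access_type" enables both management styles.
 */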
ssize_t core_alua_store_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_access_type\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
		pr_err("Illegal value for alua_access_type:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	if (tmp == 3)
		tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
	else if (tmp == 2)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
	else if (tmp == 1)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
	else
		tg_pt_gp->tg_pt_gp_alua_access_type = 0;

	return count;
}

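/*
 * nonop_delay_msecs: optional per-group delay, in milliseconds,
 * injected before processing I/O received while the group is in the
 * ALUA Active/NonOptimized state; capped at ALUA_MAX_NONOP_DELAY_MSECS.
 */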
ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}

ssize_t core_alua_store_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract nonop_delay_msecs\n");
		return -EINVAL;
	}
	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_NONOP_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;

	return count;
}

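/*
 * trans_delay_msecs: per-group delay, in milliseconds, imposed while
 * an ALUA primary state transition is in progress; capped at
 * ALUA_MAX_TRANS_DELAY_MSECS.
 */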
ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}

ssize_t core_alua_store_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract trans_delay_msecs\n");
		return -EINVAL;
	}
	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
		pr_err("Passed trans_delay_msecs: %lu, exceeds"
			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_TRANS_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;

	return count;
}

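/*
 * preferred: the PREF bit reported for this group in REPORT TARGET
 * PORT GROUPS descriptors (see spc4r17 section 6.27); 0 or 1.
 */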
ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}

ssize_t core_alua_store_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract preferred ALUA value\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_pref = (int)tmp;

	return count;
}

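/*
 * alua_tg_pt_offline: secondary (per target port) ALUA access state.
 * Writing 1 transitions this individual port to Offline, writing 0
 * brings it back; this is independent of the primary state of the
 * group the port belongs to.
 */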
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
}

ssize_t core_alua_store_offline_bit(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned long tmp;
	int ret;

	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_offline value\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
				tmp);
		return -EINVAL;
	}
	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem) {
		pr_err("Unable to locate *tg_pt_gp_mem\n");
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
			lun->lun_sep, 0, (int)tmp);
	if (ret < 0)
		return -EINVAL;

	return count;
}

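/*
 * alua_tg_pt_status: records how the secondary state was last changed.
 * Accepted values are ALUA_STATUS_NONE,
 * ALUA_STATUS_ALTERED_BY_EXPLICT_STPG and
 * ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA (spelling per target_core_alua.h).
 */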
ssize_t core_alua_show_secondary_status(
	struct se_lun *lun,
	char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
}

ssize_t core_alua_store_secondary_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_status\n");
		return -EINVAL;
	}
	if ((tmp != ALUA_STATUS_NONE) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
				tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;

	return count;
}

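/*
 * alua_tg_pt_write_md: when set to 1, secondary state changes made
 * through alua_tg_pt_offline are also written out to ALUA metadata so
 * they can survive a target restart.
 */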
ssize_t core_alua_show_secondary_write_metadata(
	struct se_lun *lun,
	char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n",
			lun->lun_sep->sep_tg_pt_secondary_write_md);
}

ssize_t core_alua_store_secondary_write_metadata(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_write_md\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_write_md:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;

	return count;
}

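/*
 * Called at device setup time to select one of three ALUA modes:
 * SPC_ALUA_PASSTHROUGH for pSCSI passthrough (or when forced),
 * SPC3_ALUA_EMULATED for devices reporting SPC-3 or later, and
 * SPC2_ALUA_DISABLED otherwise.
 */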
int core_setup_alua(struct se_device *dev, int force_pt)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	/*
	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
	 * of the underlying SCSI hardware.  In Linux/SCSI terms, this can
	 * cause a problem because libata and some SATA RAID HBAs appear
	 * under Linux/SCSI, but emulate SCSI logic themselves.
	 */
	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
		alua->alua_type = SPC_ALUA_PASSTHROUGH;
		alua->alua_state_check = &core_alua_state_check_nop;
		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
			" emulation\n", dev->transport->name);
		return 0;
	}
	/*
	 * If SPC-3 or above is reported by real or emulated struct se_device,
	 * use emulated ALUA.
	 */
	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
			" device\n", dev->transport->name);
		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem))
			return PTR_ERR(lu_gp_mem);

		alua->alua_type = SPC3_ALUA_EMULATED;
		alua->alua_state_check = &core_alua_state_check;
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		pr_debug("%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			dev->transport->name);
	} else {
		alua->alua_type = SPC2_ALUA_DISABLED;
		alua->alua_state_check = &core_alua_state_check_nop;
		pr_debug("%s: Disabling ALUA Emulation for SPC-2"
			" device\n", dev->transport->name);
	}

	return 0;
}