1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /******************************************************************************* 3 * Filename: target_core_alua.c 4 * 5 * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA) 6 * 7 * (c) Copyright 2009-2013 Datera, Inc. 8 * 9 * Nicholas A. Bellinger <nab@kernel.org> 10 * 11 ******************************************************************************/ 12 13 #include <linux/slab.h> 14 #include <linux/spinlock.h> 15 #include <linux/configfs.h> 16 #include <linux/delay.h> 17 #include <linux/export.h> 18 #include <linux/fcntl.h> 19 #include <linux/file.h> 20 #include <linux/fs.h> 21 #include <scsi/scsi_proto.h> 22 #include <asm/unaligned.h> 23 24 #include <target/target_core_base.h> 25 #include <target/target_core_backend.h> 26 #include <target/target_core_fabric.h> 27 28 #include "target_core_internal.h" 29 #include "target_core_alua.h" 30 #include "target_core_ua.h" 31 32 static sense_reason_t core_alua_check_transition(int state, int valid, 33 int *primary, int explicit); 34 static int core_alua_set_tg_pt_secondary_state( 35 struct se_lun *lun, int explicit, int offline); 36 37 static char *core_alua_dump_state(int state); 38 39 static void __target_attach_tg_pt_gp(struct se_lun *lun, 40 struct t10_alua_tg_pt_gp *tg_pt_gp); 41 42 static u16 alua_lu_gps_counter; 43 static u32 alua_lu_gps_count; 44 45 static DEFINE_SPINLOCK(lu_gps_lock); 46 static LIST_HEAD(lu_gps_list); 47 48 struct t10_alua_lu_gp *default_lu_gp; 49 50 /* 51 * REPORT REFERRALS 52 * 53 * See sbc3r35 section 5.23 54 */ 55 sense_reason_t 56 target_emulate_report_referrals(struct se_cmd *cmd) 57 { 58 struct se_device *dev = cmd->se_dev; 59 struct t10_alua_lba_map *map; 60 struct t10_alua_lba_map_member *map_mem; 61 unsigned char *buf; 62 u32 rd_len = 0, off; 63 64 if (cmd->data_length < 4) { 65 pr_warn("REPORT REFERRALS allocation length %u too" 66 " small\n", cmd->data_length); 67 return TCM_INVALID_CDB_FIELD; 68 } 69 70 buf = transport_kmap_data_sg(cmd); 71 if (!buf) 72 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 73 74 off = 4; 75 spin_lock(&dev->t10_alua.lba_map_lock); 76 if (list_empty(&dev->t10_alua.lba_map_list)) { 77 spin_unlock(&dev->t10_alua.lba_map_lock); 78 transport_kunmap_data_sg(cmd); 79 80 return TCM_UNSUPPORTED_SCSI_OPCODE; 81 } 82 83 list_for_each_entry(map, &dev->t10_alua.lba_map_list, 84 lba_map_list) { 85 int desc_num = off + 3; 86 int pg_num; 87 88 off += 4; 89 if (cmd->data_length > off) 90 put_unaligned_be64(map->lba_map_first_lba, &buf[off]); 91 off += 8; 92 if (cmd->data_length > off) 93 put_unaligned_be64(map->lba_map_last_lba, &buf[off]); 94 off += 8; 95 rd_len += 20; 96 pg_num = 0; 97 list_for_each_entry(map_mem, &map->lba_map_mem_list, 98 lba_map_mem_list) { 99 int alua_state = map_mem->lba_map_mem_alua_state; 100 int alua_pg_id = map_mem->lba_map_mem_alua_pg_id; 101 102 if (cmd->data_length > off) 103 buf[off] = alua_state & 0x0f; 104 off += 2; 105 if (cmd->data_length > off) 106 buf[off] = (alua_pg_id >> 8) & 0xff; 107 off++; 108 if (cmd->data_length > off) 109 buf[off] = (alua_pg_id & 0xff); 110 off++; 111 rd_len += 4; 112 pg_num++; 113 } 114 if (cmd->data_length > desc_num) 115 buf[desc_num] = pg_num; 116 } 117 spin_unlock(&dev->t10_alua.lba_map_lock); 118 119 /* 120 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload 121 */ 122 put_unaligned_be16(rd_len, &buf[2]); 123 124 transport_kunmap_data_sg(cmd); 125 126 target_complete_cmd(cmd, GOOD); 127 return 0; 128 } 129 130 /* 131 * REPORT_TARGET_PORT_GROUPS 132 * 133 * See spc4r17 section 6.27 134 */ 135 sense_reason_t 136 target_emulate_report_target_port_groups(struct se_cmd *cmd) 137 { 138 struct se_device *dev = cmd->se_dev; 139 struct t10_alua_tg_pt_gp *tg_pt_gp; 140 struct se_lun *lun; 141 unsigned char *buf; 142 u32 rd_len = 0, off; 143 int ext_hdr = (cmd->t_task_cdb[1] & 0x20); 144 145 /* 146 * Skip over RESERVED area to first Target port group descriptor 147 * depending on the PARAMETER DATA FORMAT type.. 148 */ 149 if (ext_hdr != 0) 150 off = 8; 151 else 152 off = 4; 153 154 if (cmd->data_length < off) { 155 pr_warn("REPORT TARGET PORT GROUPS allocation length %u too" 156 " small for %s header\n", cmd->data_length, 157 (ext_hdr) ? "extended" : "normal"); 158 return TCM_INVALID_CDB_FIELD; 159 } 160 buf = transport_kmap_data_sg(cmd); 161 if (!buf) 162 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 163 164 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 165 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 166 tg_pt_gp_list) { 167 /* 168 * Check if the Target port group and Target port descriptor list 169 * based on tg_pt_gp_members count will fit into the response payload. 170 * Otherwise, bump rd_len to let the initiator know we have exceeded 171 * the allocation length and the response is truncated. 172 */ 173 if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) > 174 cmd->data_length) { 175 rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4); 176 continue; 177 } 178 /* 179 * PREF: Preferred target port bit, determine if this 180 * bit should be set for port group. 181 */ 182 if (tg_pt_gp->tg_pt_gp_pref) 183 buf[off] = 0x80; 184 /* 185 * Set the ASYMMETRIC ACCESS State 186 */ 187 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff; 188 /* 189 * Set supported ASYMMETRIC ACCESS State bits 190 */ 191 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states; 192 /* 193 * TARGET PORT GROUP 194 */ 195 put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]); 196 off += 2; 197 198 off++; /* Skip over Reserved */ 199 /* 200 * STATUS CODE 201 */ 202 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff); 203 /* 204 * Vendor Specific field 205 */ 206 buf[off++] = 0x00; 207 /* 208 * TARGET PORT COUNT 209 */ 210 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff); 211 rd_len += 8; 212 213 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 214 list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, 215 lun_tg_pt_gp_link) { 216 /* 217 * Start Target Port descriptor format 218 * 219 * See spc4r17 section 6.2.7 Table 247 220 */ 221 off += 2; /* Skip over Obsolete */ 222 /* 223 * Set RELATIVE TARGET PORT IDENTIFIER 224 */ 225 put_unaligned_be16(lun->lun_rtpi, &buf[off]); 226 off += 2; 227 rd_len += 4; 228 } 229 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 230 } 231 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 232 /* 233 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload 234 */ 235 put_unaligned_be32(rd_len, &buf[0]); 236 237 /* 238 * Fill in the Extended header parameter data format if requested 239 */ 240 if (ext_hdr != 0) { 241 buf[4] = 0x10; 242 /* 243 * Set the implicit transition time (in seconds) for the application 244 * client to use as a base for it's transition timeout value. 245 * 246 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN 247 * this CDB was received upon to determine this value individually 248 * for ALUA target port group. 249 */ 250 spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock); 251 tg_pt_gp = cmd->se_lun->lun_tg_pt_gp; 252 if (tg_pt_gp) 253 buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; 254 spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock); 255 } 256 transport_kunmap_data_sg(cmd); 257 258 target_complete_cmd_with_length(cmd, GOOD, rd_len + 4); 259 return 0; 260 } 261 262 /* 263 * SET_TARGET_PORT_GROUPS for explicit ALUA operation. 264 * 265 * See spc4r17 section 6.35 266 */ 267 sense_reason_t 268 target_emulate_set_target_port_groups(struct se_cmd *cmd) 269 { 270 struct se_device *dev = cmd->se_dev; 271 struct se_lun *l_lun = cmd->se_lun; 272 struct se_node_acl *nacl = cmd->se_sess->se_node_acl; 273 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; 274 unsigned char *buf; 275 unsigned char *ptr; 276 sense_reason_t rc = TCM_NO_SENSE; 277 u32 len = 4; /* Skip over RESERVED area in header */ 278 int alua_access_state, primary = 0, valid_states; 279 u16 tg_pt_id, rtpi; 280 281 if (cmd->data_length < 4) { 282 pr_warn("SET TARGET PORT GROUPS parameter list length %u too" 283 " small\n", cmd->data_length); 284 return TCM_INVALID_PARAMETER_LIST; 285 } 286 287 buf = transport_kmap_data_sg(cmd); 288 if (!buf) 289 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 290 291 /* 292 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed 293 * for the local tg_pt_gp. 294 */ 295 spin_lock(&l_lun->lun_tg_pt_gp_lock); 296 l_tg_pt_gp = l_lun->lun_tg_pt_gp; 297 if (!l_tg_pt_gp) { 298 spin_unlock(&l_lun->lun_tg_pt_gp_lock); 299 pr_err("Unable to access l_lun->tg_pt_gp\n"); 300 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 301 goto out; 302 } 303 304 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { 305 spin_unlock(&l_lun->lun_tg_pt_gp_lock); 306 pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 307 " while TPGS_EXPLICIT_ALUA is disabled\n"); 308 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 309 goto out; 310 } 311 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; 312 spin_unlock(&l_lun->lun_tg_pt_gp_lock); 313 314 ptr = &buf[4]; /* Skip over RESERVED area in header */ 315 316 while (len < cmd->data_length) { 317 bool found = false; 318 alua_access_state = (ptr[0] & 0x0f); 319 /* 320 * Check the received ALUA access state, and determine if 321 * the state is a primary or secondary target port asymmetric 322 * access state. 323 */ 324 rc = core_alua_check_transition(alua_access_state, valid_states, 325 &primary, 1); 326 if (rc) { 327 /* 328 * If the SET TARGET PORT GROUPS attempts to establish 329 * an invalid combination of target port asymmetric 330 * access states or attempts to establish an 331 * unsupported target port asymmetric access state, 332 * then the command shall be terminated with CHECK 333 * CONDITION status, with the sense key set to ILLEGAL 334 * REQUEST, and the additional sense code set to INVALID 335 * FIELD IN PARAMETER LIST. 336 */ 337 goto out; 338 } 339 340 /* 341 * If the ASYMMETRIC ACCESS STATE field (see table 267) 342 * specifies a primary target port asymmetric access state, 343 * then the TARGET PORT GROUP OR TARGET PORT field specifies 344 * a primary target port group for which the primary target 345 * port asymmetric access state shall be changed. If the 346 * ASYMMETRIC ACCESS STATE field specifies a secondary target 347 * port asymmetric access state, then the TARGET PORT GROUP OR 348 * TARGET PORT field specifies the relative target port 349 * identifier (see 3.1.120) of the target port for which the 350 * secondary target port asymmetric access state shall be 351 * changed. 352 */ 353 if (primary) { 354 tg_pt_id = get_unaligned_be16(ptr + 2); 355 /* 356 * Locate the matching target port group ID from 357 * the global tg_pt_gp list 358 */ 359 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 360 list_for_each_entry(tg_pt_gp, 361 &dev->t10_alua.tg_pt_gps_list, 362 tg_pt_gp_list) { 363 if (!tg_pt_gp->tg_pt_gp_valid_id) 364 continue; 365 366 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) 367 continue; 368 369 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); 370 371 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 372 373 if (!core_alua_do_port_transition(tg_pt_gp, 374 dev, l_lun, nacl, 375 alua_access_state, 1)) 376 found = true; 377 378 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 379 atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); 380 break; 381 } 382 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 383 } else { 384 struct se_lun *lun; 385 386 /* 387 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify 388 * the Target Port in question for the the incoming 389 * SET_TARGET_PORT_GROUPS op. 390 */ 391 rtpi = get_unaligned_be16(ptr + 2); 392 /* 393 * Locate the matching relative target port identifier 394 * for the struct se_device storage object. 395 */ 396 spin_lock(&dev->se_port_lock); 397 list_for_each_entry(lun, &dev->dev_sep_list, 398 lun_dev_link) { 399 if (lun->lun_rtpi != rtpi) 400 continue; 401 402 // XXX: racy unlock 403 spin_unlock(&dev->se_port_lock); 404 405 if (!core_alua_set_tg_pt_secondary_state( 406 lun, 1, 1)) 407 found = true; 408 409 spin_lock(&dev->se_port_lock); 410 break; 411 } 412 spin_unlock(&dev->se_port_lock); 413 } 414 415 if (!found) { 416 rc = TCM_INVALID_PARAMETER_LIST; 417 goto out; 418 } 419 420 ptr += 4; 421 len += 4; 422 } 423 424 out: 425 transport_kunmap_data_sg(cmd); 426 if (!rc) 427 target_complete_cmd(cmd, GOOD); 428 return rc; 429 } 430 431 static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq) 432 { 433 /* 434 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; 435 * The ALUA additional sense code qualifier (ASCQ) is determined 436 * by the ALUA primary or secondary access state.. 437 */ 438 pr_debug("[%s]: ALUA TG Port not available, " 439 "SenseKey: NOT_READY, ASC/ASCQ: " 440 "0x04/0x%02x\n", 441 cmd->se_tfo->fabric_name, alua_ascq); 442 443 cmd->scsi_asc = 0x04; 444 cmd->scsi_ascq = alua_ascq; 445 } 446 447 static inline void core_alua_state_nonoptimized( 448 struct se_cmd *cmd, 449 unsigned char *cdb, 450 int nonop_delay_msecs) 451 { 452 /* 453 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked 454 * later to determine if processing of this cmd needs to be 455 * temporarily delayed for the Active/NonOptimized primary access state. 456 */ 457 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; 458 cmd->alua_nonop_delay = nonop_delay_msecs; 459 } 460 461 static inline int core_alua_state_lba_dependent( 462 struct se_cmd *cmd, 463 struct t10_alua_tg_pt_gp *tg_pt_gp) 464 { 465 struct se_device *dev = cmd->se_dev; 466 u64 segment_size, segment_mult, sectors, lba; 467 468 /* Only need to check for cdb actually containing LBAs */ 469 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)) 470 return 0; 471 472 spin_lock(&dev->t10_alua.lba_map_lock); 473 segment_size = dev->t10_alua.lba_map_segment_size; 474 segment_mult = dev->t10_alua.lba_map_segment_multiplier; 475 sectors = cmd->data_length / dev->dev_attrib.block_size; 476 477 lba = cmd->t_task_lba; 478 while (lba < cmd->t_task_lba + sectors) { 479 struct t10_alua_lba_map *cur_map = NULL, *map; 480 struct t10_alua_lba_map_member *map_mem; 481 482 list_for_each_entry(map, &dev->t10_alua.lba_map_list, 483 lba_map_list) { 484 u64 start_lba, last_lba; 485 u64 first_lba = map->lba_map_first_lba; 486 487 if (segment_mult) { 488 u64 tmp = lba; 489 start_lba = do_div(tmp, segment_size * segment_mult); 490 491 last_lba = first_lba + segment_size - 1; 492 if (start_lba >= first_lba && 493 start_lba <= last_lba) { 494 lba += segment_size; 495 cur_map = map; 496 break; 497 } 498 } else { 499 last_lba = map->lba_map_last_lba; 500 if (lba >= first_lba && lba <= last_lba) { 501 lba = last_lba + 1; 502 cur_map = map; 503 break; 504 } 505 } 506 } 507 if (!cur_map) { 508 spin_unlock(&dev->t10_alua.lba_map_lock); 509 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); 510 return 1; 511 } 512 list_for_each_entry(map_mem, &cur_map->lba_map_mem_list, 513 lba_map_mem_list) { 514 if (map_mem->lba_map_mem_alua_pg_id != 515 tg_pt_gp->tg_pt_gp_id) 516 continue; 517 switch(map_mem->lba_map_mem_alua_state) { 518 case ALUA_ACCESS_STATE_STANDBY: 519 spin_unlock(&dev->t10_alua.lba_map_lock); 520 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); 521 return 1; 522 case ALUA_ACCESS_STATE_UNAVAILABLE: 523 spin_unlock(&dev->t10_alua.lba_map_lock); 524 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); 525 return 1; 526 default: 527 break; 528 } 529 } 530 } 531 spin_unlock(&dev->t10_alua.lba_map_lock); 532 return 0; 533 } 534 535 static inline int core_alua_state_standby( 536 struct se_cmd *cmd, 537 unsigned char *cdb) 538 { 539 /* 540 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by 541 * spc4r17 section 5.9.2.4.4 542 */ 543 switch (cdb[0]) { 544 case INQUIRY: 545 case LOG_SELECT: 546 case LOG_SENSE: 547 case MODE_SELECT: 548 case MODE_SENSE: 549 case REPORT_LUNS: 550 case RECEIVE_DIAGNOSTIC: 551 case SEND_DIAGNOSTIC: 552 case READ_CAPACITY: 553 return 0; 554 case SERVICE_ACTION_IN_16: 555 switch (cdb[1] & 0x1f) { 556 case SAI_READ_CAPACITY_16: 557 return 0; 558 default: 559 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); 560 return 1; 561 } 562 case MAINTENANCE_IN: 563 switch (cdb[1] & 0x1f) { 564 case MI_REPORT_TARGET_PGS: 565 return 0; 566 default: 567 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); 568 return 1; 569 } 570 case MAINTENANCE_OUT: 571 switch (cdb[1]) { 572 case MO_SET_TARGET_PGS: 573 return 0; 574 default: 575 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); 576 return 1; 577 } 578 case REQUEST_SENSE: 579 case PERSISTENT_RESERVE_IN: 580 case PERSISTENT_RESERVE_OUT: 581 case READ_BUFFER: 582 case WRITE_BUFFER: 583 return 0; 584 default: 585 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY); 586 return 1; 587 } 588 589 return 0; 590 } 591 592 static inline int core_alua_state_unavailable( 593 struct se_cmd *cmd, 594 unsigned char *cdb) 595 { 596 /* 597 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by 598 * spc4r17 section 5.9.2.4.5 599 */ 600 switch (cdb[0]) { 601 case INQUIRY: 602 case REPORT_LUNS: 603 return 0; 604 case MAINTENANCE_IN: 605 switch (cdb[1] & 0x1f) { 606 case MI_REPORT_TARGET_PGS: 607 return 0; 608 default: 609 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); 610 return 1; 611 } 612 case MAINTENANCE_OUT: 613 switch (cdb[1]) { 614 case MO_SET_TARGET_PGS: 615 return 0; 616 default: 617 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); 618 return 1; 619 } 620 case REQUEST_SENSE: 621 case READ_BUFFER: 622 case WRITE_BUFFER: 623 return 0; 624 default: 625 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE); 626 return 1; 627 } 628 629 return 0; 630 } 631 632 static inline int core_alua_state_transition( 633 struct se_cmd *cmd, 634 unsigned char *cdb) 635 { 636 /* 637 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by 638 * spc4r17 section 5.9.2.5 639 */ 640 switch (cdb[0]) { 641 case INQUIRY: 642 case REPORT_LUNS: 643 return 0; 644 case MAINTENANCE_IN: 645 switch (cdb[1] & 0x1f) { 646 case MI_REPORT_TARGET_PGS: 647 return 0; 648 default: 649 set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION); 650 return 1; 651 } 652 case REQUEST_SENSE: 653 case READ_BUFFER: 654 case WRITE_BUFFER: 655 return 0; 656 default: 657 set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION); 658 return 1; 659 } 660 661 return 0; 662 } 663 664 /* 665 * return 1: Is used to signal LUN not accessible, and check condition/not ready 666 * return 0: Used to signal success 667 * return -1: Used to signal failure, and invalid cdb field 668 */ 669 sense_reason_t 670 target_alua_state_check(struct se_cmd *cmd) 671 { 672 struct se_device *dev = cmd->se_dev; 673 unsigned char *cdb = cmd->t_task_cdb; 674 struct se_lun *lun = cmd->se_lun; 675 struct t10_alua_tg_pt_gp *tg_pt_gp; 676 int out_alua_state, nonop_delay_msecs; 677 678 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 679 return 0; 680 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) 681 return 0; 682 683 /* 684 * First, check for a struct se_port specific secondary ALUA target port 685 * access state: OFFLINE 686 */ 687 if (atomic_read(&lun->lun_tg_pt_secondary_offline)) { 688 pr_debug("ALUA: Got secondary offline status for local" 689 " target port\n"); 690 set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE); 691 return TCM_CHECK_CONDITION_NOT_READY; 692 } 693 694 if (!lun->lun_tg_pt_gp) 695 return 0; 696 697 spin_lock(&lun->lun_tg_pt_gp_lock); 698 tg_pt_gp = lun->lun_tg_pt_gp; 699 out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state; 700 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 701 702 // XXX: keeps using tg_pt_gp witout reference after unlock 703 spin_unlock(&lun->lun_tg_pt_gp_lock); 704 /* 705 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional 706 * statement so the compiler knows explicitly to check this case first. 707 * For the Optimized ALUA access state case, we want to process the 708 * incoming fabric cmd ASAP.. 709 */ 710 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED) 711 return 0; 712 713 switch (out_alua_state) { 714 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 715 core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs); 716 break; 717 case ALUA_ACCESS_STATE_STANDBY: 718 if (core_alua_state_standby(cmd, cdb)) 719 return TCM_CHECK_CONDITION_NOT_READY; 720 break; 721 case ALUA_ACCESS_STATE_UNAVAILABLE: 722 if (core_alua_state_unavailable(cmd, cdb)) 723 return TCM_CHECK_CONDITION_NOT_READY; 724 break; 725 case ALUA_ACCESS_STATE_TRANSITION: 726 if (core_alua_state_transition(cmd, cdb)) 727 return TCM_CHECK_CONDITION_NOT_READY; 728 break; 729 case ALUA_ACCESS_STATE_LBA_DEPENDENT: 730 if (core_alua_state_lba_dependent(cmd, tg_pt_gp)) 731 return TCM_CHECK_CONDITION_NOT_READY; 732 break; 733 /* 734 * OFFLINE is a secondary ALUA target port group access state, that is 735 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1 736 */ 737 case ALUA_ACCESS_STATE_OFFLINE: 738 default: 739 pr_err("Unknown ALUA access state: 0x%02x\n", 740 out_alua_state); 741 return TCM_INVALID_CDB_FIELD; 742 } 743 744 return 0; 745 } 746 747 /* 748 * Check implicit and explicit ALUA state change request. 749 */ 750 static sense_reason_t 751 core_alua_check_transition(int state, int valid, int *primary, int explicit) 752 { 753 /* 754 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are 755 * defined as primary target port asymmetric access states. 756 */ 757 switch (state) { 758 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: 759 if (!(valid & ALUA_AO_SUP)) 760 goto not_supported; 761 *primary = 1; 762 break; 763 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 764 if (!(valid & ALUA_AN_SUP)) 765 goto not_supported; 766 *primary = 1; 767 break; 768 case ALUA_ACCESS_STATE_STANDBY: 769 if (!(valid & ALUA_S_SUP)) 770 goto not_supported; 771 *primary = 1; 772 break; 773 case ALUA_ACCESS_STATE_UNAVAILABLE: 774 if (!(valid & ALUA_U_SUP)) 775 goto not_supported; 776 *primary = 1; 777 break; 778 case ALUA_ACCESS_STATE_LBA_DEPENDENT: 779 if (!(valid & ALUA_LBD_SUP)) 780 goto not_supported; 781 *primary = 1; 782 break; 783 case ALUA_ACCESS_STATE_OFFLINE: 784 /* 785 * OFFLINE state is defined as a secondary target port 786 * asymmetric access state. 787 */ 788 if (!(valid & ALUA_O_SUP)) 789 goto not_supported; 790 *primary = 0; 791 break; 792 case ALUA_ACCESS_STATE_TRANSITION: 793 if (!(valid & ALUA_T_SUP) || explicit) 794 /* 795 * Transitioning is set internally and by tcmu daemon, 796 * and cannot be selected through a STPG. 797 */ 798 goto not_supported; 799 *primary = 0; 800 break; 801 default: 802 pr_err("Unknown ALUA access state: 0x%02x\n", state); 803 return TCM_INVALID_PARAMETER_LIST; 804 } 805 806 return 0; 807 808 not_supported: 809 pr_err("ALUA access state %s not supported", 810 core_alua_dump_state(state)); 811 return TCM_INVALID_PARAMETER_LIST; 812 } 813 814 static char *core_alua_dump_state(int state) 815 { 816 switch (state) { 817 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: 818 return "Active/Optimized"; 819 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 820 return "Active/NonOptimized"; 821 case ALUA_ACCESS_STATE_LBA_DEPENDENT: 822 return "LBA Dependent"; 823 case ALUA_ACCESS_STATE_STANDBY: 824 return "Standby"; 825 case ALUA_ACCESS_STATE_UNAVAILABLE: 826 return "Unavailable"; 827 case ALUA_ACCESS_STATE_OFFLINE: 828 return "Offline"; 829 case ALUA_ACCESS_STATE_TRANSITION: 830 return "Transitioning"; 831 default: 832 return "Unknown"; 833 } 834 835 return NULL; 836 } 837 838 char *core_alua_dump_status(int status) 839 { 840 switch (status) { 841 case ALUA_STATUS_NONE: 842 return "None"; 843 case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG: 844 return "Altered by Explicit STPG"; 845 case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA: 846 return "Altered by Implicit ALUA"; 847 default: 848 return "Unknown"; 849 } 850 851 return NULL; 852 } 853 854 /* 855 * Used by fabric modules to determine when we need to delay processing 856 * for the Active/NonOptimized paths.. 857 */ 858 int core_alua_check_nonop_delay( 859 struct se_cmd *cmd) 860 { 861 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED)) 862 return 0; 863 if (in_interrupt()) 864 return 0; 865 /* 866 * The ALUA Active/NonOptimized access state delay can be disabled 867 * in via configfs with a value of zero 868 */ 869 if (!cmd->alua_nonop_delay) 870 return 0; 871 /* 872 * struct se_cmd->alua_nonop_delay gets set by a target port group 873 * defined interval in core_alua_state_nonoptimized() 874 */ 875 msleep_interruptible(cmd->alua_nonop_delay); 876 return 0; 877 } 878 EXPORT_SYMBOL(core_alua_check_nonop_delay); 879 880 static int core_alua_write_tpg_metadata( 881 const char *path, 882 unsigned char *md_buf, 883 u32 md_buf_len) 884 { 885 struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600); 886 loff_t pos = 0; 887 int ret; 888 889 if (IS_ERR(file)) { 890 pr_err("filp_open(%s) for ALUA metadata failed\n", path); 891 return -ENODEV; 892 } 893 ret = kernel_write(file, md_buf, md_buf_len, &pos); 894 if (ret < 0) 895 pr_err("Error writing ALUA metadata file: %s\n", path); 896 fput(file); 897 return (ret < 0) ? -EIO : 0; 898 } 899 900 static int core_alua_update_tpg_primary_metadata( 901 struct t10_alua_tg_pt_gp *tg_pt_gp) 902 { 903 unsigned char *md_buf; 904 struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; 905 char *path; 906 int len, rc; 907 908 lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex); 909 910 md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); 911 if (!md_buf) { 912 pr_err("Unable to allocate buf for ALUA metadata\n"); 913 return -ENOMEM; 914 } 915 916 len = snprintf(md_buf, ALUA_MD_BUF_LEN, 917 "tg_pt_gp_id=%hu\n" 918 "alua_access_state=0x%02x\n" 919 "alua_access_status=0x%02x\n", 920 tg_pt_gp->tg_pt_gp_id, 921 tg_pt_gp->tg_pt_gp_alua_access_state, 922 tg_pt_gp->tg_pt_gp_alua_access_status); 923 924 rc = -ENOMEM; 925 path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root, 926 &wwn->unit_serial[0], 927 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); 928 if (path) { 929 rc = core_alua_write_tpg_metadata(path, md_buf, len); 930 kfree(path); 931 } 932 kfree(md_buf); 933 return rc; 934 } 935 936 static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) 937 { 938 struct se_dev_entry *se_deve; 939 struct se_lun *lun; 940 struct se_lun_acl *lacl; 941 942 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 943 list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, 944 lun_tg_pt_gp_link) { 945 /* 946 * After an implicit target port asymmetric access state 947 * change, a device server shall establish a unit attention 948 * condition for the initiator port associated with every I_T 949 * nexus with the additional sense code set to ASYMMETRIC 950 * ACCESS STATE CHANGED. 951 * 952 * After an explicit target port asymmetric access state 953 * change, a device server shall establish a unit attention 954 * condition with the additional sense code set to ASYMMETRIC 955 * ACCESS STATE CHANGED for the initiator port associated with 956 * every I_T nexus other than the I_T nexus on which the SET 957 * TARGET PORT GROUPS command 958 */ 959 if (!percpu_ref_tryget_live(&lun->lun_ref)) 960 continue; 961 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 962 963 spin_lock(&lun->lun_deve_lock); 964 list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) { 965 lacl = rcu_dereference_check(se_deve->se_lun_acl, 966 lockdep_is_held(&lun->lun_deve_lock)); 967 968 /* 969 * spc4r37 p.242: 970 * After an explicit target port asymmetric access 971 * state change, a device server shall establish a 972 * unit attention condition with the additional sense 973 * code set to ASYMMETRIC ACCESS STATE CHANGED for 974 * the initiator port associated with every I_T nexus 975 * other than the I_T nexus on which the SET TARGET 976 * PORT GROUPS command was received. 977 */ 978 if ((tg_pt_gp->tg_pt_gp_alua_access_status == 979 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && 980 (tg_pt_gp->tg_pt_gp_alua_lun != NULL) && 981 (tg_pt_gp->tg_pt_gp_alua_lun == lun)) 982 continue; 983 984 /* 985 * se_deve->se_lun_acl pointer may be NULL for a 986 * entry created without explicit Node+MappedLUN ACLs 987 */ 988 if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && 989 (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl)) 990 continue; 991 992 core_scsi3_ua_allocate(se_deve, 0x2A, 993 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); 994 } 995 spin_unlock(&lun->lun_deve_lock); 996 997 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 998 percpu_ref_put(&lun->lun_ref); 999 } 1000 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1001 } 1002 1003 static int core_alua_do_transition_tg_pt( 1004 struct t10_alua_tg_pt_gp *tg_pt_gp, 1005 int new_state, 1006 int explicit) 1007 { 1008 int prev_state; 1009 1010 mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex); 1011 /* Nothing to be done here */ 1012 if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) { 1013 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); 1014 return 0; 1015 } 1016 1017 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) { 1018 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); 1019 return -EAGAIN; 1020 } 1021 1022 /* 1023 * Save the old primary ALUA access state, and set the current state 1024 * to ALUA_ACCESS_STATE_TRANSITION. 1025 */ 1026 prev_state = tg_pt_gp->tg_pt_gp_alua_access_state; 1027 tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION; 1028 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 1029 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1030 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1031 1032 core_alua_queue_state_change_ua(tg_pt_gp); 1033 1034 if (new_state == ALUA_ACCESS_STATE_TRANSITION) { 1035 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); 1036 return 0; 1037 } 1038 1039 /* 1040 * Check for the optional ALUA primary state transition delay 1041 */ 1042 if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) 1043 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 1044 1045 /* 1046 * Set the current primary ALUA access state to the requested new state 1047 */ 1048 tg_pt_gp->tg_pt_gp_alua_access_state = new_state; 1049 1050 /* 1051 * Update the ALUA metadata buf that has been allocated in 1052 * core_alua_do_port_transition(), this metadata will be written 1053 * to struct file. 1054 * 1055 * Note that there is the case where we do not want to update the 1056 * metadata when the saved metadata is being parsed in userspace 1057 * when setting the existing port access state and access status. 1058 * 1059 * Also note that the failure to write out the ALUA metadata to 1060 * struct file does NOT affect the actual ALUA transition. 1061 */ 1062 if (tg_pt_gp->tg_pt_gp_write_metadata) { 1063 core_alua_update_tpg_primary_metadata(tg_pt_gp); 1064 } 1065 1066 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 1067 " from primary access state %s to %s\n", (explicit) ? "explicit" : 1068 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1069 tg_pt_gp->tg_pt_gp_id, 1070 core_alua_dump_state(prev_state), 1071 core_alua_dump_state(new_state)); 1072 1073 core_alua_queue_state_change_ua(tg_pt_gp); 1074 1075 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); 1076 return 0; 1077 } 1078 1079 int core_alua_do_port_transition( 1080 struct t10_alua_tg_pt_gp *l_tg_pt_gp, 1081 struct se_device *l_dev, 1082 struct se_lun *l_lun, 1083 struct se_node_acl *l_nacl, 1084 int new_state, 1085 int explicit) 1086 { 1087 struct se_device *dev; 1088 struct t10_alua_lu_gp *lu_gp; 1089 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; 1090 struct t10_alua_tg_pt_gp *tg_pt_gp; 1091 int primary, valid_states, rc = 0; 1092 1093 if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) 1094 return -ENODEV; 1095 1096 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; 1097 if (core_alua_check_transition(new_state, valid_states, &primary, 1098 explicit) != 0) 1099 return -EINVAL; 1100 1101 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; 1102 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 1103 lu_gp = local_lu_gp_mem->lu_gp; 1104 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1105 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); 1106 /* 1107 * For storage objects that are members of the 'default_lu_gp', 1108 * we only do transition on the passed *l_tp_pt_gp, and not 1109 * on all of the matching target port groups IDs in default_lu_gp. 1110 */ 1111 if (!lu_gp->lu_gp_id) { 1112 /* 1113 * core_alua_do_transition_tg_pt() will always return 1114 * success. 1115 */ 1116 l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun; 1117 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1118 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, 1119 new_state, explicit); 1120 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt); 1121 return rc; 1122 } 1123 /* 1124 * For all other LU groups aside from 'default_lu_gp', walk all of 1125 * the associated storage objects looking for a matching target port 1126 * group ID from the local target port group. 1127 */ 1128 spin_lock(&lu_gp->lu_gp_lock); 1129 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, 1130 lu_gp_mem_list) { 1131 1132 dev = lu_gp_mem->lu_gp_mem_dev; 1133 atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt); 1134 spin_unlock(&lu_gp->lu_gp_lock); 1135 1136 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1137 list_for_each_entry(tg_pt_gp, 1138 &dev->t10_alua.tg_pt_gps_list, 1139 tg_pt_gp_list) { 1140 1141 if (!tg_pt_gp->tg_pt_gp_valid_id) 1142 continue; 1143 /* 1144 * If the target behavior port asymmetric access state 1145 * is changed for any target port group accessible via 1146 * a logical unit within a LU group, the target port 1147 * behavior group asymmetric access states for the same 1148 * target port group accessible via other logical units 1149 * in that LU group will also change. 1150 */ 1151 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) 1152 continue; 1153 1154 if (l_tg_pt_gp == tg_pt_gp) { 1155 tg_pt_gp->tg_pt_gp_alua_lun = l_lun; 1156 tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1157 } else { 1158 tg_pt_gp->tg_pt_gp_alua_lun = NULL; 1159 tg_pt_gp->tg_pt_gp_alua_nacl = NULL; 1160 } 1161 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); 1162 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1163 /* 1164 * core_alua_do_transition_tg_pt() will always return 1165 * success. 1166 */ 1167 rc = core_alua_do_transition_tg_pt(tg_pt_gp, 1168 new_state, explicit); 1169 1170 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1171 atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); 1172 if (rc) 1173 break; 1174 } 1175 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1176 1177 spin_lock(&lu_gp->lu_gp_lock); 1178 atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt); 1179 } 1180 spin_unlock(&lu_gp->lu_gp_lock); 1181 1182 if (!rc) { 1183 pr_debug("Successfully processed LU Group: %s all ALUA TG PT" 1184 " Group IDs: %hu %s transition to primary state: %s\n", 1185 config_item_name(&lu_gp->lu_gp_group.cg_item), 1186 l_tg_pt_gp->tg_pt_gp_id, 1187 (explicit) ? "explicit" : "implicit", 1188 core_alua_dump_state(new_state)); 1189 } 1190 1191 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt); 1192 return rc; 1193 } 1194 1195 static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun) 1196 { 1197 struct se_portal_group *se_tpg = lun->lun_tpg; 1198 unsigned char *md_buf; 1199 char *path; 1200 int len, rc; 1201 1202 mutex_lock(&lun->lun_tg_pt_md_mutex); 1203 1204 md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); 1205 if (!md_buf) { 1206 pr_err("Unable to allocate buf for ALUA metadata\n"); 1207 rc = -ENOMEM; 1208 goto out_unlock; 1209 } 1210 1211 len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n" 1212 "alua_tg_pt_status=0x%02x\n", 1213 atomic_read(&lun->lun_tg_pt_secondary_offline), 1214 lun->lun_tg_pt_secondary_stat); 1215 1216 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) { 1217 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu", 1218 db_root, se_tpg->se_tpg_tfo->fabric_name, 1219 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), 1220 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg), 1221 lun->unpacked_lun); 1222 } else { 1223 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu", 1224 db_root, se_tpg->se_tpg_tfo->fabric_name, 1225 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), 1226 lun->unpacked_lun); 1227 } 1228 if (!path) { 1229 rc = -ENOMEM; 1230 goto out_free; 1231 } 1232 1233 rc = core_alua_write_tpg_metadata(path, md_buf, len); 1234 kfree(path); 1235 out_free: 1236 kfree(md_buf); 1237 out_unlock: 1238 mutex_unlock(&lun->lun_tg_pt_md_mutex); 1239 return rc; 1240 } 1241 1242 static int core_alua_set_tg_pt_secondary_state( 1243 struct se_lun *lun, 1244 int explicit, 1245 int offline) 1246 { 1247 struct t10_alua_tg_pt_gp *tg_pt_gp; 1248 int trans_delay_msecs; 1249 1250 spin_lock(&lun->lun_tg_pt_gp_lock); 1251 tg_pt_gp = lun->lun_tg_pt_gp; 1252 if (!tg_pt_gp) { 1253 spin_unlock(&lun->lun_tg_pt_gp_lock); 1254 pr_err("Unable to complete secondary state" 1255 " transition\n"); 1256 return -EINVAL; 1257 } 1258 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; 1259 /* 1260 * Set the secondary ALUA target port access state to OFFLINE 1261 * or release the previously secondary state for struct se_lun 1262 */ 1263 if (offline) 1264 atomic_set(&lun->lun_tg_pt_secondary_offline, 1); 1265 else 1266 atomic_set(&lun->lun_tg_pt_secondary_offline, 0); 1267 1268 lun->lun_tg_pt_secondary_stat = (explicit) ? 1269 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1270 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1271 1272 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 1273 " to secondary access state: %s\n", (explicit) ? "explicit" : 1274 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1275 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); 1276 1277 spin_unlock(&lun->lun_tg_pt_gp_lock); 1278 /* 1279 * Do the optional transition delay after we set the secondary 1280 * ALUA access state. 1281 */ 1282 if (trans_delay_msecs != 0) 1283 msleep_interruptible(trans_delay_msecs); 1284 /* 1285 * See if we need to update the ALUA fabric port metadata for 1286 * secondary state and status 1287 */ 1288 if (lun->lun_tg_pt_secondary_write_md) 1289 core_alua_update_tpg_secondary_metadata(lun); 1290 1291 return 0; 1292 } 1293 1294 struct t10_alua_lba_map * 1295 core_alua_allocate_lba_map(struct list_head *list, 1296 u64 first_lba, u64 last_lba) 1297 { 1298 struct t10_alua_lba_map *lba_map; 1299 1300 lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL); 1301 if (!lba_map) { 1302 pr_err("Unable to allocate struct t10_alua_lba_map\n"); 1303 return ERR_PTR(-ENOMEM); 1304 } 1305 INIT_LIST_HEAD(&lba_map->lba_map_mem_list); 1306 lba_map->lba_map_first_lba = first_lba; 1307 lba_map->lba_map_last_lba = last_lba; 1308 1309 list_add_tail(&lba_map->lba_map_list, list); 1310 return lba_map; 1311 } 1312 1313 int 1314 core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map, 1315 int pg_id, int state) 1316 { 1317 struct t10_alua_lba_map_member *lba_map_mem; 1318 1319 list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list, 1320 lba_map_mem_list) { 1321 if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) { 1322 pr_err("Duplicate pg_id %d in lba_map\n", pg_id); 1323 return -EINVAL; 1324 } 1325 } 1326 1327 lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL); 1328 if (!lba_map_mem) { 1329 pr_err("Unable to allocate struct t10_alua_lba_map_mem\n"); 1330 return -ENOMEM; 1331 } 1332 lba_map_mem->lba_map_mem_alua_state = state; 1333 lba_map_mem->lba_map_mem_alua_pg_id = pg_id; 1334 1335 list_add_tail(&lba_map_mem->lba_map_mem_list, 1336 &lba_map->lba_map_mem_list); 1337 return 0; 1338 } 1339 1340 void 1341 core_alua_free_lba_map(struct list_head *lba_list) 1342 { 1343 struct t10_alua_lba_map *lba_map, *lba_map_tmp; 1344 struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp; 1345 1346 list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list, 1347 lba_map_list) { 1348 list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp, 1349 &lba_map->lba_map_mem_list, 1350 lba_map_mem_list) { 1351 list_del(&lba_map_mem->lba_map_mem_list); 1352 kmem_cache_free(t10_alua_lba_map_mem_cache, 1353 lba_map_mem); 1354 } 1355 list_del(&lba_map->lba_map_list); 1356 kmem_cache_free(t10_alua_lba_map_cache, lba_map); 1357 } 1358 } 1359 1360 void 1361 core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list, 1362 int segment_size, int segment_mult) 1363 { 1364 struct list_head old_lba_map_list; 1365 struct t10_alua_tg_pt_gp *tg_pt_gp; 1366 int activate = 0, supported; 1367 1368 INIT_LIST_HEAD(&old_lba_map_list); 1369 spin_lock(&dev->t10_alua.lba_map_lock); 1370 dev->t10_alua.lba_map_segment_size = segment_size; 1371 dev->t10_alua.lba_map_segment_multiplier = segment_mult; 1372 list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list); 1373 if (lba_map_list) { 1374 list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list); 1375 activate = 1; 1376 } 1377 spin_unlock(&dev->t10_alua.lba_map_lock); 1378 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1379 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 1380 tg_pt_gp_list) { 1381 1382 if (!tg_pt_gp->tg_pt_gp_valid_id) 1383 continue; 1384 supported = tg_pt_gp->tg_pt_gp_alua_supported_states; 1385 if (activate) 1386 supported |= ALUA_LBD_SUP; 1387 else 1388 supported &= ~ALUA_LBD_SUP; 1389 tg_pt_gp->tg_pt_gp_alua_supported_states = supported; 1390 } 1391 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1392 core_alua_free_lba_map(&old_lba_map_list); 1393 } 1394 1395 struct t10_alua_lu_gp * 1396 core_alua_allocate_lu_gp(const char *name, int def_group) 1397 { 1398 struct t10_alua_lu_gp *lu_gp; 1399 1400 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); 1401 if (!lu_gp) { 1402 pr_err("Unable to allocate struct t10_alua_lu_gp\n"); 1403 return ERR_PTR(-ENOMEM); 1404 } 1405 INIT_LIST_HEAD(&lu_gp->lu_gp_node); 1406 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); 1407 spin_lock_init(&lu_gp->lu_gp_lock); 1408 atomic_set(&lu_gp->lu_gp_ref_cnt, 0); 1409 1410 if (def_group) { 1411 lu_gp->lu_gp_id = alua_lu_gps_counter++; 1412 lu_gp->lu_gp_valid_id = 1; 1413 alua_lu_gps_count++; 1414 } 1415 1416 return lu_gp; 1417 } 1418 1419 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) 1420 { 1421 struct t10_alua_lu_gp *lu_gp_tmp; 1422 u16 lu_gp_id_tmp; 1423 /* 1424 * The lu_gp->lu_gp_id may only be set once.. 1425 */ 1426 if (lu_gp->lu_gp_valid_id) { 1427 pr_warn("ALUA LU Group already has a valid ID," 1428 " ignoring request\n"); 1429 return -EINVAL; 1430 } 1431 1432 spin_lock(&lu_gps_lock); 1433 if (alua_lu_gps_count == 0x0000ffff) { 1434 pr_err("Maximum ALUA alua_lu_gps_count:" 1435 " 0x0000ffff reached\n"); 1436 spin_unlock(&lu_gps_lock); 1437 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); 1438 return -ENOSPC; 1439 } 1440 again: 1441 lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id : 1442 alua_lu_gps_counter++; 1443 1444 list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) { 1445 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { 1446 if (!lu_gp_id) 1447 goto again; 1448 1449 pr_warn("ALUA Logical Unit Group ID: %hu" 1450 " already exists, ignoring request\n", 1451 lu_gp_id); 1452 spin_unlock(&lu_gps_lock); 1453 return -EINVAL; 1454 } 1455 } 1456 1457 lu_gp->lu_gp_id = lu_gp_id_tmp; 1458 lu_gp->lu_gp_valid_id = 1; 1459 list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list); 1460 alua_lu_gps_count++; 1461 spin_unlock(&lu_gps_lock); 1462 1463 return 0; 1464 } 1465 1466 static struct t10_alua_lu_gp_member * 1467 core_alua_allocate_lu_gp_mem(struct se_device *dev) 1468 { 1469 struct t10_alua_lu_gp_member *lu_gp_mem; 1470 1471 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); 1472 if (!lu_gp_mem) { 1473 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n"); 1474 return ERR_PTR(-ENOMEM); 1475 } 1476 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); 1477 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock); 1478 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0); 1479 1480 lu_gp_mem->lu_gp_mem_dev = dev; 1481 dev->dev_alua_lu_gp_mem = lu_gp_mem; 1482 1483 return lu_gp_mem; 1484 } 1485 1486 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) 1487 { 1488 struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp; 1489 /* 1490 * Once we have reached this point, config_item_put() has 1491 * already been called from target_core_alua_drop_lu_gp(). 1492 * 1493 * Here, we remove the *lu_gp from the global list so that 1494 * no associations can be made while we are releasing 1495 * struct t10_alua_lu_gp. 1496 */ 1497 spin_lock(&lu_gps_lock); 1498 list_del(&lu_gp->lu_gp_node); 1499 alua_lu_gps_count--; 1500 spin_unlock(&lu_gps_lock); 1501 /* 1502 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() 1503 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be 1504 * released with core_alua_put_lu_gp_from_name() 1505 */ 1506 while (atomic_read(&lu_gp->lu_gp_ref_cnt)) 1507 cpu_relax(); 1508 /* 1509 * Release reference to struct t10_alua_lu_gp * from all associated 1510 * struct se_device. 1511 */ 1512 spin_lock(&lu_gp->lu_gp_lock); 1513 list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp, 1514 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 1515 if (lu_gp_mem->lu_gp_assoc) { 1516 list_del(&lu_gp_mem->lu_gp_mem_list); 1517 lu_gp->lu_gp_members--; 1518 lu_gp_mem->lu_gp_assoc = 0; 1519 } 1520 spin_unlock(&lu_gp->lu_gp_lock); 1521 /* 1522 * 1523 * lu_gp_mem is associated with a single 1524 * struct se_device->dev_alua_lu_gp_mem, and is released when 1525 * struct se_device is released via core_alua_free_lu_gp_mem(). 1526 * 1527 * If the passed lu_gp does NOT match the default_lu_gp, assume 1528 * we want to re-associate a given lu_gp_mem with default_lu_gp. 1529 */ 1530 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1531 if (lu_gp != default_lu_gp) 1532 __core_alua_attach_lu_gp_mem(lu_gp_mem, 1533 default_lu_gp); 1534 else 1535 lu_gp_mem->lu_gp = NULL; 1536 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1537 1538 spin_lock(&lu_gp->lu_gp_lock); 1539 } 1540 spin_unlock(&lu_gp->lu_gp_lock); 1541 1542 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); 1543 } 1544 1545 void core_alua_free_lu_gp_mem(struct se_device *dev) 1546 { 1547 struct t10_alua_lu_gp *lu_gp; 1548 struct t10_alua_lu_gp_member *lu_gp_mem; 1549 1550 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1551 if (!lu_gp_mem) 1552 return; 1553 1554 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) 1555 cpu_relax(); 1556 1557 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1558 lu_gp = lu_gp_mem->lu_gp; 1559 if (lu_gp) { 1560 spin_lock(&lu_gp->lu_gp_lock); 1561 if (lu_gp_mem->lu_gp_assoc) { 1562 list_del(&lu_gp_mem->lu_gp_mem_list); 1563 lu_gp->lu_gp_members--; 1564 lu_gp_mem->lu_gp_assoc = 0; 1565 } 1566 spin_unlock(&lu_gp->lu_gp_lock); 1567 lu_gp_mem->lu_gp = NULL; 1568 } 1569 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1570 1571 kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem); 1572 } 1573 1574 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) 1575 { 1576 struct t10_alua_lu_gp *lu_gp; 1577 struct config_item *ci; 1578 1579 spin_lock(&lu_gps_lock); 1580 list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) { 1581 if (!lu_gp->lu_gp_valid_id) 1582 continue; 1583 ci = &lu_gp->lu_gp_group.cg_item; 1584 if (!strcmp(config_item_name(ci), name)) { 1585 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1586 spin_unlock(&lu_gps_lock); 1587 return lu_gp; 1588 } 1589 } 1590 spin_unlock(&lu_gps_lock); 1591 1592 return NULL; 1593 } 1594 1595 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) 1596 { 1597 spin_lock(&lu_gps_lock); 1598 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1599 spin_unlock(&lu_gps_lock); 1600 } 1601 1602 /* 1603 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock 1604 */ 1605 void __core_alua_attach_lu_gp_mem( 1606 struct t10_alua_lu_gp_member *lu_gp_mem, 1607 struct t10_alua_lu_gp *lu_gp) 1608 { 1609 spin_lock(&lu_gp->lu_gp_lock); 1610 lu_gp_mem->lu_gp = lu_gp; 1611 lu_gp_mem->lu_gp_assoc = 1; 1612 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list); 1613 lu_gp->lu_gp_members++; 1614 spin_unlock(&lu_gp->lu_gp_lock); 1615 } 1616 1617 /* 1618 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock 1619 */ 1620 void __core_alua_drop_lu_gp_mem( 1621 struct t10_alua_lu_gp_member *lu_gp_mem, 1622 struct t10_alua_lu_gp *lu_gp) 1623 { 1624 spin_lock(&lu_gp->lu_gp_lock); 1625 list_del(&lu_gp_mem->lu_gp_mem_list); 1626 lu_gp_mem->lu_gp = NULL; 1627 lu_gp_mem->lu_gp_assoc = 0; 1628 lu_gp->lu_gp_members--; 1629 spin_unlock(&lu_gp->lu_gp_lock); 1630 } 1631 1632 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, 1633 const char *name, int def_group) 1634 { 1635 struct t10_alua_tg_pt_gp *tg_pt_gp; 1636 1637 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); 1638 if (!tg_pt_gp) { 1639 pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n"); 1640 return NULL; 1641 } 1642 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); 1643 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); 1644 mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex); 1645 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1646 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1647 tg_pt_gp->tg_pt_gp_dev = dev; 1648 tg_pt_gp->tg_pt_gp_alua_access_state = 1649 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; 1650 /* 1651 * Enable both explicit and implicit ALUA support by default 1652 */ 1653 tg_pt_gp->tg_pt_gp_alua_access_type = 1654 TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA; 1655 /* 1656 * Set the default Active/NonOptimized Delay in milliseconds 1657 */ 1658 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; 1659 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; 1660 tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS; 1661 1662 /* 1663 * Enable all supported states 1664 */ 1665 tg_pt_gp->tg_pt_gp_alua_supported_states = 1666 ALUA_T_SUP | ALUA_O_SUP | 1667 ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP; 1668 1669 if (def_group) { 1670 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1671 tg_pt_gp->tg_pt_gp_id = 1672 dev->t10_alua.alua_tg_pt_gps_counter++; 1673 tg_pt_gp->tg_pt_gp_valid_id = 1; 1674 dev->t10_alua.alua_tg_pt_gps_count++; 1675 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1676 &dev->t10_alua.tg_pt_gps_list); 1677 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1678 } 1679 1680 return tg_pt_gp; 1681 } 1682 1683 int core_alua_set_tg_pt_gp_id( 1684 struct t10_alua_tg_pt_gp *tg_pt_gp, 1685 u16 tg_pt_gp_id) 1686 { 1687 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1688 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; 1689 u16 tg_pt_gp_id_tmp; 1690 1691 /* 1692 * The tg_pt_gp->tg_pt_gp_id may only be set once.. 1693 */ 1694 if (tg_pt_gp->tg_pt_gp_valid_id) { 1695 pr_warn("ALUA TG PT Group already has a valid ID," 1696 " ignoring request\n"); 1697 return -EINVAL; 1698 } 1699 1700 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1701 if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { 1702 pr_err("Maximum ALUA alua_tg_pt_gps_count:" 1703 " 0x0000ffff reached\n"); 1704 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1705 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1706 return -ENOSPC; 1707 } 1708 again: 1709 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : 1710 dev->t10_alua.alua_tg_pt_gps_counter++; 1711 1712 list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list, 1713 tg_pt_gp_list) { 1714 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { 1715 if (!tg_pt_gp_id) 1716 goto again; 1717 1718 pr_err("ALUA Target Port Group ID: %hu already" 1719 " exists, ignoring request\n", tg_pt_gp_id); 1720 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1721 return -EINVAL; 1722 } 1723 } 1724 1725 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; 1726 tg_pt_gp->tg_pt_gp_valid_id = 1; 1727 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1728 &dev->t10_alua.tg_pt_gps_list); 1729 dev->t10_alua.alua_tg_pt_gps_count++; 1730 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1731 1732 return 0; 1733 } 1734 1735 void core_alua_free_tg_pt_gp( 1736 struct t10_alua_tg_pt_gp *tg_pt_gp) 1737 { 1738 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1739 struct se_lun *lun, *next; 1740 1741 /* 1742 * Once we have reached this point, config_item_put() has already 1743 * been called from target_core_alua_drop_tg_pt_gp(). 1744 * 1745 * Here we remove *tg_pt_gp from the global list so that 1746 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS 1747 * can be made while we are releasing struct t10_alua_tg_pt_gp. 1748 */ 1749 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1750 if (tg_pt_gp->tg_pt_gp_valid_id) { 1751 list_del(&tg_pt_gp->tg_pt_gp_list); 1752 dev->t10_alua.alua_tg_pt_gps_count--; 1753 } 1754 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1755 1756 /* 1757 * Allow a struct t10_alua_tg_pt_gp_member * referenced by 1758 * core_alua_get_tg_pt_gp_by_name() in 1759 * target_core_configfs.c:target_core_store_alua_tg_pt_gp() 1760 * to be released with core_alua_put_tg_pt_gp_from_name(). 1761 */ 1762 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) 1763 cpu_relax(); 1764 1765 /* 1766 * Release reference to struct t10_alua_tg_pt_gp from all associated 1767 * struct se_port. 1768 */ 1769 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1770 list_for_each_entry_safe(lun, next, 1771 &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) { 1772 list_del_init(&lun->lun_tg_pt_gp_link); 1773 tg_pt_gp->tg_pt_gp_members--; 1774 1775 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1776 /* 1777 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, 1778 * assume we want to re-associate a given tg_pt_gp_mem with 1779 * default_tg_pt_gp. 1780 */ 1781 spin_lock(&lun->lun_tg_pt_gp_lock); 1782 if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) { 1783 __target_attach_tg_pt_gp(lun, 1784 dev->t10_alua.default_tg_pt_gp); 1785 } else 1786 lun->lun_tg_pt_gp = NULL; 1787 spin_unlock(&lun->lun_tg_pt_gp_lock); 1788 1789 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1790 } 1791 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1792 1793 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1794 } 1795 1796 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( 1797 struct se_device *dev, const char *name) 1798 { 1799 struct t10_alua_tg_pt_gp *tg_pt_gp; 1800 struct config_item *ci; 1801 1802 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1803 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 1804 tg_pt_gp_list) { 1805 if (!tg_pt_gp->tg_pt_gp_valid_id) 1806 continue; 1807 ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1808 if (!strcmp(config_item_name(ci), name)) { 1809 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1810 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1811 return tg_pt_gp; 1812 } 1813 } 1814 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1815 1816 return NULL; 1817 } 1818 1819 static void core_alua_put_tg_pt_gp_from_name( 1820 struct t10_alua_tg_pt_gp *tg_pt_gp) 1821 { 1822 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1823 1824 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1825 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1826 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1827 } 1828 1829 static void __target_attach_tg_pt_gp(struct se_lun *lun, 1830 struct t10_alua_tg_pt_gp *tg_pt_gp) 1831 { 1832 struct se_dev_entry *se_deve; 1833 1834 assert_spin_locked(&lun->lun_tg_pt_gp_lock); 1835 1836 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1837 lun->lun_tg_pt_gp = tg_pt_gp; 1838 list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list); 1839 tg_pt_gp->tg_pt_gp_members++; 1840 spin_lock(&lun->lun_deve_lock); 1841 list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) 1842 core_scsi3_ua_allocate(se_deve, 0x3f, 1843 ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED); 1844 spin_unlock(&lun->lun_deve_lock); 1845 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1846 } 1847 1848 void target_attach_tg_pt_gp(struct se_lun *lun, 1849 struct t10_alua_tg_pt_gp *tg_pt_gp) 1850 { 1851 spin_lock(&lun->lun_tg_pt_gp_lock); 1852 __target_attach_tg_pt_gp(lun, tg_pt_gp); 1853 spin_unlock(&lun->lun_tg_pt_gp_lock); 1854 } 1855 1856 static void __target_detach_tg_pt_gp(struct se_lun *lun, 1857 struct t10_alua_tg_pt_gp *tg_pt_gp) 1858 { 1859 assert_spin_locked(&lun->lun_tg_pt_gp_lock); 1860 1861 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1862 list_del_init(&lun->lun_tg_pt_gp_link); 1863 tg_pt_gp->tg_pt_gp_members--; 1864 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1865 1866 lun->lun_tg_pt_gp = NULL; 1867 } 1868 1869 void target_detach_tg_pt_gp(struct se_lun *lun) 1870 { 1871 struct t10_alua_tg_pt_gp *tg_pt_gp; 1872 1873 spin_lock(&lun->lun_tg_pt_gp_lock); 1874 tg_pt_gp = lun->lun_tg_pt_gp; 1875 if (tg_pt_gp) 1876 __target_detach_tg_pt_gp(lun, tg_pt_gp); 1877 spin_unlock(&lun->lun_tg_pt_gp_lock); 1878 } 1879 1880 ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) 1881 { 1882 struct config_item *tg_pt_ci; 1883 struct t10_alua_tg_pt_gp *tg_pt_gp; 1884 ssize_t len = 0; 1885 1886 spin_lock(&lun->lun_tg_pt_gp_lock); 1887 tg_pt_gp = lun->lun_tg_pt_gp; 1888 if (tg_pt_gp) { 1889 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1890 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" 1891 " %hu\nTG Port Primary Access State: %s\nTG Port " 1892 "Primary Access Status: %s\nTG Port Secondary Access" 1893 " State: %s\nTG Port Secondary Access Status: %s\n", 1894 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1895 core_alua_dump_state( 1896 tg_pt_gp->tg_pt_gp_alua_access_state), 1897 core_alua_dump_status( 1898 tg_pt_gp->tg_pt_gp_alua_access_status), 1899 atomic_read(&lun->lun_tg_pt_secondary_offline) ? 1900 "Offline" : "None", 1901 core_alua_dump_status(lun->lun_tg_pt_secondary_stat)); 1902 } 1903 spin_unlock(&lun->lun_tg_pt_gp_lock); 1904 1905 return len; 1906 } 1907 1908 ssize_t core_alua_store_tg_pt_gp_info( 1909 struct se_lun *lun, 1910 const char *page, 1911 size_t count) 1912 { 1913 struct se_portal_group *tpg = lun->lun_tpg; 1914 /* 1915 * rcu_dereference_raw protected by se_lun->lun_group symlink 1916 * reference to se_device->dev_group. 1917 */ 1918 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 1919 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; 1920 unsigned char buf[TG_PT_GROUP_NAME_BUF]; 1921 int move = 0; 1922 1923 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || 1924 (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 1925 return -ENODEV; 1926 1927 if (count > TG_PT_GROUP_NAME_BUF) { 1928 pr_err("ALUA Target Port Group alias too large!\n"); 1929 return -EINVAL; 1930 } 1931 memset(buf, 0, TG_PT_GROUP_NAME_BUF); 1932 memcpy(buf, page, count); 1933 /* 1934 * Any ALUA target port group alias besides "NULL" means we will be 1935 * making a new group association. 1936 */ 1937 if (strcmp(strstrip(buf), "NULL")) { 1938 /* 1939 * core_alua_get_tg_pt_gp_by_name() will increment reference to 1940 * struct t10_alua_tg_pt_gp. This reference is released with 1941 * core_alua_put_tg_pt_gp_from_name() below. 1942 */ 1943 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev, 1944 strstrip(buf)); 1945 if (!tg_pt_gp_new) 1946 return -ENODEV; 1947 } 1948 1949 spin_lock(&lun->lun_tg_pt_gp_lock); 1950 tg_pt_gp = lun->lun_tg_pt_gp; 1951 if (tg_pt_gp) { 1952 /* 1953 * Clearing an existing tg_pt_gp association, and replacing 1954 * with the default_tg_pt_gp. 1955 */ 1956 if (!tg_pt_gp_new) { 1957 pr_debug("Target_Core_ConfigFS: Moving" 1958 " %s/tpgt_%hu/%s from ALUA Target Port Group:" 1959 " alua/%s, ID: %hu back to" 1960 " default_tg_pt_gp\n", 1961 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1962 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1963 config_item_name(&lun->lun_group.cg_item), 1964 config_item_name( 1965 &tg_pt_gp->tg_pt_gp_group.cg_item), 1966 tg_pt_gp->tg_pt_gp_id); 1967 1968 __target_detach_tg_pt_gp(lun, tg_pt_gp); 1969 __target_attach_tg_pt_gp(lun, 1970 dev->t10_alua.default_tg_pt_gp); 1971 spin_unlock(&lun->lun_tg_pt_gp_lock); 1972 1973 return count; 1974 } 1975 __target_detach_tg_pt_gp(lun, tg_pt_gp); 1976 move = 1; 1977 } 1978 1979 __target_attach_tg_pt_gp(lun, tg_pt_gp_new); 1980 spin_unlock(&lun->lun_tg_pt_gp_lock); 1981 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" 1982 " Target Port Group: alua/%s, ID: %hu\n", (move) ? 1983 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1984 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1985 config_item_name(&lun->lun_group.cg_item), 1986 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), 1987 tg_pt_gp_new->tg_pt_gp_id); 1988 1989 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); 1990 return count; 1991 } 1992 1993 ssize_t core_alua_show_access_type( 1994 struct t10_alua_tg_pt_gp *tg_pt_gp, 1995 char *page) 1996 { 1997 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) && 1998 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) 1999 return sprintf(page, "Implicit and Explicit\n"); 2000 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA) 2001 return sprintf(page, "Implicit\n"); 2002 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) 2003 return sprintf(page, "Explicit\n"); 2004 else 2005 return sprintf(page, "None\n"); 2006 } 2007 2008 ssize_t core_alua_store_access_type( 2009 struct t10_alua_tg_pt_gp *tg_pt_gp, 2010 const char *page, 2011 size_t count) 2012 { 2013 unsigned long tmp; 2014 int ret; 2015 2016 ret = kstrtoul(page, 0, &tmp); 2017 if (ret < 0) { 2018 pr_err("Unable to extract alua_access_type\n"); 2019 return ret; 2020 } 2021 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { 2022 pr_err("Illegal value for alua_access_type:" 2023 " %lu\n", tmp); 2024 return -EINVAL; 2025 } 2026 if (tmp == 3) 2027 tg_pt_gp->tg_pt_gp_alua_access_type = 2028 TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA; 2029 else if (tmp == 2) 2030 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA; 2031 else if (tmp == 1) 2032 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA; 2033 else 2034 tg_pt_gp->tg_pt_gp_alua_access_type = 0; 2035 2036 return count; 2037 } 2038 2039 ssize_t core_alua_show_nonop_delay_msecs( 2040 struct t10_alua_tg_pt_gp *tg_pt_gp, 2041 char *page) 2042 { 2043 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs); 2044 } 2045 2046 ssize_t core_alua_store_nonop_delay_msecs( 2047 struct t10_alua_tg_pt_gp *tg_pt_gp, 2048 const char *page, 2049 size_t count) 2050 { 2051 unsigned long tmp; 2052 int ret; 2053 2054 ret = kstrtoul(page, 0, &tmp); 2055 if (ret < 0) { 2056 pr_err("Unable to extract nonop_delay_msecs\n"); 2057 return ret; 2058 } 2059 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { 2060 pr_err("Passed nonop_delay_msecs: %lu, exceeds" 2061 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, 2062 ALUA_MAX_NONOP_DELAY_MSECS); 2063 return -EINVAL; 2064 } 2065 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp; 2066 2067 return count; 2068 } 2069 2070 ssize_t core_alua_show_trans_delay_msecs( 2071 struct t10_alua_tg_pt_gp *tg_pt_gp, 2072 char *page) 2073 { 2074 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs); 2075 } 2076 2077 ssize_t core_alua_store_trans_delay_msecs( 2078 struct t10_alua_tg_pt_gp *tg_pt_gp, 2079 const char *page, 2080 size_t count) 2081 { 2082 unsigned long tmp; 2083 int ret; 2084 2085 ret = kstrtoul(page, 0, &tmp); 2086 if (ret < 0) { 2087 pr_err("Unable to extract trans_delay_msecs\n"); 2088 return ret; 2089 } 2090 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { 2091 pr_err("Passed trans_delay_msecs: %lu, exceeds" 2092 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, 2093 ALUA_MAX_TRANS_DELAY_MSECS); 2094 return -EINVAL; 2095 } 2096 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp; 2097 2098 return count; 2099 } 2100 2101 ssize_t core_alua_show_implicit_trans_secs( 2102 struct t10_alua_tg_pt_gp *tg_pt_gp, 2103 char *page) 2104 { 2105 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs); 2106 } 2107 2108 ssize_t core_alua_store_implicit_trans_secs( 2109 struct t10_alua_tg_pt_gp *tg_pt_gp, 2110 const char *page, 2111 size_t count) 2112 { 2113 unsigned long tmp; 2114 int ret; 2115 2116 ret = kstrtoul(page, 0, &tmp); 2117 if (ret < 0) { 2118 pr_err("Unable to extract implicit_trans_secs\n"); 2119 return ret; 2120 } 2121 if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) { 2122 pr_err("Passed implicit_trans_secs: %lu, exceeds" 2123 " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp, 2124 ALUA_MAX_IMPLICIT_TRANS_SECS); 2125 return -EINVAL; 2126 } 2127 tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp; 2128 2129 return count; 2130 } 2131 2132 ssize_t core_alua_show_preferred_bit( 2133 struct t10_alua_tg_pt_gp *tg_pt_gp, 2134 char *page) 2135 { 2136 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref); 2137 } 2138 2139 ssize_t core_alua_store_preferred_bit( 2140 struct t10_alua_tg_pt_gp *tg_pt_gp, 2141 const char *page, 2142 size_t count) 2143 { 2144 unsigned long tmp; 2145 int ret; 2146 2147 ret = kstrtoul(page, 0, &tmp); 2148 if (ret < 0) { 2149 pr_err("Unable to extract preferred ALUA value\n"); 2150 return ret; 2151 } 2152 if ((tmp != 0) && (tmp != 1)) { 2153 pr_err("Illegal value for preferred ALUA: %lu\n", tmp); 2154 return -EINVAL; 2155 } 2156 tg_pt_gp->tg_pt_gp_pref = (int)tmp; 2157 2158 return count; 2159 } 2160 2161 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) 2162 { 2163 return sprintf(page, "%d\n", 2164 atomic_read(&lun->lun_tg_pt_secondary_offline)); 2165 } 2166 2167 ssize_t core_alua_store_offline_bit( 2168 struct se_lun *lun, 2169 const char *page, 2170 size_t count) 2171 { 2172 /* 2173 * rcu_dereference_raw protected by se_lun->lun_group symlink 2174 * reference to se_device->dev_group. 2175 */ 2176 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 2177 unsigned long tmp; 2178 int ret; 2179 2180 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || 2181 (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 2182 return -ENODEV; 2183 2184 ret = kstrtoul(page, 0, &tmp); 2185 if (ret < 0) { 2186 pr_err("Unable to extract alua_tg_pt_offline value\n"); 2187 return ret; 2188 } 2189 if ((tmp != 0) && (tmp != 1)) { 2190 pr_err("Illegal value for alua_tg_pt_offline: %lu\n", 2191 tmp); 2192 return -EINVAL; 2193 } 2194 2195 ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp); 2196 if (ret < 0) 2197 return -EINVAL; 2198 2199 return count; 2200 } 2201 2202 ssize_t core_alua_show_secondary_status( 2203 struct se_lun *lun, 2204 char *page) 2205 { 2206 return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat); 2207 } 2208 2209 ssize_t core_alua_store_secondary_status( 2210 struct se_lun *lun, 2211 const char *page, 2212 size_t count) 2213 { 2214 unsigned long tmp; 2215 int ret; 2216 2217 ret = kstrtoul(page, 0, &tmp); 2218 if (ret < 0) { 2219 pr_err("Unable to extract alua_tg_pt_status\n"); 2220 return ret; 2221 } 2222 if ((tmp != ALUA_STATUS_NONE) && 2223 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && 2224 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { 2225 pr_err("Illegal value for alua_tg_pt_status: %lu\n", 2226 tmp); 2227 return -EINVAL; 2228 } 2229 lun->lun_tg_pt_secondary_stat = (int)tmp; 2230 2231 return count; 2232 } 2233 2234 ssize_t core_alua_show_secondary_write_metadata( 2235 struct se_lun *lun, 2236 char *page) 2237 { 2238 return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md); 2239 } 2240 2241 ssize_t core_alua_store_secondary_write_metadata( 2242 struct se_lun *lun, 2243 const char *page, 2244 size_t count) 2245 { 2246 unsigned long tmp; 2247 int ret; 2248 2249 ret = kstrtoul(page, 0, &tmp); 2250 if (ret < 0) { 2251 pr_err("Unable to extract alua_tg_pt_write_md\n"); 2252 return ret; 2253 } 2254 if ((tmp != 0) && (tmp != 1)) { 2255 pr_err("Illegal value for alua_tg_pt_write_md:" 2256 " %lu\n", tmp); 2257 return -EINVAL; 2258 } 2259 lun->lun_tg_pt_secondary_write_md = (int)tmp; 2260 2261 return count; 2262 } 2263 2264 int core_setup_alua(struct se_device *dev) 2265 { 2266 if (!(dev->transport->transport_flags & 2267 TRANSPORT_FLAG_PASSTHROUGH_ALUA) && 2268 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { 2269 struct t10_alua_lu_gp_member *lu_gp_mem; 2270 2271 /* 2272 * Associate this struct se_device with the default ALUA 2273 * LUN Group. 2274 */ 2275 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); 2276 if (IS_ERR(lu_gp_mem)) 2277 return PTR_ERR(lu_gp_mem); 2278 2279 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 2280 __core_alua_attach_lu_gp_mem(lu_gp_mem, 2281 default_lu_gp); 2282 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2283 2284 pr_debug("%s: Adding to default ALUA LU Group:" 2285 " core/alua/lu_gps/default_lu_gp\n", 2286 dev->transport->name); 2287 } 2288 2289 return 0; 2290 } 2291