/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/disp.h>
#include <sys/byteorder.h>
#include <sys/atomic.h>

#include <sys/stmf.h>
#include <sys/lpif.h>
#include <sys/portif.h>
#include <sys/stmf_ioctl.h>

#include "stmf_impl.h"
#include "lun_map.h"
#include "stmf_state.h"

void stmf_update_sessions_per_ve(stmf_view_entry_t *ve,
    stmf_lu_t *lu, int action);
void stmf_add_lus_to_session_per_vemap(stmf_i_local_port_t *ilport,
    stmf_i_scsi_session_t *iss, stmf_lun_map_t *vemap);
stmf_id_data_t *stmf_lookup_group_for_host(uint8_t *ident, uint16_t ident_size);
static stmf_status_t stmf_add_ent_to_map(stmf_lun_map_t *sm, void *ent,
    uint8_t *lun);
static stmf_status_t stmf_remove_ent_from_map(stmf_lun_map_t *sm, uint8_t *lun);
uint16_t stmf_get_next_free_lun(stmf_lun_map_t *sm, uint8_t *lun);
stmf_status_t stmf_add_tg(uint8_t *tg_name, uint16_t tg_name_size,
    int allow_special, uint32_t *err_detail);
stmf_status_t stmf_add_hg(uint8_t *hg_name, uint16_t hg_name_size,
    int allow_special, uint32_t *err_detail);
stmf_i_local_port_t *stmf_targetident_to_ilport(uint8_t *target_ident,
    uint16_t ident_size);
stmf_i_scsi_session_t *stmf_lookup_session_for_hostident(
    stmf_i_local_port_t *ilport, uint8_t *host_ident, uint16_t ident_size);
stmf_i_lu_t *stmf_luident_to_ilu(uint8_t *lu_ident);
stmf_lun_map_t *stmf_get_ve_map_per_ids(stmf_id_data_t *tgid,
    stmf_id_data_t *hgid);
stmf_lun_map_t *stmf_duplicate_ve_map(stmf_lun_map_t *src);
int stmf_merge_ve_map(stmf_lun_map_t *src, stmf_lun_map_t *dst,
    stmf_lun_map_t **pp_ret_map, stmf_merge_flags_t mf);
void stmf_destroy_ve_map(stmf_lun_map_t *dst);
void stmf_free_id(stmf_id_data_t *id);


/*
 * Init the view
 */
void
stmf_view_init()
{
    uint8_t grpname_forall = '*';
    (void) stmf_add_hg(&grpname_forall, 1, 1, NULL);
    (void) stmf_add_tg(&grpname_forall, 1, 1, NULL);
}

/*
 * Clear config database here
 */
void
stmf_view_clear_config()
{
    stmf_id_data_t *idgrp, *idgrp_next, *idmemb, *idmemb_next;
    stmf_ver_tg_t *vtg, *vtg_next;
    stmf_ver_hg_t *vhg, *vhg_next;
    stmf_view_entry_t *ve, *ve_next;
    stmf_i_lu_t *ilu;
    stmf_id_list_t *idlist;
    stmf_i_local_port_t *ilport;

    for (vtg = stmf_state.stmf_ver_tg_head; vtg; vtg = vtg_next) {
        for (vhg = vtg->vert_verh_list; vhg; vhg = vhg_next) {
            if (vhg->verh_ve_map.lm_nentries) {
                kmem_free(vhg->verh_ve_map.lm_plus,
                    vhg->verh_ve_map.lm_nentries *
                    sizeof (void *));
            }
            vhg_next = vhg->verh_next;
            kmem_free(vhg, sizeof (stmf_ver_hg_t));
        }
        vtg_next = vtg->vert_next;
        kmem_free(vtg, sizeof (stmf_ver_tg_t));
    }
    stmf_state.stmf_ver_tg_head = NULL;

    if (stmf_state.stmf_luid_list.id_count) {
        /* clear the views for lus */
        for (idmemb = stmf_state.stmf_luid_list.idl_head;
            idmemb; idmemb = idmemb_next) {
            for (ve = (stmf_view_entry_t *)idmemb->id_impl_specific;
                ve; ve = ve_next) {
                ve_next = ve->ve_next;
                ve->ve_hg->id_refcnt--;
                ve->ve_tg->id_refcnt--;
                kmem_free(ve, sizeof (stmf_view_entry_t));
            }
            if (idmemb->id_pt_to_object) {
                ilu = (stmf_i_lu_t *)(idmemb->id_pt_to_object);
                ilu->ilu_luid = NULL;
            }
            idmemb_next = idmemb->id_next;
            stmf_free_id(idmemb);
        }
        stmf_state.stmf_luid_list.id_count = 0;
        stmf_state.stmf_luid_list.idl_head =
            stmf_state.stmf_luid_list.idl_tail = NULL;
    }

    if (stmf_state.stmf_hg_list.id_count) {
        /* free all the host groups */
        for (idgrp = stmf_state.stmf_hg_list.idl_head;
            idgrp; idgrp = idgrp_next) {
            idlist = (stmf_id_list_t *)(idgrp->id_impl_specific);
            if (idlist->id_count) {
                for (idmemb = idlist->idl_head; idmemb;
                    idmemb = idmemb_next) {
                    idmemb_next = idmemb->id_next;
                    stmf_free_id(idmemb);
                }
            }
            idgrp_next = idgrp->id_next;
            stmf_free_id(idgrp);
        }
        stmf_state.stmf_hg_list.id_count = 0;
        stmf_state.stmf_hg_list.idl_head =
            stmf_state.stmf_hg_list.idl_tail = NULL;
    }
    if (stmf_state.stmf_tg_list.id_count) {
        /* free all the target groups */
        for (idgrp = stmf_state.stmf_tg_list.idl_head;
            idgrp; idgrp = idgrp_next) {
            idlist = (stmf_id_list_t *)(idgrp->id_impl_specific);
            if (idlist->id_count) {
                for (idmemb = idlist->idl_head; idmemb;
                    idmemb = idmemb_next) {
                    idmemb_next = idmemb->id_next;
                    stmf_free_id(idmemb);
                }
            }
            idgrp_next = idgrp->id_next;
            stmf_free_id(idgrp);
        }
        stmf_state.stmf_tg_list.id_count = 0;
        stmf_state.stmf_tg_list.idl_head =
            stmf_state.stmf_tg_list.idl_tail = NULL;
    }

    for (ilport = stmf_state.stmf_ilportlist; ilport;
        ilport = ilport->ilport_next) {
        ilport->ilport_tg = NULL;
    }
}

/*
 * Create the LUN map for a session based on the view.
 * iss_lockp is held
 */
stmf_status_t
stmf_session_create_lun_map(stmf_i_local_port_t *ilport,
    stmf_i_scsi_session_t *iss)
{
    stmf_id_data_t *tg;
    stmf_id_data_t *hg;
    stmf_ver_tg_t *vertg;
    char *phg_data, *ptg_data;
    stmf_ver_hg_t *verhg;
    stmf_lun_map_t *ve_map;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    tg = ilport->ilport_tg;
    hg = stmf_lookup_group_for_host(iss->iss_ss->ss_rport_id->ident,
        iss->iss_ss->ss_rport_id->ident_length);
    iss->iss_hg = hg;

    /*
     * Get the view entry map, taking all host/target groups
     * into consideration.
     */
    ve_map = stmf_duplicate_ve_map(0);
    for (vertg = stmf_state.stmf_ver_tg_head; vertg != NULL;
        vertg = vertg->vert_next) {
        ptg_data = (char *)vertg->vert_tg_ref->id_data;
        if ((ptg_data[0] != '*') && (!tg ||
            ((tg->id_data[0] != '*') &&
            (vertg->vert_tg_ref != tg)))) {
            continue;
        }
        for (verhg = vertg->vert_verh_list; verhg != NULL;
            verhg = verhg->verh_next) {
            phg_data = (char *)verhg->verh_hg_ref->id_data;
            if ((phg_data[0] != '*') && (!hg ||
                ((hg->id_data[0] != '*') &&
                (verhg->verh_hg_ref != hg)))) {
                continue;
            }
            (void) stmf_merge_ve_map(&verhg->verh_ve_map, ve_map,
                &ve_map, 0);
        }
    }

    if (ve_map->lm_nluns) {
        stmf_add_lus_to_session_per_vemap(ilport, iss, ve_map);
    }
    /* not configured, cannot access any luns for now */

    stmf_destroy_ve_map(ve_map);

    return (STMF_SUCCESS);
}

/*
 * Expects the session lock to be held.
 * iss_lockp is held
 */
stmf_xfer_data_t *
stmf_session_prepare_report_lun_data(stmf_lun_map_t *sm)
{
    stmf_xfer_data_t *xd;
    uint16_t nluns, ent;
    uint32_t alloc_size, data_size;
    uchar_t *buf;
    int i;

    nluns = sm->lm_nluns;

    data_size = 8 + (((uint32_t)nluns) << 3);
    if (nluns == 0) {
        data_size += 8;
    }
    alloc_size = data_size + sizeof (stmf_xfer_data_t) - 4;

    xd = (stmf_xfer_data_t *)kmem_zalloc(alloc_size, KM_NOSLEEP);

    if (xd == NULL)
        return (NULL);

    xd->alloc_size = alloc_size;
    xd->size_left = data_size;

    *((uint32_t *)xd->buf) = BE_32(data_size - 8);
    if (nluns == 0) {
        return (xd);
    }

    ent = 0;

    buf = &(xd->buf[0]);
    for (i = 0; ((i < sm->lm_nentries) && (ent < nluns)); i++) {
        if (sm->lm_plus[i] == NULL)
            continue;
        /* Fill in the entry */
        buf[8 + (ent << 3) + 1] = (uchar_t)i;
        buf[8 + (ent << 3) + 0] = ((uchar_t)(i >> 8));
        ent++;
    }

    ASSERT(ent == nluns);

    return (xd);
}
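
/*
 * Illustrative note (not driver code): the buffer built above is standard
 * REPORT LUNS parameter data.  Bytes 0-3 carry the LUN list length in
 * big-endian form (number of entries * 8), bytes 4-7 are reserved, and
 * each following 8-byte entry holds the 16-bit LUN number in its first
 * two bytes with the remaining bytes zeroed.  When the map is empty, a
 * single zeroed entry (LUN 0) is still reported.  A hypothetical consumer
 * could walk the data like this:
 *
 *    uint32_t list_len = BE_IN32(xd->buf);
 *    int n;
 *    for (n = 0; n < (list_len >> 3); n++) {
 *        uchar_t *e = &xd->buf[8 + (n << 3)];
 *        uint16_t lun_nbr = ((uint16_t)e[0] << 8) | e[1];
 *        ...
 *    }
 */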
/*
 * Add a lu to active sessions based on LUN inventory.
 * Only invoked when the lu is onlined
 */
void
stmf_add_lu_to_active_sessions(stmf_lu_t *lu)
{
    stmf_id_data_t *luid;
    stmf_view_entry_t *ve;
    stmf_i_lu_t *ilu;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));
    ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
    ASSERT(ilu->ilu_state == STMF_STATE_ONLINE);

    luid = ((stmf_i_lu_t *)lu->lu_stmf_private)->ilu_luid;

    if (!luid) {
        /* we did not configure view for this lun, so just return */
        return;
    }

    for (ve = (stmf_view_entry_t *)luid->id_impl_specific;
        ve; ve = ve->ve_next) {
        stmf_update_sessions_per_ve(ve, lu, 1);
    }
}

/*
 * Unmap a lun from all sessions
 */
void
stmf_session_lu_unmapall(stmf_lu_t *lu)
{
    stmf_i_lu_t *ilu;
    stmf_id_data_t *luid;
    stmf_view_entry_t *ve;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

    if (ilu->ilu_ref_cnt == 0)
        return;

    luid = ((stmf_i_lu_t *)lu->lu_stmf_private)->ilu_luid;
    if (!luid) {
        /*
         * we did not configure view for this lun, this should be
         * an error
         */
        return;
    }

    for (ve = (stmf_view_entry_t *)luid->id_impl_specific;
        ve; ve = ve->ve_next) {
        stmf_update_sessions_per_ve(ve, lu, 0);
        if (ilu->ilu_ref_cnt == 0)
            break;
    }
}
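
/*
 * Illustrative sketch (an assumption documented for clarity, not part of
 * the driver): the routines below pass a LUN around as the first two bytes
 * of an 8-byte SCSI LUN field.  The top two bits of byte 0 are the
 * addressing method (0 here), leaving a 14-bit LUN number.  Hypothetical
 * helpers showing the encoding used throughout this file:
 *
 *    static uint16_t
 *    lun_nbr_from_field(const uint8_t *lun)
 *    {
 *        return ((uint16_t)lun[1] |
 *            (((uint16_t)(lun[0] & 0x3F)) << 8));
 *    }
 *
 *    static void
 *    lun_nbr_to_field(uint16_t nbr, uint8_t *lun)
 *    {
 *        bzero(lun, 8);
 *        lun[0] = (nbr >> 8) & 0x3F;
 *        lun[1] = nbr & 0xff;
 *    }
 */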
/*
 * add lu to a session, stmf_lock is already held
 * iss_lockp/ilport_lock already held
 */
static stmf_status_t
stmf_add_lu_to_session(stmf_i_local_port_t *ilport,
    stmf_i_scsi_session_t *iss,
    stmf_lu_t *lu,
    uint8_t *lu_nbr)
{
    stmf_lun_map_t *sm = iss->iss_sm;
    stmf_status_t ret;
    stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
    stmf_lun_map_ent_t *lun_map_ent;
    uint32_t new_flags = 0;
    uint16_t luNbr =
        ((uint16_t)lu_nbr[1] | (((uint16_t)(lu_nbr[0] & 0x3F)) << 8));

    ASSERT(mutex_owned(&stmf_state.stmf_lock));
    ASSERT(!stmf_get_ent_from_map(sm, luNbr));

    if ((sm->lm_nluns == 0) &&
        ((iss->iss_flags & ISS_BEING_CREATED) == 0)) {
        new_flags = ISS_GOT_INITIAL_LUNS;
        atomic_or_32(&ilport->ilport_flags, ILPORT_SS_GOT_INITIAL_LUNS);
        stmf_state.stmf_process_initial_luns = 1;
    }

    lun_map_ent = (stmf_lun_map_ent_t *)
        kmem_zalloc(sizeof (stmf_lun_map_ent_t), KM_SLEEP);
    lun_map_ent->ent_lu = lu;
    ret = stmf_add_ent_to_map(sm, (void *)lun_map_ent, lu_nbr);
    ASSERT(ret == STMF_SUCCESS);
    atomic_inc_32(&ilu->ilu_ref_cnt);
    /*
     * do not set lun inventory flag for standby port
     * as this would be handled from peer
     */
    if (ilport->ilport_standby == 0) {
        new_flags |= ISS_LUN_INVENTORY_CHANGED;
    }
    atomic_or_32(&iss->iss_flags, new_flags);
    return (STMF_SUCCESS);
}

/*
 * remove lu from a session, stmf_lock is already held
 * iss_lockp held
 */
static void
stmf_remove_lu_from_session(stmf_i_scsi_session_t *iss,
    stmf_lu_t *lu, uint8_t *lu_nbr)
{
    stmf_status_t ret;
    stmf_i_lu_t *ilu;
    stmf_lun_map_t *sm = iss->iss_sm;
    stmf_lun_map_ent_t *lun_map_ent;
    uint16_t luNbr =
        ((uint16_t)lu_nbr[1] | (((uint16_t)(lu_nbr[0] & 0x3F)) << 8));

    ASSERT(mutex_owned(&stmf_state.stmf_lock));
    lun_map_ent = stmf_get_ent_from_map(sm, luNbr);
    if (lun_map_ent == NULL) {
        return;
    }
    ASSERT(lun_map_ent->ent_lu == lu);

    ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

    ret = stmf_remove_ent_from_map(sm, lu_nbr);
    ASSERT(ret == STMF_SUCCESS);
    atomic_dec_32(&ilu->ilu_ref_cnt);
    iss->iss_flags |= ISS_LUN_INVENTORY_CHANGED;
    if (lun_map_ent->ent_itl_datap) {
        stmf_do_itl_dereg(lu, lun_map_ent->ent_itl_datap,
            STMF_ITL_REASON_USER_REQUEST);
    }
    kmem_free((void *)lun_map_ent, sizeof (stmf_lun_map_ent_t));
}

/*
 * add or remove lu from all related sessions based on view entry,
 * action is 0 for delete, 1 for add
 */
void
stmf_update_sessions_per_ve(stmf_view_entry_t *ve,
    stmf_lu_t *lu, int action)
{
    stmf_i_lu_t *ilu_tmp;
    stmf_lu_t *lu_to_add;
    stmf_i_local_port_t *ilport;
    stmf_i_scsi_session_t *iss;
    stmf_id_list_t *hostlist;
    stmf_id_list_t *targetlist;
    int all_hg = 0, all_tg = 0;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    if (!lu) {
        ilu_tmp = (stmf_i_lu_t *)ve->ve_luid->id_pt_to_object;
        if (!ilu_tmp)
            return;
        lu_to_add = ilu_tmp->ilu_lu;
    } else {
        lu_to_add = lu;
        ilu_tmp = (stmf_i_lu_t *)lu->lu_stmf_private;
    }

    if (ve->ve_hg->id_data[0] == '*')
        all_hg = 1;
    if (ve->ve_tg->id_data[0] == '*')
        all_tg = 1;
    hostlist = (stmf_id_list_t *)ve->ve_hg->id_impl_specific;
    targetlist = (stmf_id_list_t *)ve->ve_tg->id_impl_specific;

    if ((!all_hg && !hostlist->idl_head) ||
        (!all_tg && !targetlist->idl_head))
        /* No sessions to be updated */
        return;

    for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
        ilport = ilport->ilport_next) {
        if (!all_tg && ilport->ilport_tg != ve->ve_tg)
            continue;
        /* This ilport belongs to the target group */
        rw_enter(&ilport->ilport_lock, RW_WRITER);
        for (iss = ilport->ilport_ss_list; iss != NULL;
            iss = iss->iss_next) {
            if (!all_hg && iss->iss_hg != ve->ve_hg)
                continue;
            /* This host belongs to the host group */
            if (action == 0) { /* to remove */
                stmf_remove_lu_from_session(iss, lu_to_add,
                    ve->ve_lun);
                if (ilu_tmp->ilu_ref_cnt == 0) {
                    rw_exit(&ilport->ilport_lock);
                    return;
                }
            } else {
                (void) stmf_add_lu_to_session(ilport, iss,
                    lu_to_add, ve->ve_lun);
            }
        }
        rw_exit(&ilport->ilport_lock);
    }
}

/*
 * add luns in view entry map to a session,
 * stmf_lock is already held
 */
void
stmf_add_lus_to_session_per_vemap(stmf_i_local_port_t *ilport,
    stmf_i_scsi_session_t *iss,
    stmf_lun_map_t *vemap)
{
    stmf_lu_t *lu;
    stmf_i_lu_t *ilu;
    stmf_view_entry_t *ve;
    uint32_t i;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));
    for (i = 0; i < vemap->lm_nentries; i++) {
        ve = (stmf_view_entry_t *)vemap->lm_plus[i];
        if (!ve)
            continue;
        ilu = (stmf_i_lu_t *)ve->ve_luid->id_pt_to_object;
        if (ilu && ilu->ilu_state == STMF_STATE_ONLINE) {
            lu = ilu->ilu_lu;
            (void) stmf_add_lu_to_session(ilport, iss, lu,
                ve->ve_lun);
        }
    }
}

/*
 * remove luns in view entry map from a session
 * iss_lockp held
 */
void
stmf_remove_lus_from_session_per_vemap(stmf_i_scsi_session_t *iss,
    stmf_lun_map_t *vemap)
{
    stmf_lu_t *lu;
    stmf_i_lu_t *ilu;
    stmf_view_entry_t *ve;
    uint32_t i;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    for (i = 0; i < vemap->lm_nentries; i++) {
        ve = (stmf_view_entry_t *)vemap->lm_plus[i];
        if (!ve)
            continue;
        ilu = (stmf_i_lu_t *)ve->ve_luid->id_pt_to_object;
        if (ilu && ilu->ilu_state == STMF_STATE_ONLINE) {
            lu = ilu->ilu_lu;
            stmf_remove_lu_from_session(iss, lu, ve->ve_lun);
        }
    }
}

stmf_id_data_t *
stmf_alloc_id(uint16_t id_size, uint16_t type, uint8_t *id_data,
    uint32_t additional_size)
{
    stmf_id_data_t *id;
    int struct_size, total_size, real_id_size;

    real_id_size = ((uint32_t)id_size + 7) & (~7);
    struct_size = (sizeof (*id) + 7) & (~7);
    total_size = ((additional_size + 7) & (~7)) + struct_size +
        real_id_size;
    id = (stmf_id_data_t *)kmem_zalloc(total_size, KM_SLEEP);
    id->id_type = type;
    id->id_data_size = id_size;
    id->id_data = ((uint8_t *)id) + struct_size;
    id->id_total_alloc_size = total_size;
    if (additional_size) {
        id->id_impl_specific = ((uint8_t *)id) + struct_size +
            real_id_size;
    }
    bcopy(id_data, id->id_data, id_size);

    return (id);
}

void
stmf_free_id(stmf_id_data_t *id)
{
    kmem_free(id, id->id_total_alloc_size);
}
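
/*
 * Layout note (illustration only): stmf_alloc_id() packs everything into
 * one allocation, with each piece rounded up to 8 bytes:
 *
 *    +--------------------+  <- id (stmf_id_data_t)
 *    | structure fields   |
 *    +--------------------+  <- id->id_data (id_size bytes)
 *    | identifier bytes   |
 *    +--------------------+  <- id->id_impl_specific
 *    | additional_size    |     (only when additional_size != 0)
 *    +--------------------+
 *
 * so a single stmf_free_id() releases the identifier and any
 * implementation-specific area along with the structure.  For example, a
 * host group id embeds its member list directly:
 *
 *    id = stmf_alloc_id(hg_name_size, STMF_ID_TYPE_HOST_GROUP,
 *        hg_name, sizeof (stmf_id_list_t));
 *    idlist = (stmf_id_list_t *)id->id_impl_specific;
 */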
stmf_id_data_t *
stmf_lookup_id(stmf_id_list_t *idlist, uint16_t id_size, uint8_t *data)
{
    stmf_id_data_t *id;

    for (id = idlist->idl_head; id != NULL; id = id->id_next) {
        if ((id->id_data_size == id_size) &&
            (bcmp(id->id_data, data, id_size) == 0)) {
            return (id);
        }
    }

    return (NULL);
}

/* Return the target group which a target belongs to */
stmf_id_data_t *
stmf_lookup_group_for_target(uint8_t *ident, uint16_t ident_size)
{
    stmf_id_data_t *tgid;
    stmf_id_data_t *target;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    for (tgid = stmf_state.stmf_tg_list.idl_head; tgid;
        tgid = tgid->id_next) {
        target = stmf_lookup_id(
            (stmf_id_list_t *)tgid->id_impl_specific,
            ident_size, ident);
        if (target)
            return (tgid);
    }
    return (NULL);
}

/* Return the host group which a host belongs to */
stmf_id_data_t *
stmf_lookup_group_for_host(uint8_t *ident, uint16_t ident_size)
{
    stmf_id_data_t *hgid;
    stmf_id_data_t *host;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    for (hgid = stmf_state.stmf_hg_list.idl_head; hgid;
        hgid = hgid->id_next) {
        host = stmf_lookup_id(
            (stmf_id_list_t *)hgid->id_impl_specific,
            ident_size, ident);
        if (host)
            return (hgid);
    }
    return (NULL);
}

void
stmf_append_id(stmf_id_list_t *idlist, stmf_id_data_t *id)
{
    id->id_next = NULL;

    if ((id->id_prev = idlist->idl_tail) == NULL) {
        idlist->idl_head = idlist->idl_tail = id;
    } else {
        idlist->idl_tail->id_next = id;
        idlist->idl_tail = id;
    }
    atomic_inc_32(&idlist->id_count);
}

void
stmf_remove_id(stmf_id_list_t *idlist, stmf_id_data_t *id)
{
    if (id->id_next) {
        id->id_next->id_prev = id->id_prev;
    } else {
        idlist->idl_tail = id->id_prev;
    }

    if (id->id_prev) {
        id->id_prev->id_next = id->id_next;
    } else {
        idlist->idl_head = id->id_next;
    }
    atomic_dec_32(&idlist->id_count);
}


/*
 * The refcnts of objects in a view entry are updated when the entry
 * is successfully added. ve_map is just another representation of the
 * view entries in a LU. Duplicating or merging a ve map does not
 * affect any refcnts.
 * stmf_state.stmf_lock held
 */
stmf_lun_map_t *
stmf_duplicate_ve_map(stmf_lun_map_t *src)
{
    stmf_lun_map_t *dst;
    int i;

    dst = (stmf_lun_map_t *)kmem_zalloc(sizeof (*dst), KM_SLEEP);

    if (src == NULL)
        return (dst);

    if (src->lm_nentries) {
        dst->lm_nentries = src->lm_nentries;
        dst->lm_nluns = src->lm_nluns;
        dst->lm_plus = kmem_zalloc(dst->lm_nentries *
            sizeof (void *), KM_SLEEP);
        for (i = 0; i < dst->lm_nentries; i++) {
            dst->lm_plus[i] = src->lm_plus[i];
        }
    }

    return (dst);
}

void
stmf_destroy_ve_map(stmf_lun_map_t *dst)
{
    if (dst->lm_nentries) {
        kmem_free(dst->lm_plus, dst->lm_nentries * sizeof (void *));
    }
    kmem_free(dst, sizeof (*dst));
}

/*
 * stmf_state.stmf_lock held. Operations are stmf global in nature and
 * not session level.
 */
int
stmf_merge_ve_map(stmf_lun_map_t *src, stmf_lun_map_t *dst,
    stmf_lun_map_t **pp_ret_map, stmf_merge_flags_t mf)
{
    int i;
    int nentries;
    int to_create_space = 0;

    if (dst == NULL) {
        *pp_ret_map = stmf_duplicate_ve_map(src);
        return (1);
    }

    if (src == NULL || src->lm_nluns == 0) {
        if (mf & MERGE_FLAG_RETURN_NEW_MAP)
            *pp_ret_map = stmf_duplicate_ve_map(dst);
        else
            *pp_ret_map = dst;
        return (1);
    }

    if (mf & MERGE_FLAG_RETURN_NEW_MAP) {
        *pp_ret_map = stmf_duplicate_ve_map(NULL);
        nentries = max(dst->lm_nentries, src->lm_nentries);
        to_create_space = 1;
    } else {
        *pp_ret_map = dst;
        /* If there is not enough space in dst map */
        if (dst->lm_nentries < src->lm_nentries) {
            nentries = src->lm_nentries;
            to_create_space = 1;
        }
    }
    if (to_create_space) {
        void **p;
        p = (void **)kmem_zalloc(nentries * sizeof (void *), KM_SLEEP);
        if (dst->lm_nentries) {
            bcopy(dst->lm_plus, p,
                dst->lm_nentries * sizeof (void *));
        }
        if ((mf & MERGE_FLAG_RETURN_NEW_MAP) == 0)
            kmem_free(dst->lm_plus,
                dst->lm_nentries * sizeof (void *));
        (*pp_ret_map)->lm_plus = p;
        (*pp_ret_map)->lm_nentries = nentries;
    }

    for (i = 0; i < src->lm_nentries; i++) {
        if (src->lm_plus[i] == NULL)
            continue;
        if (dst->lm_plus[i] != NULL) {
            if (mf & MERGE_FLAG_NO_DUPLICATE) {
                if (mf & MERGE_FLAG_RETURN_NEW_MAP) {
                    stmf_destroy_ve_map(*pp_ret_map);
                    *pp_ret_map = NULL;
                }
                return (0);
            }
        } else {
            dst->lm_plus[i] = src->lm_plus[i];
            dst->lm_nluns++;
        }
    }

    return (1);
}
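
/*
 * Usage sketch (illustration only): callers in this file accumulate a
 * session view by merging each matching per-host-group map into one
 * running map with mf == 0, e.g.:
 *
 *    stmf_lun_map_t *ve_map = stmf_duplicate_ve_map(0);
 *    ...
 *    (void) stmf_merge_ve_map(&verhg->verh_ve_map, ve_map, &ve_map, 0);
 *    ...
 *    stmf_destroy_ve_map(ve_map);
 *
 * A zero return only occurs when MERGE_FLAG_NO_DUPLICATE is passed and
 * both maps carry an entry for the same LUN number; otherwise the
 * function returns 1.
 */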

/*
 * add a host group; id_impl_specific points to a list of hosts.
 * On return, if an error happened, err_detail may be assigned if
 * the pointer is not NULL
 */
stmf_status_t
stmf_add_hg(uint8_t *hg_name, uint16_t hg_name_size,
    int allow_special, uint32_t *err_detail)
{
    stmf_id_data_t *id;

    if (!allow_special) {
        if (hg_name[0] == '*')
            return (STMF_INVALID_ARG);
    }

    if (stmf_lookup_id(&stmf_state.stmf_hg_list,
        hg_name_size, (uint8_t *)hg_name)) {
        if (err_detail)
            *err_detail = STMF_IOCERR_HG_EXISTS;
        return (STMF_ALREADY);
    }
    id = stmf_alloc_id(hg_name_size, STMF_ID_TYPE_HOST_GROUP,
        (uint8_t *)hg_name, sizeof (stmf_id_list_t));
    stmf_append_id(&stmf_state.stmf_hg_list, id);

    return (STMF_SUCCESS);
}

/* add target group */
stmf_status_t
stmf_add_tg(uint8_t *tg_name, uint16_t tg_name_size,
    int allow_special, uint32_t *err_detail)
{
    stmf_id_data_t *id;

    if (!allow_special) {
        if (tg_name[0] == '*')
            return (STMF_INVALID_ARG);
    }

    if (stmf_lookup_id(&stmf_state.stmf_tg_list, tg_name_size,
        (uint8_t *)tg_name)) {
        if (err_detail)
            *err_detail = STMF_IOCERR_TG_EXISTS;
        return (STMF_ALREADY);
    }
    id = stmf_alloc_id(tg_name_size, STMF_ID_TYPE_TARGET_GROUP,
        (uint8_t *)tg_name, sizeof (stmf_id_list_t));
    stmf_append_id(&stmf_state.stmf_tg_list, id);

    return (STMF_SUCCESS);
}

/*
 * insert a view entry into the list for a luid; if ve->ve_id is
 * 0xffffffff, pick the smallest available veid for it and return the
 * veid in ve->ve_id.  The view entries list is sorted based on veid.
 */
stmf_status_t
stmf_add_ve_to_luid(stmf_id_data_t *luid, stmf_view_entry_t *ve)
{
    stmf_view_entry_t *ve_tmp = NULL;
    stmf_view_entry_t *ve_prev = NULL;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    ve_tmp = (stmf_view_entry_t *)luid->id_impl_specific;

    if (ve->ve_id != 0xffffffff) {
        for (; ve_tmp; ve_tmp = ve_tmp->ve_next) {
            if (ve_tmp->ve_id > ve->ve_id) {
                break;
            } else if (ve_tmp->ve_id == ve->ve_id) {
                return (STMF_ALREADY);
            }
            ve_prev = ve_tmp;
        }
    } else {
        uint32_t veid = 0;
        /* search the smallest available veid */
        for (; ve_tmp; ve_tmp = ve_tmp->ve_next) {
            ASSERT(ve_tmp->ve_id >= veid);
            if (ve_tmp->ve_id != veid)
                break;
            veid++;
            if (veid == 0xffffffff)
                return (STMF_NOT_SUPPORTED);
            ve_prev = ve_tmp;
        }
        ve->ve_id = veid;
    }

    /* insert before ve_tmp if it exists */
    ve->ve_next = ve_tmp;
    ve->ve_prev = ve_prev;
    if (ve_tmp) {
        ve_tmp->ve_prev = ve;
    }
    if (ve_prev) {
        ve_prev->ve_next = ve;
    } else {
        luid->id_impl_specific = (void *)ve;
    }
    return (STMF_SUCCESS);
}
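
/*
 * Worked example (illustration only): if a LU already has view entries
 * with veids 0, 1 and 3, inserting a new entry with ve->ve_id set to
 * 0xffffffff walks the sorted list, stops at the first gap and assigns
 * veid 2, linking the entry between veid 1 and veid 3.  Passing an
 * explicit ve->ve_id instead either inserts at the sorted position or
 * returns STMF_ALREADY if that veid is already taken.
 */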

/* stmf_lock is already held, err_detail may be assigned if error happens */
stmf_status_t
stmf_add_view_entry(stmf_id_data_t *hg, stmf_id_data_t *tg,
    uint8_t *lu_guid, uint32_t *ve_id, uint8_t *lun,
    stmf_view_entry_t **conflicting, uint32_t *err_detail)
{
    stmf_id_data_t *luid;
    stmf_view_entry_t *ve;
    char *phg, *ptg;
    stmf_lun_map_t *ve_map = NULL;
    stmf_ver_hg_t *verhg = NULL, *verhg_ex = NULL;
    stmf_ver_tg_t *vertg = NULL, *vertg_ex = NULL;
    char luid_new;
    uint16_t lun_num;
    stmf_i_lu_t *ilu;
    stmf_status_t ret;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    lun_num = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));

    luid = stmf_lookup_id(&stmf_state.stmf_luid_list, 16, lu_guid);
    if (luid == NULL) {
        luid = stmf_alloc_id(16, STMF_ID_TYPE_LU_GUID, lu_guid, 0);
        ilu = stmf_luident_to_ilu(lu_guid);
        if (ilu) {
            ilu->ilu_luid = luid;
            luid->id_pt_to_object = (void *)ilu;
        }
        luid_new = 1;
    } else {
        luid_new = 0;
        ilu = (stmf_i_lu_t *)luid->id_pt_to_object;
    }

    /* The view entry won't be added if there is any conflict */
    phg = (char *)hg->id_data; ptg = (char *)tg->id_data;
    for (ve = (stmf_view_entry_t *)luid->id_impl_specific; ve != NULL;
        ve = ve->ve_next) {
        if (((phg[0] == '*') || (ve->ve_hg->id_data[0] == '*') ||
            (hg == ve->ve_hg)) && ((ptg[0] == '*') ||
            (ve->ve_tg->id_data[0] == '*') || (tg == ve->ve_tg))) {
            *conflicting = ve;
            *err_detail = STMF_IOCERR_VIEW_ENTRY_CONFLICT;
            ret = STMF_ALREADY;
            goto add_ve_err_ret;
        }
    }

    ve_map = stmf_duplicate_ve_map(0);
    for (vertg = stmf_state.stmf_ver_tg_head; vertg != NULL;
        vertg = vertg->vert_next) {
        ptg = (char *)vertg->vert_tg_ref->id_data;
        if ((ptg[0] != '*') && (tg->id_data[0] != '*') &&
            (vertg->vert_tg_ref != tg)) {
            continue;
        }
        if (vertg->vert_tg_ref == tg)
            vertg_ex = vertg;
        for (verhg = vertg->vert_verh_list; verhg != NULL;
            verhg = verhg->verh_next) {
            phg = (char *)verhg->verh_hg_ref->id_data;
            if ((phg[0] != '*') && (hg->id_data[0] != '*') &&
                (verhg->verh_hg_ref != hg)) {
                continue;
            }
            if ((vertg_ex == vertg) && (verhg->verh_hg_ref == hg))
                verhg_ex = verhg;
            (void) stmf_merge_ve_map(&verhg->verh_ve_map, ve_map,
                &ve_map, 0);
        }
    }

    if (lun[2] == 0xFF) {
        /* Pick a LUN number */
        lun_num = stmf_get_next_free_lun(ve_map, lun);
        if (lun_num > 0x3FFF) {
            stmf_destroy_ve_map(ve_map);
            ret = STMF_NOT_SUPPORTED;
            goto add_ve_err_ret;
        }
    } else {
        if ((*conflicting = stmf_get_ent_from_map(ve_map, lun_num))
            != NULL) {
            stmf_destroy_ve_map(ve_map);
            *err_detail = STMF_IOCERR_LU_NUMBER_IN_USE;
            ret = STMF_LUN_TAKEN;
            goto add_ve_err_ret;
        }
    }
    stmf_destroy_ve_map(ve_map);

    /* All is well, do the actual addition now */
    ve = (stmf_view_entry_t *)kmem_zalloc(sizeof (*ve), KM_SLEEP);
    ve->ve_id = *ve_id;
    ve->ve_lun[0] = lun[0];
    ve->ve_lun[1] = lun[1];

    if ((ret = stmf_add_ve_to_luid(luid, ve)) != STMF_SUCCESS) {
        kmem_free(ve, sizeof (stmf_view_entry_t));
        goto add_ve_err_ret;
    }
    ve->ve_hg = hg; hg->id_refcnt++;
    ve->ve_tg = tg; tg->id_refcnt++;
    ve->ve_luid = luid; luid->id_refcnt++;

    *ve_id = ve->ve_id;

    if (luid_new) {
        stmf_append_id(&stmf_state.stmf_luid_list, luid);
    }

    if (vertg_ex == NULL) {
        vertg_ex = (stmf_ver_tg_t *)kmem_zalloc(sizeof (stmf_ver_tg_t),
            KM_SLEEP);
        vertg_ex->vert_next = stmf_state.stmf_ver_tg_head;
        stmf_state.stmf_ver_tg_head = vertg_ex;
        vertg_ex->vert_tg_ref = tg;
        verhg_ex = vertg_ex->vert_verh_list =
            (stmf_ver_hg_t *)kmem_zalloc(sizeof (stmf_ver_hg_t),
            KM_SLEEP);
        verhg_ex->verh_hg_ref = hg;
    }
    if (verhg_ex == NULL) {
        verhg_ex = (stmf_ver_hg_t *)kmem_zalloc(sizeof (stmf_ver_hg_t),
            KM_SLEEP);
        verhg_ex->verh_next = vertg_ex->vert_verh_list;
        vertg_ex->vert_verh_list = verhg_ex;
        verhg_ex->verh_hg_ref = hg;
    }
    ret = stmf_add_ent_to_map(&verhg_ex->verh_ve_map, ve, ve->ve_lun);
    ASSERT(ret == STMF_SUCCESS);

    /* we need to update the affected session */
    if (stmf_state.stmf_service_running) {
        if (ilu && ilu->ilu_state == STMF_STATE_ONLINE)
            stmf_update_sessions_per_ve(ve, ilu->ilu_lu, 1);
    }

    return (STMF_SUCCESS);
add_ve_err_ret:
    if (luid_new) {
        if (ilu)
            ilu->ilu_luid = NULL;
        stmf_free_id(luid);
    }
    return (ret);
}

/*
 * protected by stmf_state.stmf_lock when working on global lun map.
 * iss_lockp when working at the session level.
 */
static stmf_status_t
stmf_add_ent_to_map(stmf_lun_map_t *lm, void *ent, uint8_t *lun)
{
    uint16_t n;
    if (((lun[0] & 0xc0) >> 6) != 0)
        return (STMF_FAILURE);

    n = (uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8);
try_again_to_add:
    if (lm->lm_nentries && (n < lm->lm_nentries)) {
        if (lm->lm_plus[n] == NULL) {
            lm->lm_plus[n] = ent;
            lm->lm_nluns++;
            return (STMF_SUCCESS);
        } else {
            return (STMF_LUN_TAKEN);
        }
    } else {
        void **pplu;
        uint16_t m = n + 1;
        m = ((m + 7) & ~7) & 0x7FFF;
        pplu = (void **)kmem_zalloc(m * sizeof (void *), KM_SLEEP);
        bcopy(lm->lm_plus, pplu,
            lm->lm_nentries * sizeof (void *));
        kmem_free(lm->lm_plus, lm->lm_nentries * sizeof (void *));
        lm->lm_plus = pplu;
        lm->lm_nentries = m;
        goto try_again_to_add;
    }
}

/*
 * iss_lockp held when working on a session.
 * stmf_state.stmf_lock is held when working on the global views.
 */
static stmf_status_t
stmf_remove_ent_from_map(stmf_lun_map_t *lm, uint8_t *lun)
{
    uint16_t n, i;
    uint8_t lutype = (lun[0] & 0xc0) >> 6;
    if (lutype != 0)
        return (STMF_FAILURE);

    n = (uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8);

    if (n >= lm->lm_nentries)
        return (STMF_NOT_FOUND);
    if (lm->lm_plus[n] == NULL)
        return (STMF_NOT_FOUND);

    lm->lm_plus[n] = NULL;
    lm->lm_nluns--;

    for (i = 0; i < lm->lm_nentries; i++) {
        if (lm->lm_plus[lm->lm_nentries - 1 - i] != NULL)
            break;
    }
    i &= ~15;
    if (i >= 16) {
        void **pplu;
        uint16_t m;
        m = lm->lm_nentries - i;
        pplu = (void **)kmem_zalloc(m * sizeof (void *), KM_SLEEP);
        bcopy(lm->lm_plus, pplu, m * sizeof (void *));
        kmem_free(lm->lm_plus, lm->lm_nentries * sizeof (void *));
        lm->lm_plus = pplu;
        lm->lm_nentries = m;
    }

    return (STMF_SUCCESS);
}

/*
 * stmf_state.stmf_lock held
 */
uint16_t
stmf_get_next_free_lun(stmf_lun_map_t *sm, uint8_t *lun)
{
    uint16_t luNbr;

    if (sm->lm_nluns < 0x4000) {
        for (luNbr = 0; luNbr < sm->lm_nentries; luNbr++) {
            if (sm->lm_plus[luNbr] == NULL)
                break;
        }
    } else {
        return (0xFFFF);
    }
    if (lun) {
        bzero(lun, 8);
        lun[1] = luNbr & 0xff;
        lun[0] = (luNbr >> 8) & 0xff;
    }

    return (luNbr);
}

/*
 * stmf_state.stmf_lock is held when working on global view map
 * iss_lockp (RW_WRITER) is held when working on session map.
 */
void *
stmf_get_ent_from_map(stmf_lun_map_t *sm, uint16_t lun_num)
{
    if ((lun_num & 0xC000) == 0) {
        if (sm->lm_nentries > lun_num)
            return (sm->lm_plus[lun_num & 0x3FFF]);
        else
            return (NULL);
    }

    return (NULL);
}

int
stmf_add_ve(uint8_t *hgname, uint16_t hgname_size,
    uint8_t *tgname, uint16_t tgname_size,
    uint8_t *lu_guid, uint32_t *ve_id,
    uint8_t *luNbr, uint32_t *err_detail)
{
    stmf_id_data_t *hg;
    stmf_id_data_t *tg;
    stmf_view_entry_t *conflictve;
    stmf_status_t ret;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    hg = stmf_lookup_id(&stmf_state.stmf_hg_list, hgname_size,
        (uint8_t *)hgname);
    if (!hg) {
        *err_detail = STMF_IOCERR_INVALID_HG;
        return (ENOENT); /* could not find group */
    }
    tg = stmf_lookup_id(&stmf_state.stmf_tg_list, tgname_size,
        (uint8_t *)tgname);
    if (!tg) {
        *err_detail = STMF_IOCERR_INVALID_TG;
        return (ENOENT); /* could not find group */
    }
    ret = stmf_add_view_entry(hg, tg, lu_guid, ve_id, luNbr,
        &conflictve, err_detail);

    if (ret == STMF_ALREADY) {
        return (EALREADY);
    } else if (ret == STMF_LUN_TAKEN) {
        return (EEXIST);
    } else if (ret == STMF_NOT_SUPPORTED) {
        return (E2BIG);
    } else if (ret != STMF_SUCCESS) {
        return (EINVAL);
    }
    return (0);
}

int
stmf_remove_ve_by_id(uint8_t *guid, uint32_t veid, uint32_t *err_detail)
{
    stmf_id_data_t *luid;
    stmf_view_entry_t *ve;
    stmf_ver_tg_t *vtg;
    stmf_ver_hg_t *vhg;
    stmf_ver_tg_t *prev_vtg = NULL;
    stmf_ver_hg_t *prev_vhg = NULL;
    int found = 0;
    stmf_i_lu_t *ilu;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));
    luid = stmf_lookup_id(&stmf_state.stmf_luid_list, 16, guid);
    if (luid == NULL) {
        *err_detail = STMF_IOCERR_INVALID_LU_ID;
        return (ENODEV);
    }
    ilu = (stmf_i_lu_t *)luid->id_pt_to_object;

    for (ve = (stmf_view_entry_t *)luid->id_impl_specific;
        ve; ve = ve->ve_next) {
        if (ve->ve_id == veid) {
            break;
        }
    }
    if (!ve) {
        *err_detail = STMF_IOCERR_INVALID_VE_ID;
        return (ENODEV);
    }
    /* remove the ve */
    if (ve->ve_next)
        ve->ve_next->ve_prev = ve->ve_prev;
    if (ve->ve_prev)
        ve->ve_prev->ve_next = ve->ve_next;
    else {
        luid->id_impl_specific = (void *)ve->ve_next;
        if (!luid->id_impl_specific) {
            /* don't have any view entries related to this lu */
            stmf_remove_id(&stmf_state.stmf_luid_list, luid);
            if (ilu)
                ilu->ilu_luid = NULL;
            stmf_free_id(luid);
        }
    }

    /* we need to update ver_hg->verh_ve_map */
    for (vtg = stmf_state.stmf_ver_tg_head; vtg; vtg = vtg->vert_next) {
        if (vtg->vert_tg_ref == ve->ve_tg) {
            found = 1;
            break;
        }
        prev_vtg = vtg;
    }
    ASSERT(found);
    found = 0;
    for (vhg = vtg->vert_verh_list; vhg; vhg = vhg->verh_next) {
        if (vhg->verh_hg_ref == ve->ve_hg) {
            found = 1;
            break;
        }
        prev_vhg = vhg;
    }
    ASSERT(found);

    (void) stmf_remove_ent_from_map(&vhg->verh_ve_map, ve->ve_lun);

    /* free verhg if it doesn't have any related ve entries */
    if (!vhg->verh_ve_map.lm_nluns) {
        /* we don't have any view entry related */
        if (prev_vhg)
            prev_vhg->verh_next = vhg->verh_next;
        else
            vtg->vert_verh_list = vhg->verh_next;

        /* Free entries in case the map still has memory */
        if (vhg->verh_ve_map.lm_nentries) {
            kmem_free(vhg->verh_ve_map.lm_plus,
                vhg->verh_ve_map.lm_nentries *
                sizeof (void *));
        }
        kmem_free(vhg, sizeof (stmf_ver_hg_t));
        if (!vtg->vert_verh_list) {
            /* we don't have any ve related */
            if (prev_vtg)
                prev_vtg->vert_next = vtg->vert_next;
            else
                stmf_state.stmf_ver_tg_head = vtg->vert_next;
            kmem_free(vtg, sizeof (stmf_ver_tg_t));
        }
    }

    if (stmf_state.stmf_service_running && ilu &&
        ilu->ilu_state == STMF_STATE_ONLINE) {
        stmf_update_sessions_per_ve(ve, ilu->ilu_lu, 0);
    }

    ve->ve_hg->id_refcnt--;
    ve->ve_tg->id_refcnt--;

    kmem_free(ve, sizeof (stmf_view_entry_t));
    return (0);
}

int
stmf_add_group(uint8_t *grpname, uint16_t grpname_size,
    stmf_id_type_t group_type, uint32_t *err_detail)
{
    stmf_status_t status;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    if (group_type == STMF_ID_TYPE_HOST_GROUP)
        status = stmf_add_hg(grpname, grpname_size, 0, err_detail);
    else if (group_type == STMF_ID_TYPE_TARGET_GROUP)
        status = stmf_add_tg(grpname, grpname_size, 0, err_detail);
    else {
        return (EINVAL);
    }
    switch (status) {
    case STMF_SUCCESS:
        return (0);
    case STMF_INVALID_ARG:
        return (EINVAL);
    case STMF_ALREADY:
        return (EEXIST);
    default:
        return (EIO);
    }
}

/*
 * A group can only be removed when it does not have any view entry
 * related to it
 */
int
stmf_remove_group(uint8_t *grpname, uint16_t grpname_size,
    stmf_id_type_t group_type, uint32_t *err_detail)
{
    stmf_id_data_t *id = NULL;
    stmf_id_data_t *idmemb;
    stmf_id_list_t *grp_memblist;
    stmf_i_scsi_session_t *iss;
    stmf_i_local_port_t *ilport;

    if (grpname[0] == '*')
        return (EINVAL);

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    if (group_type == STMF_ID_TYPE_HOST_GROUP)
        id = stmf_lookup_id(&stmf_state.stmf_hg_list,
            grpname_size, grpname);
    else if (group_type == STMF_ID_TYPE_TARGET_GROUP)
        id = stmf_lookup_id(&stmf_state.stmf_tg_list,
            grpname_size, grpname);
    if (!id) {
        *err_detail = (group_type == STMF_ID_TYPE_HOST_GROUP) ?
            STMF_IOCERR_INVALID_HG : STMF_IOCERR_INVALID_TG;
        return (ENODEV); /* no such grp */
    }
    if (id->id_refcnt) {
        /* fail, still have view entries related to it */
        *err_detail = (group_type == STMF_ID_TYPE_HOST_GROUP) ?
            STMF_IOCERR_HG_IN_USE : STMF_IOCERR_TG_IN_USE;
        return (EBUSY);
    }
    grp_memblist = (stmf_id_list_t *)id->id_impl_specific;
    while ((idmemb = grp_memblist->idl_head) != NULL) {
        stmf_remove_id(grp_memblist, idmemb);
        stmf_free_id(idmemb);
    }

    ASSERT(!grp_memblist->id_count);
    if (id->id_type == STMF_ID_TYPE_TARGET_GROUP) {
        for (ilport = stmf_state.stmf_ilportlist; ilport;
            ilport = ilport->ilport_next) {
            if (ilport->ilport_tg == (void *)id) {
                ilport->ilport_tg = NULL;
            }
        }
        stmf_remove_id(&stmf_state.stmf_tg_list, id);
    } else {
        for (ilport = stmf_state.stmf_ilportlist; ilport;
            ilport = ilport->ilport_next) {
            for (iss = ilport->ilport_ss_list; iss;
                iss = iss->iss_next) {
                if (iss->iss_hg == (void *)id)
                    iss->iss_hg = NULL;
            }
        }
        stmf_remove_id(&stmf_state.stmf_hg_list, id);
    }
    stmf_free_id(id);
    return (0);
}

int
stmf_add_group_member(uint8_t *grpname, uint16_t grpname_size,
    uint8_t *entry_ident, uint16_t entry_size,
    stmf_id_type_t entry_type, uint32_t *err_detail)
{
    stmf_id_data_t *id_grp, *id_alltgt;
    stmf_id_data_t *id_member;
    stmf_id_data_t *id_grp_tmp;
    stmf_i_scsi_session_t *iss;
    stmf_i_local_port_t *ilport;
    stmf_lun_map_t *vemap, *vemap_alltgt;
    uint8_t grpname_forall = '*';

    ASSERT(mutex_owned(&stmf_state.stmf_lock));
    ASSERT(grpname[0] != '*');

    if (entry_type == STMF_ID_TYPE_HOST) {
        id_grp = stmf_lookup_id(&stmf_state.stmf_hg_list,
            grpname_size, grpname);
        id_grp_tmp = stmf_lookup_group_for_host(entry_ident,
            entry_size);
    } else {
        id_grp = stmf_lookup_id(&stmf_state.stmf_tg_list,
            grpname_size, grpname);
        id_grp_tmp = stmf_lookup_group_for_target(entry_ident,
            entry_size);
    }
    if (id_grp == NULL) {
        *err_detail = (entry_type == STMF_ID_TYPE_HOST) ?
            STMF_IOCERR_INVALID_HG : STMF_IOCERR_INVALID_TG;
        return (ENODEV); /* not found */
    }

    /* Check whether this member is already bound to a group */
    if (id_grp_tmp) {
        if (id_grp_tmp != id_grp) {
            *err_detail = (entry_type == STMF_ID_TYPE_HOST) ?
                STMF_IOCERR_HG_ENTRY_EXISTS :
                STMF_IOCERR_TG_ENTRY_EXISTS;
            return (EEXIST); /* already added into another grp */
        } else {
            return (0);
        }
    }

    /* verify target is offline */
    if (entry_type == STMF_ID_TYPE_TARGET) {
        ilport = stmf_targetident_to_ilport(entry_ident, entry_size);
        if (ilport && ilport->ilport_state != STMF_STATE_OFFLINE) {
            *err_detail = STMF_IOCERR_TG_NEED_TG_OFFLINE;
            return (EBUSY);
        }
    }

    id_member = stmf_alloc_id(entry_size, entry_type,
        entry_ident, 0);
    stmf_append_id((stmf_id_list_t *)id_grp->id_impl_specific, id_member);

    if (entry_type == STMF_ID_TYPE_TARGET) {
        ilport = stmf_targetident_to_ilport(entry_ident, entry_size);
        if (ilport)
            ilport->ilport_tg = (void *)id_grp;
        return (0);
    }
    /* For host group member, update the session if needed */
    if (!stmf_state.stmf_service_running)
        return (0);
    /* Need to consider all target groups + this host group */
    id_alltgt = stmf_lookup_id(&stmf_state.stmf_tg_list,
        1, &grpname_forall);
    vemap_alltgt = stmf_get_ve_map_per_ids(id_alltgt, id_grp);

    /* check whether there are sessions that may be affected */
    for (ilport = stmf_state.stmf_ilportlist; ilport;
        ilport = ilport->ilport_next) {
        if (ilport->ilport_state != STMF_STATE_ONLINE)
            continue;
        iss = stmf_lookup_session_for_hostident(ilport,
            entry_ident, entry_size);
        if (iss) {
            stmf_id_data_t *tgid;
            iss->iss_hg = (void *)id_grp;
            tgid = ilport->ilport_tg;
            rw_enter(iss->iss_lockp, RW_WRITER);
            if (tgid) {
                vemap = stmf_get_ve_map_per_ids(tgid, id_grp);
                if (vemap)
                    stmf_add_lus_to_session_per_vemap(
                        ilport, iss, vemap);
            }
            if (vemap_alltgt)
                stmf_add_lus_to_session_per_vemap(ilport,
                    iss, vemap_alltgt);
            rw_exit(iss->iss_lockp);
        }
    }

    return (0);
}

int
stmf_remove_group_member(uint8_t *grpname, uint16_t grpname_size,
    uint8_t *entry_ident, uint16_t entry_size,
    stmf_id_type_t entry_type, uint32_t *err_detail)
{
    stmf_id_data_t *id_grp, *id_alltgt;
    stmf_id_data_t *id_member;
    stmf_lun_map_t *vemap, *vemap_alltgt;
    uint8_t grpname_forall = '*';
    stmf_i_local_port_t *ilport;
    stmf_i_scsi_session_t *iss;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));
    ASSERT(grpname[0] != '*');

    if (entry_type == STMF_ID_TYPE_HOST) {
        id_grp = stmf_lookup_id(&stmf_state.stmf_hg_list,
            grpname_size, grpname);
    } else {
        id_grp = stmf_lookup_id(&stmf_state.stmf_tg_list,
            grpname_size, grpname);
    }
    if (id_grp == NULL) {
        *err_detail = (entry_type == STMF_ID_TYPE_HOST) ?
            STMF_IOCERR_INVALID_HG : STMF_IOCERR_INVALID_TG;
        return (ENODEV); /* no such group */
    }
    id_member = stmf_lookup_id((stmf_id_list_t *)id_grp->id_impl_specific,
        entry_size, entry_ident);
    if (!id_member) {
        *err_detail = (entry_type == STMF_ID_TYPE_HOST) ?
            STMF_IOCERR_INVALID_HG_ENTRY : STMF_IOCERR_INVALID_TG_ENTRY;
        return (ENODEV); /* no such member */
    }
    /* verify target is offline */
    if (entry_type == STMF_ID_TYPE_TARGET) {
        ilport = stmf_targetident_to_ilport(entry_ident, entry_size);
        if (ilport && ilport->ilport_state != STMF_STATE_OFFLINE) {
            *err_detail = STMF_IOCERR_TG_NEED_TG_OFFLINE;
            return (EBUSY);
        }
    }

    stmf_remove_id((stmf_id_list_t *)id_grp->id_impl_specific, id_member);
    stmf_free_id(id_member);

    if (entry_type == STMF_ID_TYPE_TARGET) {
        ilport = stmf_targetident_to_ilport(entry_ident, entry_size);
        if (ilport)
            ilport->ilport_tg = NULL;
        return (0);
    }
    /* For host group member, update the session */
    if (!stmf_state.stmf_service_running)
        return (0);

    /* Need to consider all target groups + this host group */
    id_alltgt = stmf_lookup_id(&stmf_state.stmf_tg_list,
        1, &grpname_forall);
    vemap_alltgt = stmf_get_ve_map_per_ids(id_alltgt, id_grp);

    /* check if there are related sessions; if so, update them */
    for (ilport = stmf_state.stmf_ilportlist; ilport;
        ilport = ilport->ilport_next) {
        if (ilport->ilport_state != STMF_STATE_ONLINE)
            continue;
        iss = stmf_lookup_session_for_hostident(ilport,
            entry_ident, entry_size);
        if (iss) {
            stmf_id_data_t *tgid;
            rw_enter(iss->iss_lockp, RW_WRITER);
            iss->iss_hg = NULL;
            tgid = ilport->ilport_tg;
            if (tgid) {
                vemap = stmf_get_ve_map_per_ids(tgid, id_grp);
                if (vemap)
                    stmf_remove_lus_from_session_per_vemap(
                        iss, vemap);
            }
            if (vemap_alltgt)
                stmf_remove_lus_from_session_per_vemap(iss,
                    vemap_alltgt);
            rw_exit(iss->iss_lockp);
        }
    }

    return (0);
}

/* Assert stmf_lock is already held */
stmf_i_local_port_t *
stmf_targetident_to_ilport(uint8_t *target_ident, uint16_t ident_size)
{
    stmf_i_local_port_t *ilport;
    uint8_t *id;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    for (ilport = stmf_state.stmf_ilportlist; ilport;
        ilport = ilport->ilport_next) {
        id = (uint8_t *)ilport->ilport_lport->lport_id;
        if ((id[3] == ident_size) &&
            bcmp(id + 4, target_ident, ident_size) == 0) {
            return (ilport);
        }
    }
    return (NULL);
}

stmf_i_scsi_session_t *
stmf_lookup_session_for_hostident(stmf_i_local_port_t *ilport,
    uint8_t *host_ident, uint16_t ident_size)
{
    stmf_i_scsi_session_t *iss;
    uint8_t *id;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    for (iss = ilport->ilport_ss_list; iss; iss = iss->iss_next) {
        id = (uint8_t *)iss->iss_ss->ss_rport_id;
        if ((id[3] == ident_size) &&
            bcmp(id + 4, host_ident, ident_size) == 0) {
            return (iss);
        }
    }
    return (NULL);
}

stmf_i_lu_t *
stmf_luident_to_ilu(uint8_t *lu_ident)
{
    stmf_i_lu_t *ilu;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
        if (bcmp(&ilu->ilu_lu->lu_id->ident[0], lu_ident, 16) == 0)
            return (ilu);
    }

    return (NULL);
}
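
/*
 * Layout note (an assumption documented for clarity, not new driver code):
 * the two lookups above treat lport_id/ss_rport_id as a SCSI designation
 * descriptor, where byte 3 holds the identifier length and the identifier
 * itself starts at byte 4, i.e.:
 *
 *    uint8_t *id = (uint8_t *)iss->iss_ss->ss_rport_id;
 *    uint16_t ident_size = id[3];
 *    uint8_t *ident = id + 4;
 */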

/*
 * Assert stmf_lock is already held.
 * Just get the view map for the specific target group and host group;
 * tgid and hgid can not be NULL
 */
stmf_lun_map_t *
stmf_get_ve_map_per_ids(stmf_id_data_t *tgid, stmf_id_data_t *hgid)
{
    int found = 0;
    stmf_ver_tg_t *vertg;
    stmf_ver_hg_t *verhg;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    for (vertg = stmf_state.stmf_ver_tg_head;
        vertg; vertg = vertg->vert_next) {
        if (vertg->vert_tg_ref == tgid) {
            found = 1;
            break;
        }
    }
    if (!found)
        return (NULL);

    for (verhg = vertg->vert_verh_list; verhg; verhg = verhg->verh_next) {
        if (verhg->verh_hg_ref == hgid) {
            return (&verhg->verh_ve_map);
        }
    }
    return (NULL);
}

stmf_status_t
stmf_validate_lun_view_entry(stmf_id_data_t *hg, stmf_id_data_t *tg,
    uint8_t *lun, uint32_t *err_detail)
{
    char *phg, *ptg;
    stmf_lun_map_t *ve_map = NULL;
    stmf_ver_hg_t *verhg = NULL;
    stmf_ver_tg_t *vertg = NULL;
    uint16_t lun_num;
    stmf_status_t ret = STMF_SUCCESS;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    ve_map = stmf_duplicate_ve_map(0);
    for (vertg = stmf_state.stmf_ver_tg_head; vertg != NULL;
        vertg = vertg->vert_next) {
        ptg = (char *)vertg->vert_tg_ref->id_data;
        if ((ptg[0] != '*') && (tg->id_data[0] != '*') &&
            (vertg->vert_tg_ref != tg)) {
            continue;
        }
        for (verhg = vertg->vert_verh_list; verhg != NULL;
            verhg = verhg->verh_next) {
            phg = (char *)verhg->verh_hg_ref->id_data;
            if ((phg[0] != '*') && (hg->id_data[0] != '*') &&
                (verhg->verh_hg_ref != hg)) {
                continue;
            }
            (void) stmf_merge_ve_map(&verhg->verh_ve_map, ve_map,
                &ve_map, 0);
        }
    }

    ret = STMF_SUCCESS;
    /* Return an available lun number */
    if (lun[2] == 0xFF) {
        /* Pick a LUN number */
        lun_num = stmf_get_next_free_lun(ve_map, lun);
        if (lun_num > 0x3FFF)
            ret = STMF_NOT_SUPPORTED;
    } else {
        lun_num = (uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8);
        if (stmf_get_ent_from_map(ve_map, lun_num) != NULL) {
            *err_detail = STMF_IOCERR_LU_NUMBER_IN_USE;
            ret = STMF_LUN_TAKEN;
        }
    }
    stmf_destroy_ve_map(ve_map);

    return (ret);
}

int
stmf_validate_lun_ve(uint8_t *hgname, uint16_t hgname_size,
    uint8_t *tgname, uint16_t tgname_size,
    uint8_t *luNbr, uint32_t *err_detail)
{
    stmf_id_data_t *hg;
    stmf_id_data_t *tg;
    stmf_status_t ret;

    ASSERT(mutex_owned(&stmf_state.stmf_lock));

    hg = stmf_lookup_id(&stmf_state.stmf_hg_list, hgname_size,
        (uint8_t *)hgname);
    if (!hg) {
        *err_detail = STMF_IOCERR_INVALID_HG;
        return (ENOENT); /* could not find group */
    }
    tg = stmf_lookup_id(&stmf_state.stmf_tg_list, tgname_size,
        (uint8_t *)tgname);
    if (!tg) {
        *err_detail = STMF_IOCERR_INVALID_TG;
        return (ENOENT); /* could not find group */
    }
    ret = stmf_validate_lun_view_entry(hg, tg, luNbr, err_detail);

    if (ret == STMF_LUN_TAKEN) {
        return (EEXIST);
    } else if (ret == STMF_NOT_SUPPORTED) {
        return (E2BIG);
    } else if (ret != STMF_SUCCESS) {
        return (EINVAL);
    }
    return (0);
}
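
/*
 * Configuration sketch (illustration only; error handling omitted and the
 * group name, initiator_ident and lu_guid below are hypothetical).  A
 * typical sequence for exposing a LU through this module, with
 * stmf_state.stmf_lock held across each call as the functions above
 * require, would look like:
 *
 *    uint32_t err = 0, veid = 0xffffffff;
 *    uint8_t lun[8] = { 0, 0, 0xFF };	auto-pick a LUN number
 *
 *    (void) stmf_add_group((uint8_t *)"hosts-a", 7,
 *        STMF_ID_TYPE_HOST_GROUP, &err);
 *    (void) stmf_add_group_member((uint8_t *)"hosts-a", 7,
 *        initiator_ident, initiator_ident_size,
 *        STMF_ID_TYPE_HOST, &err);
 *    (void) stmf_add_ve((uint8_t *)"hosts-a", 7, (uint8_t *)"*", 1,
 *        lu_guid, &veid, lun, &err);
 *
 * The "*" target group is the wildcard group created by stmf_view_init(),
 * so the entry applies to all target ports.
 */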