xref: /illumos-gate/usr/src/uts/common/io/comstar/stmf/lun_map.c (revision 07a48826732249fcd3aa8dd53c8389595e9f1fbc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/disp.h>
34 #include <sys/byteorder.h>
35 #include <sys/atomic.h>
36 
37 #include "stmf.h"
38 #include "lpif.h"
39 #include "portif.h"
40 #include "stmf_ioctl.h"
41 #include "stmf_impl.h"
42 #include "lun_map.h"
43 #include "stmf_state.h"
44 
45 void stmf_update_sessions_per_ve(stmf_view_entry_t *ve,
46 		stmf_lu_t *lu, int action);
47 void stmf_add_lus_to_session_per_vemap(stmf_i_local_port_t *ilport,
48 		stmf_i_scsi_session_t *iss, stmf_lun_map_t *vemap);
49 stmf_id_data_t *stmf_lookup_group_for_host(uint8_t *ident, uint16_t ident_size);
50 stmf_status_t stmf_add_ent_to_map(stmf_lun_map_t *sm, void *ent, uint8_t *lun);
51 stmf_status_t stmf_remove_ent_from_map(stmf_lun_map_t *sm, uint8_t *lun);
52 uint16_t stmf_get_next_free_lun(stmf_lun_map_t *sm, uint8_t *lun);
53 stmf_status_t stmf_add_tg(uint8_t *tg_name, uint16_t tg_name_size,
54 		int allow_special, uint32_t *err_detail);
55 stmf_status_t stmf_add_hg(uint8_t *hg_name, uint16_t hg_name_size,
56 		int allow_special, uint32_t *err_detail);
57 stmf_i_local_port_t *stmf_targetident_to_ilport(uint8_t *target_ident,
58 		uint16_t ident_size);
59 stmf_i_scsi_session_t *stmf_lookup_session_for_hostident(
60 		stmf_i_local_port_t *ilport, uint8_t *host_ident,
61 		uint16_t ident_size);
62 stmf_i_lu_t *stmf_luident_to_ilu(uint8_t *lu_ident);
63 stmf_lun_map_t *stmf_get_ve_map_per_ids(stmf_id_data_t *tgid,
64 		stmf_id_data_t *hgid);
65 stmf_lun_map_t *stmf_duplicate_ve_map(stmf_lun_map_t *src);
66 int stmf_merge_ve_map(stmf_lun_map_t *src, stmf_lun_map_t *dst,
67 		stmf_lun_map_t **pp_ret_map, stmf_merge_flags_t mf);
68 void stmf_destroy_ve_map(stmf_lun_map_t *dst);
69 void stmf_free_id(stmf_id_data_t *id);
70 
71 
72 /*
73  * Init the view
74  */
75 void
76 stmf_view_init()
77 {
78 	uint8_t grpname_forall = '*';
79 	(void) stmf_add_hg(&grpname_forall, 1, 1, NULL);
80 	(void) stmf_add_tg(&grpname_forall, 1, 1, NULL);
81 }
82 
83 /*
84  * Clear config database here
85  */
86 void
87 stmf_view_clear_config()
88 {
89 	stmf_id_data_t *idgrp, *idgrp_next, *idmemb, *idmemb_next;
90 	stmf_ver_tg_t *vtg, *vtg_next;
91 	stmf_ver_hg_t *vhg, *vhg_next;
92 	stmf_view_entry_t *ve, *ve_next;
93 	stmf_i_lu_t	*ilu;
94 	stmf_id_list_t	*idlist;
95 	stmf_i_local_port_t *ilport;
96 
97 	for (vtg = stmf_state.stmf_ver_tg_head; vtg; vtg = vtg_next) {
98 		for (vhg = vtg->vert_verh_list; vhg; vhg = vhg_next) {
99 			if (vhg->verh_ve_map.lm_nentries) {
100 				kmem_free(vhg->verh_ve_map.lm_plus,
101 				    vhg->verh_ve_map.lm_nentries *
102 				    sizeof (void *));
103 			}
104 			vhg_next = vhg->verh_next;
105 			kmem_free(vhg, sizeof (stmf_ver_hg_t));
106 		}
107 		vtg_next = vtg->vert_next;
108 		kmem_free(vtg, sizeof (stmf_ver_tg_t));
109 	}
110 	stmf_state.stmf_ver_tg_head = NULL;
111 
112 	if (stmf_state.stmf_luid_list.id_count) {
113 		/* clear the views for lus */
114 		for (idmemb = stmf_state.stmf_luid_list.idl_head;
115 		    idmemb; idmemb = idmemb_next) {
116 			for (ve = (stmf_view_entry_t *)idmemb->id_impl_specific;
117 			    ve; ve = ve_next) {
118 				ve_next = ve->ve_next;
119 				ve->ve_hg->id_refcnt--;
120 				ve->ve_tg->id_refcnt--;
121 				kmem_free(ve, sizeof (stmf_view_entry_t));
122 			}
123 			if (idmemb->id_pt_to_object) {
124 				ilu = (stmf_i_lu_t *)(idmemb->id_pt_to_object);
125 				ilu->ilu_luid = NULL;
126 			}
127 			idmemb_next = idmemb->id_next;
128 			stmf_free_id(idmemb);
129 		}
130 		stmf_state.stmf_luid_list.id_count = 0;
131 		stmf_state.stmf_luid_list.idl_head =
132 		    stmf_state.stmf_luid_list.idl_tail = NULL;
133 	}
134 
135 	if (stmf_state.stmf_hg_list.id_count) {
136 		/* free all the host group */
137 		for (idgrp = stmf_state.stmf_hg_list.idl_head;
138 		    idgrp; idgrp = idgrp_next) {
139 			idlist = (stmf_id_list_t *)(idgrp->id_impl_specific);
140 			if (idlist->id_count) {
141 				for (idmemb = idlist->idl_head; idmemb;
142 				    idmemb = idmemb_next) {
143 					idmemb_next = idmemb->id_next;
144 					stmf_free_id(idmemb);
145 				}
146 			}
147 			idgrp_next = idgrp->id_next;
148 			stmf_free_id(idgrp);
149 		}
150 		stmf_state.stmf_hg_list.id_count = 0;
151 		stmf_state.stmf_hg_list.idl_head =
152 		    stmf_state.stmf_hg_list.idl_tail = NULL;
153 	}
154 	if (stmf_state.stmf_tg_list.id_count) {
155 		/* free all the target group */
156 		for (idgrp = stmf_state.stmf_tg_list.idl_head;
157 		    idgrp; idgrp = idgrp_next) {
158 			idlist = (stmf_id_list_t *)(idgrp->id_impl_specific);
159 			if (idlist->id_count) {
160 				for (idmemb = idlist->idl_head; idmemb;
161 				    idmemb = idmemb_next) {
162 					idmemb_next = idmemb->id_next;
163 					stmf_free_id(idmemb);
164 				}
165 			}
166 			idgrp_next = idgrp->id_next;
167 			stmf_free_id(idgrp);
168 		}
169 		stmf_state.stmf_tg_list.id_count = 0;
170 		stmf_state.stmf_tg_list.idl_head =
171 		    stmf_state.stmf_tg_list.idl_tail = NULL;
172 	}
173 
174 	for (ilport = stmf_state.stmf_ilportlist; ilport;
175 	    ilport = ilport->ilport_next) {
176 		ilport->ilport_tg = NULL;
177 	}
178 }
179 
180 /*
181  * Create luns map for session based on the view
182  */
183 stmf_status_t
184 stmf_session_create_lun_map(stmf_i_local_port_t *ilport,
185 		stmf_i_scsi_session_t *iss)
186 {
187 	stmf_id_data_t *tg;
188 	stmf_id_data_t *hg;
189 	stmf_ver_tg_t	*vertg;
190 	char *phg_data, *ptg_data;
191 	stmf_ver_hg_t	*verhg;
192 	stmf_lun_map_t	*ve_map;
193 
194 	if (iss->iss_sm != NULL)
195 		cmn_err(CE_PANIC, "create lun map called with non NULL map");
196 	iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
197 	    KM_SLEEP);
198 	mutex_enter(&stmf_state.stmf_lock);
199 	tg = ilport->ilport_tg;
200 	hg = stmf_lookup_group_for_host(iss->iss_ss->ss_rport_id->ident,
201 	    iss->iss_ss->ss_rport_id->ident_length);
202 	iss->iss_hg = hg;
203 
204 	/*
205 	 * get the view entry map,
206 	 * take all host/target group into consideration
207 	 */
208 	ve_map = stmf_duplicate_ve_map(0);
209 	for (vertg = stmf_state.stmf_ver_tg_head; vertg != NULL;
210 	    vertg = vertg->vert_next) {
211 		ptg_data = (char *)vertg->vert_tg_ref->id_data;
212 		if ((ptg_data[0] != '*') && (!tg ||
213 		    ((tg->id_data[0] != '*') &&
214 		    (vertg->vert_tg_ref != tg)))) {
215 			continue;
216 		}
217 		for (verhg = vertg->vert_verh_list; verhg != NULL;
218 		    verhg = verhg->verh_next) {
219 			phg_data = (char *)verhg->verh_hg_ref->id_data;
220 			if ((phg_data[0] != '*') && (!hg ||
221 			    ((hg->id_data[0] != '*') &&
222 			    (verhg->verh_hg_ref != hg)))) {
223 				continue;
224 			}
225 			(void) stmf_merge_ve_map(&verhg->verh_ve_map, ve_map,
226 			    &ve_map, 0);
227 		}
228 	}
229 
230 
231 	if (ve_map->lm_nluns) {
232 		stmf_add_lus_to_session_per_vemap(ilport, iss, ve_map);
233 	}
234 	/* not configured, cannot access any luns for now */
235 
236 	mutex_exit(&stmf_state.stmf_lock);
237 	stmf_destroy_ve_map(ve_map);
238 
239 	return (STMF_SUCCESS);
240 }
241 
242 /*
243  * destroy lun map for session
244  */
245 /* ARGSUSED */
246 stmf_status_t
247 stmf_session_destroy_lun_map(stmf_i_local_port_t *ilport,
248 		stmf_i_scsi_session_t *iss)
249 {
250 	stmf_lun_map_t *sm;
251 	stmf_i_lu_t *ilu;
252 	uint16_t n;
253 	stmf_lun_map_ent_t *ent;
254 
255 	/*
256 	 * to avoid conflict with updating session's map,
257 	 * which only grab stmf_lock
258 	 */
259 	mutex_enter(&stmf_state.stmf_lock);
260 	sm = iss->iss_sm;
261 	iss->iss_sm = NULL;
262 	iss->iss_hg = NULL;
263 	mutex_exit(&stmf_state.stmf_lock);
264 	if (sm->lm_nentries) {
265 		for (n = 0; n < sm->lm_nentries; n++) {
266 			if ((ent = (stmf_lun_map_ent_t *)sm->lm_plus[n])
267 			    != NULL) {
268 				if (ent->ent_itl_datap) {
269 					stmf_do_itl_dereg(ent->ent_lu,
270 					    ent->ent_itl_datap,
271 					    STMF_ITL_REASON_IT_NEXUS_LOSS);
272 				}
273 				ilu = (stmf_i_lu_t *)
274 				    ent->ent_lu->lu_stmf_private;
275 				atomic_add_32(&ilu->ilu_ref_cnt, -1);
276 				kmem_free(sm->lm_plus[n],
277 				    sizeof (stmf_lun_map_ent_t));
278 			}
279 		}
280 		kmem_free(sm->lm_plus,
281 		    sizeof (stmf_lun_map_ent_t *) * sm->lm_nentries);
282 	}
283 
284 	kmem_free(sm, sizeof (*sm));
285 	return (STMF_SUCCESS);
286 }
287 
288 /*
289  * Expects the session lock to be held.
290  */
291 stmf_xfer_data_t *
292 stmf_session_prepare_report_lun_data(stmf_lun_map_t *sm)
293 {
294 	stmf_xfer_data_t *xd;
295 	uint16_t nluns, ent;
296 	uint32_t alloc_size, data_size;
297 	int i;
298 
299 	nluns = sm->lm_nluns;
300 
301 	data_size = 8 + (((uint32_t)nluns) << 3);
302 	if (nluns == 0) {
303 		data_size += 8;
304 	}
305 	alloc_size = data_size + sizeof (stmf_xfer_data_t) - 4;
306 
307 	xd = (stmf_xfer_data_t *)kmem_zalloc(alloc_size, KM_NOSLEEP);
308 
309 	if (xd == NULL)
310 		return (NULL);
311 
312 	xd->alloc_size = alloc_size;
313 	xd->size_left = data_size;
314 
315 	*((uint32_t *)xd->buf) = BE_32(data_size - 8);
316 	if (nluns == 0) {
317 		return (xd);
318 	}
319 
320 	ent = 0;
321 
322 	for (i = 0; ((i < sm->lm_nentries) && (ent < nluns)); i++) {
323 		if (sm->lm_plus[i] == NULL)
324 			continue;
325 		/* Fill in the entry */
326 		xd->buf[8 + (ent << 3) + 1] = (uchar_t)i;
327 		xd->buf[8 + (ent << 3) + 0] = ((uchar_t)(i >> 8));
328 		ent++;
329 	}
330 
331 	ASSERT(ent == nluns);
332 
333 	return (xd);
334 }
335 
336 /*
337  * Add a lu to active sessions based on LUN inventory.
338  * Only invoked when the lu is onlined
339  */
340 void
341 stmf_add_lu_to_active_sessions(stmf_lu_t *lu)
342 {
343 	stmf_id_data_t *luid;
344 	stmf_view_entry_t	*ve;
345 	stmf_i_lu_t *ilu;
346 
347 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
348 	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
349 	ASSERT(ilu->ilu_state == STMF_STATE_ONLINE);
350 
351 	luid = ((stmf_i_lu_t *)lu->lu_stmf_private)->ilu_luid;
352 
353 	if (!luid) {
354 		/* we did not configure view for this lun, so just return */
355 		return;
356 	}
357 
358 	for (ve = (stmf_view_entry_t *)luid->id_impl_specific;
359 	    ve; ve = ve->ve_next) {
360 		stmf_update_sessions_per_ve(ve, lu, 1);
361 	}
362 }
363 /*
364  * Unmap a lun from all sessions
365  */
366 void
367 stmf_session_lu_unmapall(stmf_lu_t *lu)
368 {
369 	stmf_i_lu_t *ilu;
370 	stmf_id_data_t *luid;
371 	stmf_view_entry_t *ve;
372 
373 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
374 
375 	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
376 
377 	if (ilu->ilu_ref_cnt == 0)
378 		return;
379 
380 	luid = ((stmf_i_lu_t *)lu->lu_stmf_private)->ilu_luid;
381 	if (!luid) {
382 		/*
383 		 * we did not configure view for this lun, this should be
384 		 * an error
385 		 */
386 		return;
387 	}
388 
389 	for (ve = (stmf_view_entry_t *)luid->id_impl_specific;
390 	    ve; ve = ve->ve_next) {
391 		stmf_update_sessions_per_ve(ve, lu, 0);
392 		if (ilu->ilu_ref_cnt == 0)
393 			break;
394 	}
395 }
396 /*
397  * add lu to a session, stmf_lock is already held
398  */
399 stmf_status_t
400 stmf_add_lu_to_session(stmf_i_local_port_t *ilport,
401 		stmf_i_scsi_session_t	*iss,
402 		stmf_lu_t *lu,
403 		uint8_t *lu_nbr)
404 {
405 	stmf_lun_map_t *sm = iss->iss_sm;
406 	stmf_status_t ret;
407 	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
408 	stmf_lun_map_ent_t *lun_map_ent;
409 	uint32_t new_flags = 0;
410 	uint16_t luNbr =
411 	    ((uint16_t)lu_nbr[1] | (((uint16_t)(lu_nbr[0] & 0x3F)) << 8));
412 
413 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
414 	ASSERT(!stmf_get_ent_from_map(sm, luNbr));
415 
416 	if ((sm->lm_nluns == 0) &&
417 	    ((iss->iss_flags & ISS_BEING_CREATED) == 0)) {
418 		new_flags = ISS_GOT_INITIAL_LUNS;
419 		atomic_or_32(&ilport->ilport_flags, ILPORT_SS_GOT_INITIAL_LUNS);
420 		stmf_state.stmf_process_initial_luns = 1;
421 	}
422 
423 	lun_map_ent = (stmf_lun_map_ent_t *)
424 	    kmem_zalloc(sizeof (stmf_lun_map_ent_t), KM_SLEEP);
425 	lun_map_ent->ent_lu = lu;
426 	ret = stmf_add_ent_to_map(sm, (void *)lun_map_ent, lu_nbr);
427 	ASSERT(ret == STMF_SUCCESS);
428 	atomic_add_32(&ilu->ilu_ref_cnt, 1);
429 	new_flags |= ISS_LUN_INVENTORY_CHANGED;
430 	atomic_or_32(&iss->iss_flags, new_flags);
431 	return (STMF_SUCCESS);
432 }
433 
434 /*
435  * remvoe lu from a session, stmf_lock is already held
436  */
437 /* ARGSUSED */
438 stmf_status_t
439 stmf_remove_lu_from_session(stmf_i_local_port_t *ilport,
440 		stmf_i_scsi_session_t *iss,
441 		stmf_lu_t *lu,
442 		uint8_t *lu_nbr)
443 {
444 	stmf_status_t ret;
445 	stmf_i_lu_t *ilu;
446 	stmf_lun_map_t *sm = iss->iss_sm;
447 	stmf_lun_map_ent_t *lun_map_ent;
448 	uint16_t luNbr =
449 	    ((uint16_t)lu_nbr[1] | (((uint16_t)(lu_nbr[0] & 0x3F)) << 8));
450 
451 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
452 	lun_map_ent = stmf_get_ent_from_map(sm, luNbr);
453 	ASSERT(lun_map_ent && lun_map_ent->ent_lu == lu);
454 
455 	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
456 
457 	ret = stmf_remove_ent_from_map(sm, lu_nbr);
458 	ASSERT(ret == STMF_SUCCESS);
459 	atomic_add_32(&ilu->ilu_ref_cnt, -1);
460 	iss->iss_flags |= ISS_LUN_INVENTORY_CHANGED;
461 	if (lun_map_ent->ent_itl_datap) {
462 		stmf_do_itl_dereg(lu, lun_map_ent->ent_itl_datap,
463 		    STMF_ITL_REASON_USER_REQUEST);
464 	}
465 	kmem_free((void *)lun_map_ent, sizeof (stmf_lun_map_ent_t));
466 	return (STMF_SUCCESS);
467 }
468 
469 /*
470  * add or remove lu from all related sessions based on view entry,
471  * action is 0 for delete, 1 for add
472  */
473 void
474 stmf_update_sessions_per_ve(stmf_view_entry_t *ve,
475 		stmf_lu_t *lu, int action)
476 {
477 	stmf_i_lu_t *ilu_tmp;
478 	stmf_lu_t *lu_to_add;
479 	stmf_i_local_port_t *ilport;
480 	stmf_i_scsi_session_t *iss;
481 	stmf_id_list_t	*hostlist;
482 	stmf_id_list_t	*targetlist;
483 	int all_hg = 0, all_tg = 0;
484 
485 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
486 
487 	if (!lu) {
488 		ilu_tmp = (stmf_i_lu_t *)ve->ve_luid->id_pt_to_object;
489 		if (!ilu_tmp)
490 			return;
491 		lu_to_add = ilu_tmp->ilu_lu;
492 	} else {
493 		lu_to_add = lu;
494 		ilu_tmp = (stmf_i_lu_t *)lu->lu_stmf_private;
495 	}
496 
497 	if (ve->ve_hg->id_data[0] == '*')
498 		all_hg = 1;
499 	if (ve->ve_tg->id_data[0] == '*')
500 		all_tg = 1;
501 	hostlist = (stmf_id_list_t *)ve->ve_hg->id_impl_specific;
502 	targetlist = (stmf_id_list_t *)ve->ve_tg->id_impl_specific;
503 
504 	if ((!all_hg && !hostlist->idl_head) ||
505 	    (!all_tg && !targetlist->idl_head))
506 		/* No sessions to be updated */
507 		return;
508 
509 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
510 	    ilport = ilport->ilport_next) {
511 		if (!all_tg && ilport->ilport_tg != ve->ve_tg)
512 			continue;
513 		/* This ilport belongs to the target group */
514 		rw_enter(&ilport->ilport_lock, RW_WRITER);
515 		for (iss = ilport->ilport_ss_list; iss != NULL;
516 		    iss = iss->iss_next) {
517 			if (!all_hg && iss->iss_hg != ve->ve_hg)
518 				continue;
519 			/* This host belongs to the host group */
520 			if (action == 0) { /* to remove */
521 				(void) stmf_remove_lu_from_session(ilport, iss,
522 				    lu_to_add, ve->ve_lun);
523 				if (ilu_tmp->ilu_ref_cnt == 0) {
524 					rw_exit(&ilport->ilport_lock);
525 					return;
526 				}
527 			} else {
528 				(void) stmf_add_lu_to_session(ilport, iss,
529 				    lu_to_add, ve->ve_lun);
530 			}
531 		}
532 		rw_exit(&ilport->ilport_lock);
533 	}
534 }
535 
536 /*
537  * add luns in view entry map to a session,
538  * and stmf_lock is already held
539  */
540 void
541 stmf_add_lus_to_session_per_vemap(stmf_i_local_port_t *ilport,
542 		stmf_i_scsi_session_t *iss,
543 		stmf_lun_map_t *vemap)
544 {
545 	stmf_lu_t *lu;
546 	stmf_i_lu_t *ilu;
547 	stmf_view_entry_t *ve;
548 	uint32_t	i;
549 
550 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
551 
552 	for (i = 0; i < vemap->lm_nentries; i++) {
553 		ve = (stmf_view_entry_t *)vemap->lm_plus[i];
554 		if (!ve)
555 			continue;
556 		ilu = (stmf_i_lu_t *)ve->ve_luid->id_pt_to_object;
557 		if (ilu && ilu->ilu_state == STMF_STATE_ONLINE) {
558 			lu = ilu->ilu_lu;
559 			(void) stmf_add_lu_to_session(ilport, iss, lu,
560 			    ve->ve_lun);
561 		}
562 	}
563 }
564 /* remove luns in view entry map from a session */
565 void
566 stmf_remove_lus_from_session_per_vemap(stmf_i_local_port_t *ilport,
567 		stmf_i_scsi_session_t *iss,
568 		stmf_lun_map_t *vemap)
569 {
570 	stmf_lu_t *lu;
571 	stmf_i_lu_t *ilu;
572 	stmf_view_entry_t *ve;
573 	uint32_t i;
574 
575 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
576 
577 	for (i = 0; i < vemap->lm_nentries; i++) {
578 		ve = (stmf_view_entry_t *)vemap->lm_plus[i];
579 		if (!ve)
580 			continue;
581 		ilu = (stmf_i_lu_t *)ve->ve_luid->id_pt_to_object;
582 		if (ilu && ilu->ilu_state == STMF_STATE_ONLINE) {
583 			lu = ilu->ilu_lu;
584 			(void) stmf_remove_lu_from_session(ilport, iss, lu,
585 			    ve->ve_lun);
586 		}
587 	}
588 }
589 
590 stmf_id_data_t *
591 stmf_alloc_id(uint16_t id_size, uint16_t type, uint8_t *id_data,
592 			uint32_t additional_size)
593 {
594 	stmf_id_data_t *id;
595 	int struct_size, total_size, real_id_size;
596 
597 	real_id_size = ((uint32_t)id_size + 7) & (~7);
598 	struct_size = (sizeof (*id) + 7) & (~7);
599 	total_size = ((additional_size + 7) & (~7)) + struct_size +
600 	    real_id_size;
601 	id = (stmf_id_data_t *)kmem_zalloc(total_size, KM_SLEEP);
602 	id->id_type = type;
603 	id->id_data_size = id_size;
604 	id->id_data = ((uint8_t *)id) + struct_size;
605 	id->id_total_alloc_size = total_size;
606 	if (additional_size) {
607 		id->id_impl_specific = ((uint8_t *)id) + struct_size +
608 		    real_id_size;
609 	}
610 	bcopy(id_data, id->id_data, id_size);
611 
612 	return (id);
613 }
614 
615 void
616 stmf_free_id(stmf_id_data_t *id)
617 {
618 	kmem_free(id, id->id_total_alloc_size);
619 }
620 
621 
622 stmf_id_data_t *
623 stmf_lookup_id(stmf_id_list_t *idlist, uint16_t id_size, uint8_t *data)
624 {
625 	stmf_id_data_t *id;
626 
627 	for (id = idlist->idl_head; id != NULL; id = id->id_next) {
628 		if ((id->id_data_size == id_size) &&
629 		    (bcmp(id->id_data, data, id_size) == 0)) {
630 			return (id);
631 		}
632 	}
633 
634 	return (NULL);
635 }
636 /* Return the target group which a target belong to */
637 stmf_id_data_t *
638 stmf_lookup_group_for_target(uint8_t *ident, uint16_t ident_size)
639 {
640 	stmf_id_data_t *tgid;
641 	stmf_id_data_t *target;
642 
643 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
644 
645 	for (tgid = stmf_state.stmf_tg_list.idl_head; tgid;
646 	    tgid = tgid->id_next) {
647 		target = stmf_lookup_id(
648 		    (stmf_id_list_t *)tgid->id_impl_specific,
649 		    ident_size, ident);
650 		if (target)
651 			return (tgid);
652 	}
653 	return (NULL);
654 }
655 /* Return the host group which a host belong to */
656 stmf_id_data_t *
657 stmf_lookup_group_for_host(uint8_t *ident, uint16_t ident_size)
658 {
659 	stmf_id_data_t *hgid;
660 	stmf_id_data_t *host;
661 
662 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
663 
664 	for (hgid = stmf_state.stmf_hg_list.idl_head; hgid;
665 	    hgid = hgid->id_next) {
666 		host = stmf_lookup_id(
667 		    (stmf_id_list_t *)hgid->id_impl_specific,
668 		    ident_size, ident);
669 		if (host)
670 			return (hgid);
671 	}
672 	return (NULL);
673 }
674 
675 void
676 stmf_append_id(stmf_id_list_t *idlist, stmf_id_data_t *id)
677 {
678 	id->id_next = NULL;
679 
680 	if ((id->id_prev = idlist->idl_tail) == NULL) {
681 		idlist->idl_head = idlist->idl_tail = id;
682 	} else {
683 		idlist->idl_tail->id_next = id;
684 		idlist->idl_tail = id;
685 	}
686 	atomic_add_32(&idlist->id_count, 1);
687 }
688 
689 void
690 stmf_remove_id(stmf_id_list_t *idlist, stmf_id_data_t *id)
691 {
692 	if (id->id_next) {
693 		id->id_next->id_prev = id->id_prev;
694 	} else {
695 		idlist->idl_tail = id->id_prev;
696 	}
697 
698 	if (id->id_prev) {
699 		id->id_prev->id_next = id->id_next;
700 	} else {
701 		idlist->idl_head = id->id_next;
702 	}
703 	atomic_add_32(&idlist->id_count, -1);
704 }
705 
706 
707 /*
708  * The refcnts of objects in a view entry are updated when then entry
709  * is successfully added. ve_map is just another representation of the
710  * view enrtries in a LU. Duplicating or merging a ve map does not
711  * affect any refcnts.
712  */
713 stmf_lun_map_t *
714 stmf_duplicate_ve_map(stmf_lun_map_t *src)
715 {
716 	stmf_lun_map_t *dst;
717 	int i;
718 
719 	dst = (stmf_lun_map_t *)kmem_zalloc(sizeof (*dst), KM_SLEEP);
720 
721 	if (src == NULL)
722 		return (dst);
723 
724 	if (src->lm_nentries) {
725 		dst->lm_plus = kmem_zalloc(dst->lm_nentries *
726 		    sizeof (void *), KM_SLEEP);
727 		for (i = 0; i < dst->lm_nentries; i++) {
728 			dst->lm_plus[i] = src->lm_plus[i];
729 		}
730 	}
731 
732 	return (dst);
733 }
734 
735 void
736 stmf_destroy_ve_map(stmf_lun_map_t *dst)
737 {
738 	if (dst->lm_nentries) {
739 		kmem_free(dst->lm_plus, dst->lm_nentries * sizeof (void *));
740 	}
741 	kmem_free(dst, sizeof (*dst));
742 }
743 
744 int
745 stmf_merge_ve_map(stmf_lun_map_t *src, stmf_lun_map_t *dst,
746 		stmf_lun_map_t **pp_ret_map, stmf_merge_flags_t mf)
747 {
748 	int i;
749 	int nentries;
750 	int to_create_space = 0;
751 
752 	if (dst == NULL) {
753 		*pp_ret_map = stmf_duplicate_ve_map(src);
754 		return (1);
755 	}
756 
757 	if (src == NULL || src->lm_nluns == 0) {
758 		if (mf & MERGE_FLAG_RETURN_NEW_MAP)
759 			*pp_ret_map = stmf_duplicate_ve_map(dst);
760 		else
761 			*pp_ret_map = dst;
762 		return (1);
763 	}
764 
765 	if (mf & MERGE_FLAG_RETURN_NEW_MAP) {
766 		*pp_ret_map = stmf_duplicate_ve_map(NULL);
767 		nentries = max(dst->lm_nentries, src->lm_nentries);
768 		to_create_space = 1;
769 	} else {
770 		*pp_ret_map = dst;
771 		/* If there is not enough space in dst map */
772 		if (dst->lm_nentries < src->lm_nentries) {
773 			nentries = src->lm_nentries;
774 			to_create_space = 1;
775 		}
776 	}
777 	if (to_create_space) {
778 		void **p;
779 		p = (void **)kmem_zalloc(nentries * sizeof (void *), KM_SLEEP);
780 		if (dst->lm_nentries) {
781 			bcopy(dst->lm_plus, p,
782 			    dst->lm_nentries * sizeof (void *));
783 		}
784 		if (mf & (MERGE_FLAG_RETURN_NEW_MAP == 0))
785 			kmem_free(dst->lm_plus,
786 			    dst->lm_nentries * sizeof (void *));
787 		(*pp_ret_map)->lm_plus = p;
788 		(*pp_ret_map)->lm_nentries = nentries;
789 	}
790 
791 	for (i = 0; i < src->lm_nentries; i++) {
792 		if (src->lm_plus[i] == NULL)
793 			continue;
794 		if (dst->lm_plus[i] != NULL) {
795 			if (mf & MERGE_FLAG_NO_DUPLICATE) {
796 				if (mf & MERGE_FLAG_RETURN_NEW_MAP) {
797 					stmf_destroy_ve_map(*pp_ret_map);
798 					*pp_ret_map = NULL;
799 				}
800 				return (0);
801 			}
802 		} else {
803 			dst->lm_plus[i] = src->lm_plus[i];
804 			dst->lm_nluns++;
805 		}
806 	}
807 
808 	return (1);
809 }
810 
811 /*
812  * add host group, id_impl_specific point to a list of hosts,
813  * on return, if error happened, err_detail may be assigned if
814  * the pointer is not NULL
815  */
816 stmf_status_t
817 stmf_add_hg(uint8_t *hg_name, uint16_t hg_name_size,
818 		int allow_special, uint32_t *err_detail)
819 {
820 	stmf_id_data_t *id;
821 
822 	if (!allow_special) {
823 		if (hg_name[0] == '*')
824 			return (STMF_INVALID_ARG);
825 	}
826 
827 	if (stmf_lookup_id(&stmf_state.stmf_hg_list,
828 	    hg_name_size, (uint8_t *)hg_name)) {
829 		if (err_detail)
830 			*err_detail = STMF_IOCERR_HG_EXISTS;
831 		return (STMF_ALREADY);
832 	}
833 	id = stmf_alloc_id(hg_name_size, STMF_ID_TYPE_HOST_GROUP,
834 	    (uint8_t *)hg_name, sizeof (stmf_id_list_t));
835 	stmf_append_id(&stmf_state.stmf_hg_list, id);
836 
837 	return (STMF_SUCCESS);
838 }
839 
840 /* add target group */
841 stmf_status_t
842 stmf_add_tg(uint8_t *tg_name, uint16_t tg_name_size,
843 		int allow_special, uint32_t *err_detail)
844 {
845 	stmf_id_data_t *id;
846 
847 	if (!allow_special) {
848 		if (tg_name[0] == '*')
849 			return (STMF_INVALID_ARG);
850 	}
851 
852 
853 	if (stmf_lookup_id(&stmf_state.stmf_tg_list, tg_name_size,
854 	    (uint8_t *)tg_name)) {
855 		if (err_detail)
856 			*err_detail = STMF_IOCERR_TG_EXISTS;
857 		return (STMF_ALREADY);
858 	}
859 	id = stmf_alloc_id(tg_name_size, STMF_ID_TYPE_TARGET_GROUP,
860 	    (uint8_t *)tg_name, sizeof (stmf_id_list_t));
861 	stmf_append_id(&stmf_state.stmf_tg_list, id);
862 
863 	return (STMF_SUCCESS);
864 }
865 
866 /*
867  * insert view entry into list for a luid, if ve->ve_id is 0xffffffff,
868  * pick up a smallest available veid for it, and return the veid in ve->ve_id.
869  * The view entries list is sorted based on veid.
870  */
871 stmf_status_t
872 stmf_add_ve_to_luid(stmf_id_data_t *luid, stmf_view_entry_t *ve)
873 {
874 	stmf_view_entry_t *ve_tmp = NULL;
875 	stmf_view_entry_t *ve_prev = NULL;
876 
877 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
878 
879 	ve_tmp = (stmf_view_entry_t *)luid->id_impl_specific;
880 
881 	if (ve->ve_id != 0xffffffff) {
882 		for (; ve_tmp; ve_tmp = ve_tmp->ve_next) {
883 			if (ve_tmp->ve_id > ve->ve_id) {
884 				break;
885 			} else if (ve_tmp->ve_id == ve->ve_id) {
886 				return (STMF_ALREADY);
887 			}
888 			ve_prev = ve_tmp;
889 		}
890 	} else {
891 		uint32_t veid = 0;
892 		/* search the smallest available veid */
893 		for (; ve_tmp; ve_tmp = ve_tmp->ve_next) {
894 			ASSERT(ve_tmp->ve_id >= veid);
895 			if (ve_tmp->ve_id != veid)
896 				break;
897 			veid++;
898 			if (veid == 0xffffffff)
899 				return (STMF_NOT_SUPPORTED);
900 			ve_prev = ve_tmp;
901 		}
902 		ve->ve_id = veid;
903 	}
904 
905 	/* insert before ve_tmp if it exist */
906 	ve->ve_next = ve_tmp;
907 	ve->ve_prev = ve_prev;
908 	if (ve_tmp) {
909 		ve_tmp->ve_prev = ve;
910 	}
911 	if (ve_prev) {
912 		ve_prev->ve_next = ve;
913 	} else {
914 		luid->id_impl_specific = (void *)ve;
915 	}
916 	return (STMF_SUCCESS);
917 }
918 
919 /* stmf_lock is already held, err_detail may be assigned if error happens */
920 stmf_status_t
921 stmf_add_view_entry(stmf_id_data_t *hg, stmf_id_data_t *tg,
922 		uint8_t *lu_guid, uint32_t *ve_id, uint8_t *lun,
923 		stmf_view_entry_t **conflicting, uint32_t *err_detail)
924 {
925 	stmf_id_data_t *luid;
926 	stmf_view_entry_t *ve;
927 	char *phg, *ptg;
928 	stmf_lun_map_t *ve_map = NULL;
929 	stmf_ver_hg_t *verhg = NULL, *verhg_ex = NULL;
930 	stmf_ver_tg_t *vertg = NULL, *vertg_ex = NULL;
931 	char luid_new;
932 	uint16_t lun_num;
933 	stmf_i_lu_t *ilu;
934 	stmf_status_t ret;
935 
936 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
937 
938 	lun_num = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
939 
940 	luid = stmf_lookup_id(&stmf_state.stmf_luid_list, 16, lu_guid);
941 	if (luid == NULL) {
942 		luid = stmf_alloc_id(16, STMF_ID_TYPE_LU_GUID, lu_guid, 0);
943 		ilu = stmf_luident_to_ilu(lu_guid);
944 		if (ilu) {
945 			ilu->ilu_luid = luid;
946 			luid->id_pt_to_object = (void *)ilu;
947 		}
948 		luid_new = 1;
949 	} else {
950 		luid_new = 0;
951 		ilu = (stmf_i_lu_t *)luid->id_pt_to_object;
952 	}
953 
954 	/* The view entry won't be added if there is any confilict */
955 	phg = (char *)hg->id_data; ptg = (char *)tg->id_data;
956 	for (ve = (stmf_view_entry_t *)luid->id_impl_specific; ve != NULL;
957 	    ve = ve->ve_next) {
958 		if (((phg[0] == '*') || (ve->ve_hg->id_data[0] == '*') ||
959 		    (hg == ve->ve_hg)) && ((ptg[0] == '*') ||
960 		    (ve->ve_tg->id_data[0] == '*') || (tg == ve->ve_tg))) {
961 			*conflicting = ve;
962 			*err_detail = STMF_IOCERR_VIEW_ENTRY_CONFLICT;
963 			ret = STMF_ALREADY;
964 			goto add_ve_err_ret;
965 		}
966 	}
967 
968 	ve_map = stmf_duplicate_ve_map(0);
969 	for (vertg = stmf_state.stmf_ver_tg_head; vertg != NULL;
970 	    vertg = vertg->vert_next) {
971 		ptg = (char *)vertg->vert_tg_ref->id_data;
972 		if ((ptg[0] != '*') && (tg->id_data[0] != '*') &&
973 		    (vertg->vert_tg_ref != tg)) {
974 			continue;
975 		}
976 		if (vertg->vert_tg_ref == tg)
977 			vertg_ex = vertg;
978 		for (verhg = vertg->vert_verh_list; verhg != NULL;
979 		    verhg = verhg->verh_next) {
980 			phg = (char *)verhg->verh_hg_ref->id_data;
981 			if ((phg[0] != '*') && (hg->id_data[0] != '*') &&
982 			    (verhg->verh_hg_ref != hg)) {
983 				continue;
984 			}
985 			if ((vertg_ex == vertg) && (verhg->verh_hg_ref == hg))
986 				verhg_ex = verhg;
987 			(void) stmf_merge_ve_map(&verhg->verh_ve_map, ve_map,
988 			    &ve_map, 0);
989 		}
990 	}
991 
992 	if (lun[2] == 0xFF) {
993 		/* Pick a LUN number */
994 		lun_num = stmf_get_next_free_lun(ve_map, lun);
995 		if (lun_num > 0x3FFF) {
996 			stmf_destroy_ve_map(ve_map);
997 			ret = STMF_NOT_SUPPORTED;
998 			goto add_ve_err_ret;
999 		}
1000 	} else {
1001 		if ((*conflicting = stmf_get_ent_from_map(ve_map, lun_num))
1002 		    != NULL) {
1003 			stmf_destroy_ve_map(ve_map);
1004 			*err_detail = STMF_IOCERR_LU_NUMBER_IN_USE;
1005 			ret = STMF_LUN_TAKEN;
1006 			goto add_ve_err_ret;
1007 		}
1008 	}
1009 	stmf_destroy_ve_map(ve_map);
1010 
1011 	/* All is well, do the actual addition now */
1012 	ve = (stmf_view_entry_t *)kmem_zalloc(sizeof (*ve), KM_SLEEP);
1013 	ve->ve_id = *ve_id;
1014 	ve->ve_lun[0] = lun[0];
1015 	ve->ve_lun[1] = lun[1];
1016 
1017 	if ((ret = stmf_add_ve_to_luid(luid, ve)) != STMF_SUCCESS) {
1018 		kmem_free(ve, sizeof (stmf_view_entry_t));
1019 		goto add_ve_err_ret;
1020 	}
1021 	ve->ve_hg = hg; hg->id_refcnt++;
1022 	ve->ve_tg = tg; tg->id_refcnt++;
1023 	ve->ve_luid = luid; luid->id_refcnt++;
1024 
1025 	*ve_id = ve->ve_id;
1026 
1027 	if (luid_new) {
1028 		stmf_append_id(&stmf_state.stmf_luid_list, luid);
1029 	}
1030 
1031 	if (vertg_ex == NULL) {
1032 		vertg_ex = (stmf_ver_tg_t *)kmem_zalloc(sizeof (stmf_ver_tg_t),
1033 		    KM_SLEEP);
1034 		vertg_ex->vert_next = stmf_state.stmf_ver_tg_head;
1035 		stmf_state.stmf_ver_tg_head = vertg_ex;
1036 		vertg_ex->vert_tg_ref = tg;
1037 		verhg_ex = vertg_ex->vert_verh_list =
1038 		    (stmf_ver_hg_t *)kmem_zalloc(sizeof (stmf_ver_hg_t),
1039 		    KM_SLEEP);
1040 		verhg_ex->verh_hg_ref = hg;
1041 	}
1042 	if (verhg_ex == NULL) {
1043 		verhg_ex = (stmf_ver_hg_t *)kmem_zalloc(sizeof (stmf_ver_hg_t),
1044 		    KM_SLEEP);
1045 		verhg_ex->verh_next = vertg_ex->vert_verh_list;
1046 		vertg_ex->vert_verh_list = verhg_ex;
1047 		verhg_ex->verh_hg_ref = hg;
1048 	}
1049 	ret = stmf_add_ent_to_map(&verhg_ex->verh_ve_map, ve, ve->ve_lun);
1050 	ASSERT(ret == STMF_SUCCESS);
1051 
1052 	/* we need to update the affected session */
1053 	if (stmf_state.stmf_service_running) {
1054 		if (ilu && ilu->ilu_state == STMF_STATE_ONLINE)
1055 			stmf_update_sessions_per_ve(ve, ilu->ilu_lu, 1);
1056 	}
1057 
1058 	return (STMF_SUCCESS);
1059 add_ve_err_ret:
1060 	if (luid_new) {
1061 		if (ilu)
1062 			ilu->ilu_luid = NULL;
1063 		stmf_free_id(luid);
1064 	}
1065 	return (ret);
1066 }
1067 
1068 stmf_status_t
1069 stmf_add_ent_to_map(stmf_lun_map_t *lm, void *ent, uint8_t *lun)
1070 {
1071 	uint16_t n;
1072 	if (((lun[0] & 0xc0) >> 6) != 0)
1073 		return (STMF_FAILURE);
1074 
1075 	n = (uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8);
1076 try_again_to_add:
1077 	if (lm->lm_nentries && (n < lm->lm_nentries)) {
1078 		if (lm->lm_plus[n] == NULL) {
1079 			lm->lm_plus[n] = ent;
1080 			lm->lm_nluns++;
1081 			return (STMF_SUCCESS);
1082 		} else {
1083 			return (STMF_LUN_TAKEN);
1084 		}
1085 	} else {
1086 		void **pplu;
1087 		uint16_t m = n + 1;
1088 		m = ((m + 7) & ~7) & 0x7FFF;
1089 		pplu = (void **)kmem_zalloc(m * sizeof (void *), KM_SLEEP);
1090 		bcopy(lm->lm_plus, pplu,
1091 		    lm->lm_nentries * sizeof (void *));
1092 		kmem_free(lm->lm_plus, lm->lm_nentries * sizeof (void *));
1093 		lm->lm_plus = pplu;
1094 		lm->lm_nentries = m;
1095 		goto try_again_to_add;
1096 	}
1097 }
1098 
1099 
1100 stmf_status_t
1101 stmf_remove_ent_from_map(stmf_lun_map_t *lm, uint8_t *lun)
1102 {
1103 	uint16_t n, i;
1104 	uint8_t lutype = (lun[0] & 0xc0) >> 6;
1105 	if (lutype != 0)
1106 		return (STMF_FAILURE);
1107 
1108 	n = (uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8);
1109 
1110 	if (n >= lm->lm_nentries)
1111 		return (STMF_NOT_FOUND);
1112 	if (lm->lm_plus[n] == NULL)
1113 		return (STMF_NOT_FOUND);
1114 
1115 	lm->lm_plus[n] = NULL;
1116 	lm->lm_nluns--;
1117 
1118 	for (i = 0; i < lm->lm_nentries; i++) {
1119 		if (lm->lm_plus[lm->lm_nentries - 1 - i] != NULL)
1120 			break;
1121 	}
1122 	i &= ~15;
1123 	if (i >= 16) {
1124 		void **pplu;
1125 		uint16_t m;
1126 		m = lm->lm_nentries - i;
1127 		pplu = (void **)kmem_zalloc(m * sizeof (void *), KM_SLEEP);
1128 		bcopy(lm->lm_plus, pplu, m * sizeof (void *));
1129 		kmem_free(lm->lm_plus, lm->lm_nentries * sizeof (void *));
1130 		lm->lm_plus = pplu;
1131 		lm->lm_nentries = m;
1132 	}
1133 
1134 	return (STMF_SUCCESS);
1135 }
1136 
1137 uint16_t
1138 stmf_get_next_free_lun(stmf_lun_map_t *sm, uint8_t *lun)
1139 {
1140 	uint16_t luNbr;
1141 
1142 
1143 	if (sm->lm_nluns < 0x4000) {
1144 		for (luNbr = 0; luNbr < sm->lm_nentries; luNbr++) {
1145 			if (sm->lm_plus[luNbr] == NULL)
1146 				break;
1147 		}
1148 	} else {
1149 		return (0xFFFF);
1150 	}
1151 	if (lun) {
1152 		bzero(lun, 8);
1153 		lun[1] = luNbr & 0xff;
1154 		lun[0] = (luNbr >> 8) & 0xff;
1155 	}
1156 
1157 	return (luNbr);
1158 }
1159 
1160 void *
1161 stmf_get_ent_from_map(stmf_lun_map_t *sm, uint16_t lun_num)
1162 {
1163 	if ((lun_num & 0xC000) == 0) {
1164 		if (sm->lm_nentries > lun_num)
1165 			return (sm->lm_plus[lun_num & 0x3FFF]);
1166 		else
1167 			return (NULL);
1168 	}
1169 
1170 	return (NULL);
1171 }
1172 
1173 int
1174 stmf_add_ve(uint8_t *hgname, uint16_t hgname_size,
1175 		uint8_t *tgname, uint16_t tgname_size,
1176 		uint8_t *lu_guid, uint32_t *ve_id,
1177 		uint8_t *luNbr, uint32_t *err_detail)
1178 {
1179 	stmf_id_data_t *hg;
1180 	stmf_id_data_t *tg;
1181 	stmf_view_entry_t *conflictve;
1182 	stmf_status_t ret;
1183 
1184 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1185 
1186 	hg = stmf_lookup_id(&stmf_state.stmf_hg_list, hgname_size,
1187 	    (uint8_t *)hgname);
1188 	if (!hg) {
1189 		*err_detail = STMF_IOCERR_INVALID_HG;
1190 		return (ENOENT); /* could not find group */
1191 	}
1192 	tg = stmf_lookup_id(&stmf_state.stmf_tg_list, tgname_size,
1193 	    (uint8_t *)tgname);
1194 	if (!tg) {
1195 		*err_detail = STMF_IOCERR_INVALID_TG;
1196 		return (ENOENT); /* could not find group */
1197 	}
1198 	ret = stmf_add_view_entry(hg, tg, lu_guid, ve_id, luNbr,
1199 	    &conflictve, err_detail);
1200 
1201 	if (ret == STMF_ALREADY) {
1202 		return (EALREADY);
1203 	} else if (ret == STMF_LUN_TAKEN) {
1204 		return (EEXIST);
1205 	} else if (ret == STMF_NOT_SUPPORTED) {
1206 		return (E2BIG);
1207 	} else if (ret != STMF_SUCCESS) {
1208 		return (EINVAL);
1209 	}
1210 	return (0);
1211 }
1212 
1213 int
1214 stmf_remove_ve_by_id(uint8_t *guid, uint32_t veid, uint32_t *err_detail)
1215 {
1216 	stmf_id_data_t *luid;
1217 	stmf_view_entry_t	*ve;
1218 	stmf_ver_tg_t *vtg;
1219 	stmf_ver_hg_t *vhg;
1220 	stmf_ver_tg_t *prev_vtg = NULL;
1221 	stmf_ver_hg_t *prev_vhg = NULL;
1222 	int found = 0;
1223 	stmf_i_lu_t *ilu;
1224 
1225 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1226 	luid = stmf_lookup_id(&stmf_state.stmf_luid_list, 16, guid);
1227 	if (luid == NULL) {
1228 		*err_detail = STMF_IOCERR_INVALID_LU_ID;
1229 		return (ENODEV);
1230 	}
1231 	ilu = (stmf_i_lu_t *)luid->id_pt_to_object;
1232 
1233 	for (ve = (stmf_view_entry_t *)luid->id_impl_specific;
1234 	    ve; ve = ve->ve_next) {
1235 		if (ve->ve_id == veid) {
1236 			break;
1237 		}
1238 	}
1239 	if (!ve) {
1240 		*err_detail = STMF_IOCERR_INVALID_VE_ID;
1241 		return (ENODEV);
1242 	}
1243 	/* remove the ve */
1244 	if (ve->ve_next)
1245 		ve->ve_next->ve_prev = ve->ve_prev;
1246 	if (ve->ve_prev)
1247 		ve->ve_prev->ve_next = ve->ve_next;
1248 	else {
1249 		luid->id_impl_specific = (void *)ve->ve_next;
1250 		if (!luid->id_impl_specific) {
1251 			/* don't have any view entries related to this lu */
1252 			stmf_remove_id(&stmf_state.stmf_luid_list, luid);
1253 			if (ilu)
1254 				ilu->ilu_luid = NULL;
1255 			stmf_free_id(luid);
1256 		}
1257 	}
1258 
1259 	/* we need to update ver_hg->verh_ve_map */
1260 	for (vtg = stmf_state.stmf_ver_tg_head; vtg; vtg = vtg->vert_next) {
1261 		if (vtg->vert_tg_ref == ve->ve_tg) {
1262 			found = 1;
1263 			break;
1264 		}
1265 		prev_vtg = vtg;
1266 	}
1267 	ASSERT(found);
1268 	found = 0;
1269 	for (vhg = vtg->vert_verh_list; vhg; vhg = vhg->verh_next) {
1270 		if (vhg->verh_hg_ref == ve->ve_hg) {
1271 			found = 1;
1272 			break;
1273 		}
1274 		prev_vhg = vhg;
1275 	}
1276 	ASSERT(found);
1277 
1278 	(void) stmf_remove_ent_from_map(&vhg->verh_ve_map, ve->ve_lun);
1279 
1280 	/* free verhg if it don't have any ve entries related */
1281 	if (!vhg->verh_ve_map.lm_nluns) {
1282 		/* we don't have any view entry related */
1283 		if (prev_vhg)
1284 			prev_vhg->verh_next = vhg->verh_next;
1285 		else
1286 			vtg->vert_verh_list = vhg->verh_next;
1287 
1288 		/* Free entries in case the map still has memory */
1289 		if (vhg->verh_ve_map.lm_nentries) {
1290 			kmem_free(vhg->verh_ve_map.lm_plus,
1291 			    vhg->verh_ve_map.lm_nentries *
1292 			    sizeof (void *));
1293 		}
1294 		kmem_free(vhg, sizeof (stmf_ver_hg_t));
1295 		if (!vtg->vert_verh_list) {
1296 			/* we don't have any ve related */
1297 			if (prev_vtg)
1298 				prev_vtg->vert_next = vtg->vert_next;
1299 			else
1300 				stmf_state.stmf_ver_tg_head = vtg->vert_next;
1301 			kmem_free(vtg, sizeof (stmf_ver_tg_t));
1302 		}
1303 	}
1304 
1305 	if (stmf_state.stmf_service_running && ilu &&
1306 	    ilu->ilu_state == STMF_STATE_ONLINE) {
1307 		stmf_update_sessions_per_ve(ve, ilu->ilu_lu, 0);
1308 	}
1309 
1310 	ve->ve_hg->id_refcnt--;
1311 	ve->ve_tg->id_refcnt--;
1312 
1313 	kmem_free(ve, sizeof (stmf_view_entry_t));
1314 	return (0);
1315 }
1316 
1317 int
1318 stmf_add_group(uint8_t *grpname, uint16_t grpname_size,
1319 		stmf_id_type_t group_type, uint32_t *err_detail)
1320 {
1321 	stmf_status_t status;
1322 
1323 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1324 
1325 	if (group_type == STMF_ID_TYPE_HOST_GROUP)
1326 		status = stmf_add_hg(grpname, grpname_size, 0, err_detail);
1327 	else if (group_type == STMF_ID_TYPE_TARGET_GROUP)
1328 		status = stmf_add_tg(grpname, grpname_size, 0, err_detail);
1329 	else {
1330 		return (EINVAL);
1331 	}
1332 	switch (status) {
1333 	case STMF_SUCCESS:
1334 		return (0);
1335 	case STMF_INVALID_ARG:
1336 		return (EINVAL);
1337 	case STMF_ALREADY:
1338 		return (EEXIST);
1339 	default:
1340 		return (EIO);
1341 	}
1342 }
1343 
1344 /*
1345  * Group can only be removed only when it does not have
1346  * any view entry related
1347  */
1348 int
1349 stmf_remove_group(uint8_t *grpname, uint16_t grpname_size,
1350 		stmf_id_type_t group_type, uint32_t *err_detail)
1351 {
1352 	stmf_id_data_t *id;
1353 	stmf_id_data_t *idmemb;
1354 	stmf_id_list_t *grp_memblist;
1355 	stmf_i_scsi_session_t *iss;
1356 	stmf_i_local_port_t *ilport;
1357 
1358 	if (grpname[0] == '*')
1359 		return (EINVAL);
1360 
1361 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1362 
1363 	if (group_type == STMF_ID_TYPE_HOST_GROUP)
1364 		id = stmf_lookup_id(&stmf_state.stmf_hg_list,
1365 		    grpname_size, grpname);
1366 	else if (group_type == STMF_ID_TYPE_TARGET_GROUP)
1367 		id = stmf_lookup_id(&stmf_state.stmf_tg_list,
1368 		    grpname_size, grpname);
1369 	if (!id) {
1370 		*err_detail = (group_type == STMF_ID_TYPE_HOST_GROUP)?
1371 		    STMF_IOCERR_INVALID_HG:STMF_IOCERR_INVALID_TG;
1372 		return (ENODEV); /* no such grp */
1373 	}
1374 	if (id->id_refcnt) {
1375 		/* fail, still have viewentry related to it */
1376 		*err_detail = (group_type == STMF_ID_TYPE_HOST_GROUP)?
1377 		    STMF_IOCERR_HG_IN_USE:STMF_IOCERR_TG_IN_USE;
1378 		return (EBUSY);
1379 	}
1380 	grp_memblist = (stmf_id_list_t *)id->id_impl_specific;
1381 	while ((idmemb = grp_memblist->idl_head) != NULL) {
1382 		stmf_remove_id(grp_memblist, idmemb);
1383 		stmf_free_id(idmemb);
1384 	}
1385 
1386 	ASSERT(!grp_memblist->id_count);
1387 	if (id->id_type == STMF_ID_TYPE_TARGET_GROUP) {
1388 		for (ilport = stmf_state.stmf_ilportlist; ilport;
1389 		    ilport = ilport->ilport_next) {
1390 			if (ilport->ilport_tg == (void *)id) {
1391 				ilport->ilport_tg = NULL;
1392 			}
1393 		}
1394 		stmf_remove_id(&stmf_state.stmf_tg_list, id);
1395 	} else {
1396 		for (ilport = stmf_state.stmf_ilportlist; ilport;
1397 		    ilport = ilport->ilport_next) {
1398 			for (iss = ilport->ilport_ss_list; iss;
1399 			    iss = iss->iss_next) {
1400 				if (iss->iss_hg == (void *)id)
1401 					iss->iss_hg = NULL;
1402 			}
1403 		}
1404 		stmf_remove_id(&stmf_state.stmf_hg_list, id);
1405 	}
1406 	stmf_free_id(id);
1407 	return (0);
1408 
1409 }
1410 
1411 int
1412 stmf_add_group_member(uint8_t *grpname, uint16_t grpname_size,
1413 		uint8_t	*entry_ident, uint16_t entry_size,
1414 		stmf_id_type_t entry_type, uint32_t *err_detail)
1415 {
1416 	stmf_id_data_t	*id_grp, *id_alltgt;
1417 	stmf_id_data_t	*id_member;
1418 	stmf_id_data_t	*id_grp_tmp;
1419 	stmf_i_scsi_session_t *iss;
1420 	stmf_i_local_port_t *ilport;
1421 	stmf_lun_map_t *vemap, *vemap_alltgt;
1422 	uint8_t grpname_forall = '*';
1423 
1424 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1425 	ASSERT(grpname[0] != '*');
1426 
1427 	if (entry_type == STMF_ID_TYPE_HOST) {
1428 		id_grp = stmf_lookup_id(&stmf_state.stmf_hg_list,
1429 		    grpname_size, grpname);
1430 		id_grp_tmp = stmf_lookup_group_for_host(entry_ident,
1431 		    entry_size);
1432 	} else {
1433 		id_grp = stmf_lookup_id(&stmf_state.stmf_tg_list,
1434 		    grpname_size, grpname);
1435 		id_grp_tmp = stmf_lookup_group_for_target(entry_ident,
1436 		    entry_size);
1437 	}
1438 	if (id_grp == NULL) {
1439 		*err_detail = (entry_type == STMF_ID_TYPE_HOST)?
1440 		    STMF_IOCERR_INVALID_HG:STMF_IOCERR_INVALID_TG;
1441 		return (ENODEV); /* not found */
1442 	}
1443 
1444 	/* Check whether this member already bound to a group */
1445 	if (id_grp_tmp) {
1446 		if (id_grp_tmp != id_grp) {
1447 			*err_detail = (entry_type == STMF_ID_TYPE_HOST)?
1448 			    STMF_IOCERR_HG_ENTRY_EXISTS:
1449 			    STMF_IOCERR_TG_ENTRY_EXISTS;
1450 			return (EEXIST); /* already added into another grp */
1451 		}
1452 		else
1453 			return (0);
1454 	}
1455 	id_member = stmf_alloc_id(entry_size, entry_type,
1456 	    entry_ident, 0);
1457 	stmf_append_id((stmf_id_list_t *)id_grp->id_impl_specific, id_member);
1458 
1459 	if (entry_type == STMF_ID_TYPE_TARGET) {
1460 		ilport = stmf_targetident_to_ilport(entry_ident, entry_size);
1461 		if (ilport)
1462 			ilport->ilport_tg = (void *)id_grp;
1463 		return (0);
1464 	}
1465 	/* For host group member, update the session if needed */
1466 	if (!stmf_state.stmf_service_running)
1467 		return (0);
1468 	/* Need to consider all target group + this host group */
1469 	id_alltgt = stmf_lookup_id(&stmf_state.stmf_tg_list,
1470 	    1, &grpname_forall);
1471 	vemap_alltgt = stmf_get_ve_map_per_ids(id_alltgt, id_grp);
1472 
1473 	/* check whether there are sessions may be affected */
1474 	for (ilport = stmf_state.stmf_ilportlist; ilport;
1475 	    ilport = ilport->ilport_next) {
1476 		if (ilport->ilport_state != STMF_STATE_ONLINE)
1477 			continue;
1478 		iss = stmf_lookup_session_for_hostident(ilport,
1479 		    entry_ident, entry_size);
1480 		if (iss) {
1481 			stmf_id_data_t *tgid;
1482 			iss->iss_hg = (void *)id_grp;
1483 			tgid = ilport->ilport_tg;
1484 			if (tgid) {
1485 				vemap = stmf_get_ve_map_per_ids(tgid, id_grp);
1486 				if (vemap)
1487 					stmf_add_lus_to_session_per_vemap(
1488 					    ilport, iss, vemap);
1489 			}
1490 			if (vemap_alltgt)
1491 				stmf_add_lus_to_session_per_vemap(ilport,
1492 				    iss, vemap_alltgt);
1493 		}
1494 	}
1495 
1496 	return (0);
1497 }
1498 
1499 int
1500 stmf_remove_group_member(uint8_t *grpname, uint16_t grpname_size,
1501 		uint8_t *entry_ident, uint16_t entry_size,
1502 		stmf_id_type_t entry_type, uint32_t *err_detail)
1503 {
1504 	stmf_id_data_t	*id_grp, *id_alltgt;
1505 	stmf_id_data_t	*id_member;
1506 	stmf_lun_map_t *vemap,  *vemap_alltgt;
1507 	uint8_t grpname_forall = '*';
1508 	stmf_i_local_port_t *ilport;
1509 	stmf_i_scsi_session_t *iss;
1510 
1511 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1512 	ASSERT(grpname[0] != '*');
1513 
1514 	if (entry_type == STMF_ID_TYPE_HOST) {
1515 		id_grp = stmf_lookup_id(&stmf_state.stmf_hg_list,
1516 		    grpname_size, grpname);
1517 	} else {
1518 		id_grp = stmf_lookup_id(&stmf_state.stmf_tg_list,
1519 		    grpname_size, grpname);
1520 	}
1521 	if (id_grp == NULL) {
1522 		*err_detail = (entry_type == STMF_ID_TYPE_HOST)?
1523 		    STMF_IOCERR_INVALID_HG:STMF_IOCERR_INVALID_TG;
1524 		return (ENODEV); /* no such group */
1525 	}
1526 	id_member = stmf_lookup_id((stmf_id_list_t *)id_grp->id_impl_specific,
1527 	    entry_size, entry_ident);
1528 	if (!id_member) {
1529 		*err_detail = (entry_type == STMF_ID_TYPE_HOST)?
1530 		    STMF_IOCERR_INVALID_HG_ENTRY:STMF_IOCERR_INVALID_TG_ENTRY;
1531 		return (ENODEV); /* no such member */
1532 	}
1533 	/* verify target is offline */
1534 	if (entry_type == STMF_ID_TYPE_TARGET) {
1535 		ilport = stmf_targetident_to_ilport(entry_ident, entry_size);
1536 		if (ilport && ilport->ilport_state != STMF_STATE_OFFLINE) {
1537 			*err_detail = STMF_IOCERR_TG_NEED_TG_OFFLINE;
1538 			return (EBUSY);
1539 		}
1540 	}
1541 
1542 	stmf_remove_id((stmf_id_list_t *)id_grp->id_impl_specific, id_member);
1543 	stmf_free_id(id_member);
1544 
1545 	if (entry_type == STMF_ID_TYPE_TARGET) {
1546 		ilport = stmf_targetident_to_ilport(entry_ident, entry_size);
1547 		if (ilport)
1548 			ilport->ilport_tg = NULL;
1549 		return (0);
1550 	}
1551 	/* For host group member, update the session */
1552 	if (!stmf_state.stmf_service_running)
1553 		return (0);
1554 
1555 	/* Need to consider all target group + this host group */
1556 	id_alltgt = stmf_lookup_id(&stmf_state.stmf_tg_list,
1557 	    1, &grpname_forall);
1558 	vemap_alltgt = stmf_get_ve_map_per_ids(id_alltgt, id_grp);
1559 
1560 	/* check if there are session related, if so, update it */
1561 	for (ilport = stmf_state.stmf_ilportlist; ilport;
1562 	    ilport = ilport->ilport_next) {
1563 		if (ilport->ilport_state != STMF_STATE_ONLINE)
1564 			continue;
1565 		iss = stmf_lookup_session_for_hostident(ilport,
1566 		    entry_ident, entry_size);
1567 		if (iss) {
1568 			stmf_id_data_t *tgid;
1569 			iss->iss_hg = NULL;
1570 			tgid = ilport->ilport_tg;
1571 			if (tgid) {
1572 				vemap = stmf_get_ve_map_per_ids(tgid, id_grp);
1573 				if (vemap)
1574 					stmf_remove_lus_from_session_per_vemap(
1575 					    ilport, iss, vemap);
1576 			}
1577 			if (vemap_alltgt)
1578 				stmf_remove_lus_from_session_per_vemap(ilport,
1579 				    iss, vemap_alltgt);
1580 		}
1581 	}
1582 
1583 	return (0);
1584 }
1585 
1586 /* Assert stmf_lock is already held */
1587 stmf_i_local_port_t *
1588 stmf_targetident_to_ilport(uint8_t *target_ident, uint16_t ident_size)
1589 {
1590 	stmf_i_local_port_t *ilport;
1591 	uint8_t *id;
1592 
1593 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1594 
1595 	for (ilport = stmf_state.stmf_ilportlist; ilport;
1596 	    ilport = ilport->ilport_next) {
1597 		id = (uint8_t *)ilport->ilport_lport->lport_id;
1598 		if ((id[3] == ident_size) &&
1599 		    bcmp(id + 4, target_ident, ident_size) == 0) {
1600 			return (ilport);
1601 		}
1602 	}
1603 	return (NULL);
1604 }
1605 
1606 stmf_i_scsi_session_t *
1607 stmf_lookup_session_for_hostident(stmf_i_local_port_t *ilport,
1608 		uint8_t *host_ident, uint16_t ident_size)
1609 {
1610 	stmf_i_scsi_session_t *iss;
1611 	uint8_t *id;
1612 
1613 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1614 
1615 	for (iss = ilport->ilport_ss_list; iss; iss = iss->iss_next) {
1616 		id = (uint8_t *)iss->iss_ss->ss_rport_id;
1617 		if ((id[3] == ident_size) &&
1618 		    bcmp(id + 4, host_ident, ident_size) == 0) {
1619 			return (iss);
1620 		}
1621 	}
1622 	return (NULL);
1623 }
1624 
1625 stmf_i_lu_t *
1626 stmf_luident_to_ilu(uint8_t *lu_ident)
1627 {
1628 	stmf_i_lu_t *ilu;
1629 
1630 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1631 
1632 	for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
1633 		if (bcmp(&ilu->ilu_lu->lu_id->ident[0], lu_ident, 16) == 0)
1634 			return (ilu);
1635 	}
1636 
1637 	return (NULL);
1638 }
1639 
1640 /*
1641  * Assert stmf_lock is already held,
1642  * Just get the view map for the specific target group and host group
1643  * tgid and hgid can not be NULL
1644  */
1645 stmf_lun_map_t *
1646 stmf_get_ve_map_per_ids(stmf_id_data_t *tgid, stmf_id_data_t *hgid)
1647 {
1648 	int found = 0;
1649 	stmf_ver_tg_t *vertg;
1650 	stmf_ver_hg_t *verhg;
1651 
1652 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1653 
1654 	for (vertg = stmf_state.stmf_ver_tg_head;
1655 	    vertg; vertg = vertg->vert_next) {
1656 		if (vertg->vert_tg_ref == tgid) {
1657 			found = 1;
1658 			break;
1659 		}
1660 	}
1661 	if (!found)
1662 		return (NULL);
1663 
1664 	for (verhg = vertg->vert_verh_list; verhg; verhg = verhg->verh_next) {
1665 		if (verhg->verh_hg_ref == hgid) {
1666 			return (&verhg->verh_ve_map);
1667 		}
1668 	}
1669 	return (NULL);
1670 }
1671