xref: /illumos-gate/usr/src/uts/common/io/comstar/stmf/stmf.c (revision e8d80663e4f91871f843bb8ad9108dc0b76dfcf3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 /*
25  * Copyright 2012, Nexenta Systems, Inc. All rights reserved.
26  */
27 
28 #include <sys/conf.h>
29 #include <sys/file.h>
30 #include <sys/ddi.h>
31 #include <sys/sunddi.h>
32 #include <sys/modctl.h>
33 #include <sys/scsi/scsi.h>
34 #include <sys/scsi/generic/persist.h>
35 #include <sys/scsi/impl/scsi_reset_notify.h>
36 #include <sys/disp.h>
37 #include <sys/byteorder.h>
38 #include <sys/atomic.h>
39 #include <sys/ethernet.h>
40 #include <sys/sdt.h>
41 #include <sys/nvpair.h>
42 #include <sys/zone.h>
43 #include <sys/id_space.h>
44 
45 #include <sys/stmf.h>
46 #include <sys/lpif.h>
47 #include <sys/portif.h>
48 #include <sys/stmf_ioctl.h>
49 #include <sys/pppt_ic_if.h>
50 
51 #include "stmf_impl.h"
52 #include "lun_map.h"
53 #include "stmf_state.h"
54 #include "stmf_stats.h"
55 
56 /*
57  * Lock order:
58  * stmf_state_lock --> ilport_lock/iss_lockp --> ilu_task_lock
59  */
60 
61 static uint64_t stmf_session_counter = 0;
62 static uint16_t stmf_rtpid_counter = 0;
63 /* start messages at 1 */
64 static uint64_t stmf_proxy_msg_id = 1;
65 #define	MSG_ID_TM_BIT	0x8000000000000000
66 #define	ALIGNED_TO_8BYTE_BOUNDARY(i)	(((i) + 7) & ~7)
67 
68 struct stmf_svc_clocks;
69 
70 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
71 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
72 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
73 	void **result);
74 static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
75 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
76 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
77 	cred_t *credp, int *rval);
78 static int stmf_get_stmf_state(stmf_state_desc_t *std);
79 static int stmf_set_stmf_state(stmf_state_desc_t *std);
80 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
81     char *info);
82 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state);
83 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state);
84 
85 static void stmf_task_audit(stmf_i_scsi_task_t *itask,
86     task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf);
87 
88 static boolean_t stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp);
89 static char stmf_ctoi(char c);
90 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua);
91 void stmf_svc_init();
92 stmf_status_t stmf_svc_fini();
93 void stmf_svc(void *arg);
94 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
95 static void stmf_svc_kill_obj_requests(void *obj);
96 static void stmf_svc_timeout(struct stmf_svc_clocks *);
97 void stmf_check_freetask();
98 void stmf_abort_target_reset(scsi_task_t *task);
99 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
100 							int target_reset);
101 void stmf_target_reset_poll(struct scsi_task *task);
102 void stmf_handle_lun_reset(scsi_task_t *task);
103 void stmf_handle_target_reset(scsi_task_t *task);
104 void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off);
105 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
106     uint32_t *err_ret);
107 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
108 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
109     uint32_t *err_ret);
110 void stmf_delete_ppd(stmf_pp_data_t *ppd);
111 void stmf_delete_all_ppds();
112 void stmf_trace_clear();
113 void stmf_worker_init();
114 stmf_status_t stmf_worker_fini();
115 void stmf_worker_mgmt();
116 void stmf_worker_task(void *arg);
117 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss);
118 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg,
119     uint32_t type);
120 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg);
121 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg);
122 static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg);
123 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg);
124 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s);
125 
126 /* pppt modhandle */
127 ddi_modhandle_t pppt_mod;
128 
129 /* pppt modload imported functions */
130 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc;
131 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc;
132 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc;
133 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc;
134 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc;
135 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc;
136 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc;
137 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc;
138 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc;
139 stmf_ic_tx_msg_func_t ic_tx_msg;
140 stmf_ic_msg_free_func_t ic_msg_free;
141 
142 static void stmf_itl_task_start(stmf_i_scsi_task_t *itask);
143 static void stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask);
144 static void stmf_itl_task_done(stmf_i_scsi_task_t *itask);
145 
146 static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask,
147     stmf_data_buf_t *dbuf);
148 static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask,
149     stmf_data_buf_t *dbuf);
150 
151 static void stmf_update_kstat_lu_q(scsi_task_t *, void());
152 static void stmf_update_kstat_lport_q(scsi_task_t *, void());
153 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
154 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);
155 
156 static int stmf_irport_compare(const void *void_irport1,
157     const void *void_irport2);
158 static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid);
159 static void stmf_irport_destroy(stmf_i_remote_port_t *irport);
160 static stmf_i_remote_port_t *stmf_irport_register(
161     scsi_devid_desc_t *rport_devid);
162 static stmf_i_remote_port_t *stmf_irport_lookup_locked(
163     scsi_devid_desc_t *rport_devid);
164 static void stmf_irport_deregister(stmf_i_remote_port_t *irport);
165 
166 static void stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks);
167 static void stmf_delete_itl_kstat_by_lport(char *);
168 static void stmf_delete_itl_kstat_by_guid(char *);
169 static int stmf_itl_kstat_compare(const void*, const void*);
170 static stmf_i_itl_kstat_t *stmf_itl_kstat_lookup(char *kstat_nm);
171 static stmf_i_itl_kstat_t *stmf_itl_kstat_create(stmf_itl_data_t *itl,
172     char *nm, scsi_devid_desc_t *lport, scsi_devid_desc_t *lun);
173 
174 extern struct mod_ops mod_driverops;
175 
176 /* =====[ Tunables ]===== */
177 /* Internal tracing */
178 volatile int	stmf_trace_on = 1;
179 volatile int	stmf_trace_buf_size = (1 * 1024 * 1024);
180 /*
181  * The reason default task timeout is 75 is because we want the
182  * host to timeout 1st and mostly host timeout is 60 seconds.
183  */
184 volatile int	stmf_default_task_timeout = 75;
185 /*
186  * Setting this to one means, you are responsible for config load and keeping
187  * things in sync with persistent database.
188  */
189 volatile int	stmf_allow_modunload = 0;
190 
191 volatile int stmf_max_nworkers = 256;
192 volatile int stmf_min_nworkers = 4;
193 volatile int stmf_worker_scale_down_delay = 20;
194 
195 /* === [ Debugging and fault injection ] === */
196 #ifdef	DEBUG
197 volatile int stmf_drop_task_counter = 0;
198 volatile int stmf_drop_buf_counter = 0;
199 
200 #endif
201 
202 stmf_state_t		stmf_state;
203 static stmf_lu_t	*dlun0;
204 
205 static uint8_t stmf_first_zero[] =
206 	{ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
207 static uint8_t stmf_first_one[] =
208 	{ 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };
209 
210 static kmutex_t	trace_buf_lock;
211 static int	trace_buf_size;
212 static int	trace_buf_curndx;
213 caddr_t	stmf_trace_buf;
214 
215 static enum {
216 	STMF_WORKERS_DISABLED = 0,
217 	STMF_WORKERS_ENABLING,
218 	STMF_WORKERS_ENABLED
219 } stmf_workers_state = STMF_WORKERS_DISABLED;
220 static int stmf_i_max_nworkers;
221 static int stmf_i_min_nworkers;
222 static int stmf_nworkers_cur;		/* # of workers currently running */
223 static int stmf_nworkers_needed;	/* # of workers need to be running */
224 static int stmf_worker_sel_counter = 0;
225 static uint32_t stmf_cur_ntasks = 0;
226 static clock_t stmf_wm_last = 0;
227 /*
228  * This is equal to stmf_nworkers_cur while we are increasing # workers and
229  * stmf_nworkers_needed while we are decreasing the worker count.
230  */
231 static int stmf_nworkers_accepting_cmds;
232 static stmf_worker_t *stmf_workers = NULL;
233 static clock_t stmf_worker_mgmt_delay = 2;
234 static clock_t stmf_worker_scale_down_timer = 0;
235 static int stmf_worker_scale_down_qd = 0;
236 
/*
 * Character-device entry points for the stmf control node (/dev/stmf).
 * Only open/close/ioctl are implemented; all data-path entry points are
 * nodev since management happens exclusively through ioctls.
 */
static struct cb_ops stmf_cb_ops = {
	stmf_open,			/* open */
	stmf_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	stmf_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

/* Driver operations; stmf is a pseudo device (no bus or power ops). */
static struct dev_ops stmf_ops = {
	DEVO_REV,
	0,			/* refcnt */
	stmf_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	stmf_attach,
	stmf_detach,
	nodev,			/* reset */
	&stmf_cb_ops,
	NULL,			/* bus_ops */
	NULL			/* power */
};

#define	STMF_NAME		"COMSTAR STMF"
#define	STMF_MODULE_NAME	"stmf"

static struct modldrv modldrv = {
	&mod_driverops,
	STMF_NAME,
	&stmf_ops
};

/* Module linkage consumed by _init/_fini/_info below. */
static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
286 
/*
 * Module load entry point.  Installs the module linkage first, then brings
 * up the global trace buffer, the zeroed stmf_state singleton, the remote
 * port / ITL-kstat AVL trees, the instance id spaces, and finally the view,
 * service thread and dummy-LU subsystems.  Returns 0 on success or the
 * mod_install() error.
 */
int
_init(void)
{
	int ret;

	ret = mod_install(&modlinkage);
	if (ret)
		return (ret);
	/* Trace buffer must exist before any stmf_trace() caller runs. */
	stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
	trace_buf_size = stmf_trace_buf_size;
	trace_buf_curndx = 0;
	mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
	bzero(&stmf_state, sizeof (stmf_state_t));
	/* STMF service is off by default */
	stmf_state.stmf_service_running = 0;
	/* default lu/lport states are online */
	stmf_state.stmf_default_lu_state = STMF_STATE_ONLINE;
	stmf_state.stmf_default_lport_state = STMF_STATE_ONLINE;
	mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
	/* Seed the session counter from current lbolt. */
	stmf_session_counter = (uint64_t)ddi_get_lbolt();
	/* Remote ports, ordered by stmf_irport_compare(). */
	avl_create(&stmf_state.stmf_irportlist,
	    stmf_irport_compare, sizeof (stmf_i_remote_port_t),
	    offsetof(stmf_i_remote_port_t, irport_ln));
	stmf_state.stmf_ilport_inst_space =
	    id_space_create("lport-instances", 0, MAX_ILPORT);
	stmf_state.stmf_irport_inst_space =
	    id_space_create("rport-instances", 0, MAX_IRPORT);
	/* Per-ITL kstats, ordered by stmf_itl_kstat_compare(). */
	avl_create(&stmf_state.stmf_itl_kstat_list,
	    stmf_itl_kstat_compare, sizeof (stmf_i_itl_kstat_t),
	    offsetof(stmf_i_itl_kstat_t, iitl_kstat_ln));
	stmf_view_init();
	stmf_svc_init();
	stmf_dlun_init();
	return (ret);
}
323 
324 int
325 _fini(void)
326 {
327 	int ret;
328 	stmf_i_remote_port_t	*irport;
329 	stmf_i_itl_kstat_t	*ks_itl;
330 	void			*avl_dest_cookie = NULL;
331 
332 	if (stmf_state.stmf_service_running)
333 		return (EBUSY);
334 	if ((!stmf_allow_modunload) &&
335 	    (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
336 		return (EBUSY);
337 	}
338 	if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
339 		return (EBUSY);
340 	}
341 	if (stmf_dlun_fini() != STMF_SUCCESS)
342 		return (EBUSY);
343 	if (stmf_worker_fini() != STMF_SUCCESS) {
344 		stmf_dlun_init();
345 		return (EBUSY);
346 	}
347 	if (stmf_svc_fini() != STMF_SUCCESS) {
348 		stmf_dlun_init();
349 		stmf_worker_init();
350 		return (EBUSY);
351 	}
352 
353 	ret = mod_remove(&modlinkage);
354 	if (ret) {
355 		stmf_svc_init();
356 		stmf_dlun_init();
357 		stmf_worker_init();
358 		return (ret);
359 	}
360 
361 	stmf_view_clear_config();
362 
363 	while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist,
364 	    &avl_dest_cookie)) != NULL)
365 		stmf_irport_destroy(irport);
366 	avl_destroy(&stmf_state.stmf_irportlist);
367 	id_space_destroy(stmf_state.stmf_ilport_inst_space);
368 	id_space_destroy(stmf_state.stmf_irport_inst_space);
369 
370 	avl_dest_cookie = NULL;
371 	while ((ks_itl = avl_destroy_nodes(&stmf_state.stmf_itl_kstat_list,
372 	    &avl_dest_cookie)) != NULL) {
373 		stmf_teardown_itl_kstats(ks_itl);
374 		kmem_free(ks_itl, sizeof (ks_itl));
375 	}
376 	avl_destroy(&stmf_state.stmf_itl_kstat_list);
377 
378 	kmem_free(stmf_trace_buf, stmf_trace_buf_size);
379 	mutex_destroy(&trace_buf_lock);
380 	mutex_destroy(&stmf_state.stmf_lock);
381 	cv_destroy(&stmf_state.stmf_cv);
382 	return (ret);
383 }
384 
/* Module information entry point; delegates to mod_info(9F). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
390 
391 /* ARGSUSED */
392 static int
393 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
394 {
395 	switch (cmd) {
396 	case DDI_INFO_DEVT2DEVINFO:
397 		*result = stmf_state.stmf_dip;
398 		break;
399 	case DDI_INFO_DEVT2INSTANCE:
400 		*result =
401 		    (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
402 		break;
403 	default:
404 		return (DDI_FAILURE);
405 	}
406 
407 	return (DDI_SUCCESS);
408 }
409 
410 static int
411 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
412 {
413 	switch (cmd) {
414 	case DDI_ATTACH:
415 		stmf_state.stmf_dip = dip;
416 
417 		if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
418 		    DDI_NT_STMF, 0) != DDI_SUCCESS) {
419 			break;
420 		}
421 		ddi_report_dev(dip);
422 		return (DDI_SUCCESS);
423 	}
424 
425 	return (DDI_FAILURE);
426 }
427 
428 static int
429 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
430 {
431 	switch (cmd) {
432 	case DDI_DETACH:
433 		ddi_remove_minor_node(dip, 0);
434 		return (DDI_SUCCESS);
435 	}
436 
437 	return (DDI_FAILURE);
438 }
439 
440 /* ARGSUSED */
441 static int
442 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
443 {
444 	mutex_enter(&stmf_state.stmf_lock);
445 	if (stmf_state.stmf_exclusive_open) {
446 		mutex_exit(&stmf_state.stmf_lock);
447 		return (EBUSY);
448 	}
449 	if (flag & FEXCL) {
450 		if (stmf_state.stmf_opened) {
451 			mutex_exit(&stmf_state.stmf_lock);
452 			return (EBUSY);
453 		}
454 		stmf_state.stmf_exclusive_open = 1;
455 	}
456 	stmf_state.stmf_opened = 1;
457 	mutex_exit(&stmf_state.stmf_lock);
458 	return (0);
459 }
460 
461 /* ARGSUSED */
462 static int
463 stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
464 {
465 	mutex_enter(&stmf_state.stmf_lock);
466 	stmf_state.stmf_opened = 0;
467 	if (stmf_state.stmf_exclusive_open &&
468 	    (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
469 		stmf_state.stmf_config_state = STMF_CONFIG_NONE;
470 		stmf_delete_all_ppds();
471 		stmf_view_clear_config();
472 		stmf_view_init();
473 	}
474 	stmf_state.stmf_exclusive_open = 0;
475 	mutex_exit(&stmf_state.stmf_lock);
476 	return (0);
477 }
478 
479 int
480 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
481 						void **ibuf, void **obuf)
482 {
483 	int ret;
484 
485 	*ibuf = NULL;
486 	*obuf = NULL;
487 	*iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);
488 
489 	ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
490 	if (ret)
491 		return (EFAULT);
492 	if ((*iocd)->stmf_version != STMF_VERSION_1) {
493 		ret = EINVAL;
494 		goto copyin_iocdata_done;
495 	}
496 	if ((*iocd)->stmf_ibuf_size) {
497 		*ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
498 		ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
499 		    *ibuf, (*iocd)->stmf_ibuf_size, mode);
500 	}
501 	if ((*iocd)->stmf_obuf_size)
502 		*obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);
503 
504 	if (ret == 0)
505 		return (0);
506 	ret = EFAULT;
507 copyin_iocdata_done:;
508 	if (*obuf) {
509 		kmem_free(*obuf, (*iocd)->stmf_obuf_size);
510 		*obuf = NULL;
511 	}
512 	if (*ibuf) {
513 		kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
514 		*ibuf = NULL;
515 	}
516 	kmem_free(*iocd, sizeof (stmf_iocdata_t));
517 	return (ret);
518 }
519 
520 int
521 stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
522 {
523 	int ret;
524 
525 	if (iocd->stmf_obuf_size) {
526 		ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
527 		    iocd->stmf_obuf_size, mode);
528 		if (ret)
529 			return (EFAULT);
530 	}
531 	ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
532 	if (ret)
533 		return (EFAULT);
534 	return (0);
535 }
536 
537 /* ARGSUSED */
538 static int
539 stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
540 	cred_t *credp, int *rval)
541 {
542 	stmf_iocdata_t *iocd;
543 	void *ibuf = NULL, *obuf = NULL;
544 	slist_lu_t *luid_list;
545 	slist_target_port_t *lportid_list;
546 	stmf_i_lu_t *ilu;
547 	stmf_i_local_port_t *ilport;
548 	stmf_i_scsi_session_t *iss;
549 	slist_scsi_session_t *iss_list;
550 	sioc_lu_props_t *lup;
551 	sioc_target_port_props_t *lportp;
552 	stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
553 	uint64_t *ppi_token = NULL;
554 	uint8_t *p_id, *id;
555 	stmf_state_desc_t *std;
556 	stmf_status_t ctl_ret;
557 	stmf_state_change_info_t ssi;
558 	int ret = 0;
559 	uint32_t n;
560 	int i;
561 	stmf_group_op_data_t *grp_entry;
562 	stmf_group_name_t *grpname;
563 	stmf_view_op_entry_t *ve;
564 	stmf_id_type_t idtype;
565 	stmf_id_data_t *id_entry;
566 	stmf_id_list_t	*id_list;
567 	stmf_view_entry_t *view_entry;
568 	stmf_set_props_t *stmf_set_props;
569 	uint32_t	veid;
570 	if ((cmd & 0xff000000) != STMF_IOCTL) {
571 		return (ENOTTY);
572 	}
573 
574 	if (drv_priv(credp) != 0) {
575 		return (EPERM);
576 	}
577 
578 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
579 	if (ret)
580 		return (ret);
581 	iocd->stmf_error = 0;
582 
583 	switch (cmd) {
584 	case STMF_IOCTL_LU_LIST:
585 		/* retrieves both registered/unregistered */
586 		mutex_enter(&stmf_state.stmf_lock);
587 		id_list = &stmf_state.stmf_luid_list;
588 		n = min(id_list->id_count,
589 		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
590 		iocd->stmf_obuf_max_nentries = id_list->id_count;
591 		luid_list = (slist_lu_t *)obuf;
592 		id_entry = id_list->idl_head;
593 		for (i = 0; i < n; i++) {
594 			bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
595 			id_entry = id_entry->id_next;
596 		}
597 
598 		n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
599 		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
600 			id = (uint8_t *)ilu->ilu_lu->lu_id;
601 			if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
602 				iocd->stmf_obuf_max_nentries++;
603 				if (i < n) {
604 					bcopy(id + 4, luid_list[i].lu_guid,
605 					    sizeof (slist_lu_t));
606 					i++;
607 				}
608 			}
609 		}
610 		iocd->stmf_obuf_nentries = i;
611 		mutex_exit(&stmf_state.stmf_lock);
612 		break;
613 
614 	case STMF_IOCTL_REG_LU_LIST:
615 		mutex_enter(&stmf_state.stmf_lock);
616 		iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
617 		n = min(stmf_state.stmf_nlus,
618 		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
619 		iocd->stmf_obuf_nentries = n;
620 		ilu = stmf_state.stmf_ilulist;
621 		luid_list = (slist_lu_t *)obuf;
622 		for (i = 0; i < n; i++) {
623 			uint8_t *id;
624 			id = (uint8_t *)ilu->ilu_lu->lu_id;
625 			bcopy(id + 4, luid_list[i].lu_guid, 16);
626 			ilu = ilu->ilu_next;
627 		}
628 		mutex_exit(&stmf_state.stmf_lock);
629 		break;
630 
631 	case STMF_IOCTL_VE_LU_LIST:
632 		mutex_enter(&stmf_state.stmf_lock);
633 		id_list = &stmf_state.stmf_luid_list;
634 		n = min(id_list->id_count,
635 		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
636 		iocd->stmf_obuf_max_nentries = id_list->id_count;
637 		iocd->stmf_obuf_nentries = n;
638 		luid_list = (slist_lu_t *)obuf;
639 		id_entry = id_list->idl_head;
640 		for (i = 0; i < n; i++) {
641 			bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
642 			id_entry = id_entry->id_next;
643 		}
644 		mutex_exit(&stmf_state.stmf_lock);
645 		break;
646 
647 	case STMF_IOCTL_TARGET_PORT_LIST:
648 		mutex_enter(&stmf_state.stmf_lock);
649 		iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
650 		n = min(stmf_state.stmf_nlports,
651 		    (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
652 		iocd->stmf_obuf_nentries = n;
653 		ilport = stmf_state.stmf_ilportlist;
654 		lportid_list = (slist_target_port_t *)obuf;
655 		for (i = 0; i < n; i++) {
656 			uint8_t *id;
657 			id = (uint8_t *)ilport->ilport_lport->lport_id;
658 			bcopy(id, lportid_list[i].target, id[3] + 4);
659 			ilport = ilport->ilport_next;
660 		}
661 		mutex_exit(&stmf_state.stmf_lock);
662 		break;
663 
664 	case STMF_IOCTL_SESSION_LIST:
665 		p_id = (uint8_t *)ibuf;
666 		if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
667 		    (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
668 			ret = EINVAL;
669 			break;
670 		}
671 		mutex_enter(&stmf_state.stmf_lock);
672 		for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
673 		    ilport->ilport_next) {
674 			uint8_t *id;
675 			id = (uint8_t *)ilport->ilport_lport->lport_id;
676 			if ((p_id[3] == id[3]) &&
677 			    (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
678 				break;
679 			}
680 		}
681 		if (ilport == NULL) {
682 			mutex_exit(&stmf_state.stmf_lock);
683 			ret = ENOENT;
684 			break;
685 		}
686 		iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
687 		n = min(ilport->ilport_nsessions,
688 		    (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
689 		iocd->stmf_obuf_nentries = n;
690 		iss = ilport->ilport_ss_list;
691 		iss_list = (slist_scsi_session_t *)obuf;
692 		for (i = 0; i < n; i++) {
693 			uint8_t *id;
694 			id = (uint8_t *)iss->iss_ss->ss_rport_id;
695 			bcopy(id, iss_list[i].initiator, id[3] + 4);
696 			iss_list[i].creation_time = (uint32_t)
697 			    iss->iss_creation_time;
698 			if (iss->iss_ss->ss_rport_alias) {
699 				(void) strncpy(iss_list[i].alias,
700 				    iss->iss_ss->ss_rport_alias, 255);
701 				iss_list[i].alias[255] = 0;
702 			} else {
703 				iss_list[i].alias[0] = 0;
704 			}
705 			iss = iss->iss_next;
706 		}
707 		mutex_exit(&stmf_state.stmf_lock);
708 		break;
709 
710 	case STMF_IOCTL_GET_LU_PROPERTIES:
711 		p_id = (uint8_t *)ibuf;
712 		if ((iocd->stmf_ibuf_size < 16) ||
713 		    (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
714 		    (p_id[0] == 0)) {
715 			ret = EINVAL;
716 			break;
717 		}
718 		mutex_enter(&stmf_state.stmf_lock);
719 		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
720 			if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
721 				break;
722 		}
723 		if (ilu == NULL) {
724 			mutex_exit(&stmf_state.stmf_lock);
725 			ret = ENOENT;
726 			break;
727 		}
728 		lup = (sioc_lu_props_t *)obuf;
729 		bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
730 		lup->lu_state = ilu->ilu_state & 0x0f;
731 		lup->lu_present = 1; /* XXX */
732 		(void) strncpy(lup->lu_provider_name,
733 		    ilu->ilu_lu->lu_lp->lp_name, 255);
734 		lup->lu_provider_name[254] = 0;
735 		if (ilu->ilu_lu->lu_alias) {
736 			(void) strncpy(lup->lu_alias,
737 			    ilu->ilu_lu->lu_alias, 255);
738 			lup->lu_alias[255] = 0;
739 		} else {
740 			lup->lu_alias[0] = 0;
741 		}
742 		mutex_exit(&stmf_state.stmf_lock);
743 		break;
744 
745 	case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
746 		p_id = (uint8_t *)ibuf;
747 		if ((p_id == NULL) ||
748 		    (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
749 		    (iocd->stmf_obuf_size <
750 		    sizeof (sioc_target_port_props_t))) {
751 			ret = EINVAL;
752 			break;
753 		}
754 		mutex_enter(&stmf_state.stmf_lock);
755 		for (ilport = stmf_state.stmf_ilportlist; ilport;
756 		    ilport = ilport->ilport_next) {
757 			uint8_t *id;
758 			id = (uint8_t *)ilport->ilport_lport->lport_id;
759 			if ((p_id[3] == id[3]) &&
760 			    (bcmp(p_id+4, id+4, id[3]) == 0))
761 				break;
762 		}
763 		if (ilport == NULL) {
764 			mutex_exit(&stmf_state.stmf_lock);
765 			ret = ENOENT;
766 			break;
767 		}
768 		lportp = (sioc_target_port_props_t *)obuf;
769 		bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
770 		    ilport->ilport_lport->lport_id->ident_length + 4);
771 		lportp->tgt_state = ilport->ilport_state & 0x0f;
772 		lportp->tgt_present = 1; /* XXX */
773 		(void) strncpy(lportp->tgt_provider_name,
774 		    ilport->ilport_lport->lport_pp->pp_name, 255);
775 		lportp->tgt_provider_name[254] = 0;
776 		if (ilport->ilport_lport->lport_alias) {
777 			(void) strncpy(lportp->tgt_alias,
778 			    ilport->ilport_lport->lport_alias, 255);
779 			lportp->tgt_alias[255] = 0;
780 		} else {
781 			lportp->tgt_alias[0] = 0;
782 		}
783 		mutex_exit(&stmf_state.stmf_lock);
784 		break;
785 
786 	case STMF_IOCTL_SET_STMF_STATE:
787 		if ((ibuf == NULL) ||
788 		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
789 			ret = EINVAL;
790 			break;
791 		}
792 		ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
793 		break;
794 
795 	case STMF_IOCTL_GET_STMF_STATE:
796 		if ((obuf == NULL) ||
797 		    (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
798 			ret = EINVAL;
799 			break;
800 		}
801 		ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
802 		break;
803 
804 	case STMF_IOCTL_SET_ALUA_STATE:
805 		if ((ibuf == NULL) ||
806 		    (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) {
807 			ret = EINVAL;
808 			break;
809 		}
810 		ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf);
811 		break;
812 
813 	case STMF_IOCTL_GET_ALUA_STATE:
814 		if ((obuf == NULL) ||
815 		    (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) {
816 			ret = EINVAL;
817 			break;
818 		}
819 		stmf_get_alua_state((stmf_alua_state_desc_t *)obuf);
820 		break;
821 
822 	case STMF_IOCTL_SET_LU_STATE:
823 		ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
824 		ssi.st_additional_info = NULL;
825 		std = (stmf_state_desc_t *)ibuf;
826 		if ((ibuf == NULL) ||
827 		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
828 			ret = EINVAL;
829 			break;
830 		}
831 		p_id = std->ident;
832 		mutex_enter(&stmf_state.stmf_lock);
833 		if (stmf_state.stmf_inventory_locked) {
834 			mutex_exit(&stmf_state.stmf_lock);
835 			ret = EBUSY;
836 			break;
837 		}
838 		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
839 			if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
840 				break;
841 		}
842 		if (ilu == NULL) {
843 			mutex_exit(&stmf_state.stmf_lock);
844 			ret = ENOENT;
845 			break;
846 		}
847 		stmf_state.stmf_inventory_locked = 1;
848 		mutex_exit(&stmf_state.stmf_lock);
849 		cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
850 		    STMF_CMD_LU_OFFLINE;
851 		ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
852 		if (ctl_ret == STMF_ALREADY)
853 			ret = 0;
854 		else if (ctl_ret == STMF_BUSY)
855 			ret = EBUSY;
856 		else if (ctl_ret != STMF_SUCCESS)
857 			ret = EIO;
858 		mutex_enter(&stmf_state.stmf_lock);
859 		stmf_state.stmf_inventory_locked = 0;
860 		mutex_exit(&stmf_state.stmf_lock);
861 		break;
862 
863 	case STMF_IOCTL_SET_STMF_PROPS:
864 		if ((ibuf == NULL) ||
865 		    (iocd->stmf_ibuf_size < sizeof (stmf_set_props_t))) {
866 			ret = EINVAL;
867 			break;
868 		}
869 		stmf_set_props = (stmf_set_props_t *)ibuf;
870 		mutex_enter(&stmf_state.stmf_lock);
871 		if ((stmf_set_props->default_lu_state_value ==
872 		    STMF_STATE_OFFLINE) ||
873 		    (stmf_set_props->default_lu_state_value ==
874 		    STMF_STATE_ONLINE)) {
875 			stmf_state.stmf_default_lu_state =
876 			    stmf_set_props->default_lu_state_value;
877 		}
878 		if ((stmf_set_props->default_target_state_value ==
879 		    STMF_STATE_OFFLINE) ||
880 		    (stmf_set_props->default_target_state_value ==
881 		    STMF_STATE_ONLINE)) {
882 			stmf_state.stmf_default_lport_state =
883 			    stmf_set_props->default_target_state_value;
884 		}
885 
886 		mutex_exit(&stmf_state.stmf_lock);
887 		break;
888 
889 	case STMF_IOCTL_SET_TARGET_PORT_STATE:
890 		ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
891 		ssi.st_additional_info = NULL;
892 		std = (stmf_state_desc_t *)ibuf;
893 		if ((ibuf == NULL) ||
894 		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
895 			ret = EINVAL;
896 			break;
897 		}
898 		p_id = std->ident;
899 		mutex_enter(&stmf_state.stmf_lock);
900 		if (stmf_state.stmf_inventory_locked) {
901 			mutex_exit(&stmf_state.stmf_lock);
902 			ret = EBUSY;
903 			break;
904 		}
905 		for (ilport = stmf_state.stmf_ilportlist; ilport;
906 		    ilport = ilport->ilport_next) {
907 			uint8_t *id;
908 			id = (uint8_t *)ilport->ilport_lport->lport_id;
909 			if ((id[3] == p_id[3]) &&
910 			    (bcmp(id+4, p_id+4, id[3]) == 0)) {
911 				break;
912 			}
913 		}
914 		if (ilport == NULL) {
915 			mutex_exit(&stmf_state.stmf_lock);
916 			ret = ENOENT;
917 			break;
918 		}
919 		stmf_state.stmf_inventory_locked = 1;
920 		mutex_exit(&stmf_state.stmf_lock);
921 		cmd = (std->state == STMF_STATE_ONLINE) ?
922 		    STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
923 		ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
924 		if (ctl_ret == STMF_ALREADY)
925 			ret = 0;
926 		else if (ctl_ret == STMF_BUSY)
927 			ret = EBUSY;
928 		else if (ctl_ret != STMF_SUCCESS)
929 			ret = EIO;
930 		mutex_enter(&stmf_state.stmf_lock);
931 		stmf_state.stmf_inventory_locked = 0;
932 		mutex_exit(&stmf_state.stmf_lock);
933 		break;
934 
935 	case STMF_IOCTL_ADD_HG_ENTRY:
936 		idtype = STMF_ID_TYPE_HOST;
937 		/* FALLTHROUGH */
938 	case STMF_IOCTL_ADD_TG_ENTRY:
939 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
940 			ret = EACCES;
941 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
942 			break;
943 		}
944 		if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
945 			idtype = STMF_ID_TYPE_TARGET;
946 		}
947 		grp_entry = (stmf_group_op_data_t *)ibuf;
948 		if ((ibuf == NULL) ||
949 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
950 			ret = EINVAL;
951 			break;
952 		}
953 		if (grp_entry->group.name[0] == '*') {
954 			ret = EINVAL;
955 			break; /* not allowed */
956 		}
957 		mutex_enter(&stmf_state.stmf_lock);
958 		ret = stmf_add_group_member(grp_entry->group.name,
959 		    grp_entry->group.name_size,
960 		    grp_entry->ident + 4,
961 		    grp_entry->ident[3],
962 		    idtype,
963 		    &iocd->stmf_error);
964 		mutex_exit(&stmf_state.stmf_lock);
965 		break;
966 	case STMF_IOCTL_REMOVE_HG_ENTRY:
967 		idtype = STMF_ID_TYPE_HOST;
968 		/* FALLTHROUGH */
969 	case STMF_IOCTL_REMOVE_TG_ENTRY:
970 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
971 			ret = EACCES;
972 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
973 			break;
974 		}
975 		if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
976 			idtype = STMF_ID_TYPE_TARGET;
977 		}
978 		grp_entry = (stmf_group_op_data_t *)ibuf;
979 		if ((ibuf == NULL) ||
980 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
981 			ret = EINVAL;
982 			break;
983 		}
984 		if (grp_entry->group.name[0] == '*') {
985 			ret = EINVAL;
986 			break; /* not allowed */
987 		}
988 		mutex_enter(&stmf_state.stmf_lock);
989 		ret = stmf_remove_group_member(grp_entry->group.name,
990 		    grp_entry->group.name_size,
991 		    grp_entry->ident + 4,
992 		    grp_entry->ident[3],
993 		    idtype,
994 		    &iocd->stmf_error);
995 		mutex_exit(&stmf_state.stmf_lock);
996 		break;
997 	case STMF_IOCTL_CREATE_HOST_GROUP:
998 		idtype = STMF_ID_TYPE_HOST_GROUP;
999 		/* FALLTHROUGH */
1000 	case STMF_IOCTL_CREATE_TARGET_GROUP:
1001 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1002 			ret = EACCES;
1003 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1004 			break;
1005 		}
1006 		grpname = (stmf_group_name_t *)ibuf;
1007 
1008 		if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
1009 			idtype = STMF_ID_TYPE_TARGET_GROUP;
1010 		if ((ibuf == NULL) ||
1011 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1012 			ret = EINVAL;
1013 			break;
1014 		}
1015 		if (grpname->name[0] == '*') {
1016 			ret = EINVAL;
1017 			break; /* not allowed */
1018 		}
1019 		mutex_enter(&stmf_state.stmf_lock);
1020 		ret = stmf_add_group(grpname->name,
1021 		    grpname->name_size, idtype, &iocd->stmf_error);
1022 		mutex_exit(&stmf_state.stmf_lock);
1023 		break;
1024 	case STMF_IOCTL_REMOVE_HOST_GROUP:
1025 		idtype = STMF_ID_TYPE_HOST_GROUP;
1026 		/* FALLTHROUGH */
1027 	case STMF_IOCTL_REMOVE_TARGET_GROUP:
1028 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1029 			ret = EACCES;
1030 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1031 			break;
1032 		}
1033 		grpname = (stmf_group_name_t *)ibuf;
1034 		if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
1035 			idtype = STMF_ID_TYPE_TARGET_GROUP;
1036 		if ((ibuf == NULL) ||
1037 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1038 			ret = EINVAL;
1039 			break;
1040 		}
1041 		if (grpname->name[0] == '*') {
1042 			ret = EINVAL;
1043 			break; /* not allowed */
1044 		}
1045 		mutex_enter(&stmf_state.stmf_lock);
1046 		ret = stmf_remove_group(grpname->name,
1047 		    grpname->name_size, idtype, &iocd->stmf_error);
1048 		mutex_exit(&stmf_state.stmf_lock);
1049 		break;
1050 	case STMF_IOCTL_VALIDATE_VIEW:
1051 	case STMF_IOCTL_ADD_VIEW_ENTRY:
1052 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1053 			ret = EACCES;
1054 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1055 			break;
1056 		}
1057 		ve = (stmf_view_op_entry_t *)ibuf;
1058 		if ((ibuf == NULL) ||
1059 		    (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1060 			ret = EINVAL;
1061 			break;
1062 		}
1063 		if (!ve->ve_lu_number_valid)
1064 			ve->ve_lu_nbr[2] = 0xFF;
1065 		if (ve->ve_all_hosts) {
1066 			ve->ve_host_group.name[0] = '*';
1067 			ve->ve_host_group.name_size = 1;
1068 		}
1069 		if (ve->ve_all_targets) {
1070 			ve->ve_target_group.name[0] = '*';
1071 			ve->ve_target_group.name_size = 1;
1072 		}
1073 		if (ve->ve_ndx_valid)
1074 			veid = ve->ve_ndx;
1075 		else
1076 			veid = 0xffffffff;
1077 		mutex_enter(&stmf_state.stmf_lock);
1078 		if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
1079 			ret = stmf_add_ve(ve->ve_host_group.name,
1080 			    ve->ve_host_group.name_size,
1081 			    ve->ve_target_group.name,
1082 			    ve->ve_target_group.name_size,
1083 			    ve->ve_guid,
1084 			    &veid,
1085 			    ve->ve_lu_nbr,
1086 			    &iocd->stmf_error);
1087 		} else {  /* STMF_IOCTL_VALIDATE_VIEW */
1088 			ret = stmf_validate_lun_ve(ve->ve_host_group.name,
1089 			    ve->ve_host_group.name_size,
1090 			    ve->ve_target_group.name,
1091 			    ve->ve_target_group.name_size,
1092 			    ve->ve_lu_nbr,
1093 			    &iocd->stmf_error);
1094 		}
1095 		mutex_exit(&stmf_state.stmf_lock);
1096 		if (ret == 0 &&
1097 		    (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
1098 		    iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
1099 			stmf_view_op_entry_t *ve_ret =
1100 			    (stmf_view_op_entry_t *)obuf;
1101 			iocd->stmf_obuf_nentries = 1;
1102 			iocd->stmf_obuf_max_nentries = 1;
1103 			if (!ve->ve_ndx_valid) {
1104 				ve_ret->ve_ndx = veid;
1105 				ve_ret->ve_ndx_valid = 1;
1106 			}
1107 			if (!ve->ve_lu_number_valid) {
1108 				ve_ret->ve_lu_number_valid = 1;
1109 				bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
1110 			}
1111 		}
1112 		break;
1113 	case STMF_IOCTL_REMOVE_VIEW_ENTRY:
1114 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1115 			ret = EACCES;
1116 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1117 			break;
1118 		}
1119 		ve = (stmf_view_op_entry_t *)ibuf;
1120 		if ((ibuf == NULL) ||
1121 		    (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1122 			ret = EINVAL;
1123 			break;
1124 		}
1125 		if (!ve->ve_ndx_valid) {
1126 			ret = EINVAL;
1127 			break;
1128 		}
1129 		mutex_enter(&stmf_state.stmf_lock);
1130 		ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
1131 		    &iocd->stmf_error);
1132 		mutex_exit(&stmf_state.stmf_lock);
1133 		break;
1134 	case STMF_IOCTL_GET_HG_LIST:
1135 		id_list = &stmf_state.stmf_hg_list;
1136 		/* FALLTHROUGH */
1137 	case STMF_IOCTL_GET_TG_LIST:
1138 		if (cmd == STMF_IOCTL_GET_TG_LIST)
1139 			id_list = &stmf_state.stmf_tg_list;
1140 		mutex_enter(&stmf_state.stmf_lock);
1141 		iocd->stmf_obuf_max_nentries = id_list->id_count;
1142 		n = min(id_list->id_count,
1143 		    (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
1144 		iocd->stmf_obuf_nentries = n;
1145 		id_entry = id_list->idl_head;
1146 		grpname = (stmf_group_name_t *)obuf;
1147 		for (i = 0; i < n; i++) {
1148 			if (id_entry->id_data[0] == '*') {
1149 				if (iocd->stmf_obuf_nentries > 0) {
1150 					iocd->stmf_obuf_nentries--;
1151 				}
1152 				id_entry = id_entry->id_next;
1153 				continue;
1154 			}
1155 			grpname->name_size = id_entry->id_data_size;
1156 			bcopy(id_entry->id_data, grpname->name,
1157 			    id_entry->id_data_size);
1158 			grpname++;
1159 			id_entry = id_entry->id_next;
1160 		}
1161 		mutex_exit(&stmf_state.stmf_lock);
1162 		break;
1163 	case STMF_IOCTL_GET_HG_ENTRIES:
1164 		id_list = &stmf_state.stmf_hg_list;
1165 		/* FALLTHROUGH */
1166 	case STMF_IOCTL_GET_TG_ENTRIES:
1167 		grpname = (stmf_group_name_t *)ibuf;
1168 		if ((ibuf == NULL) ||
1169 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1170 			ret = EINVAL;
1171 			break;
1172 		}
1173 		if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
1174 			id_list = &stmf_state.stmf_tg_list;
1175 		}
1176 		mutex_enter(&stmf_state.stmf_lock);
1177 		id_entry = stmf_lookup_id(id_list, grpname->name_size,
1178 		    grpname->name);
1179 		if (!id_entry)
1180 			ret = ENODEV;
1181 		else {
1182 			stmf_ge_ident_t *grp_entry;
1183 			id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
1184 			iocd->stmf_obuf_max_nentries = id_list->id_count;
1185 			n = min(id_list->id_count,
1186 			    iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
1187 			iocd->stmf_obuf_nentries = n;
1188 			id_entry = id_list->idl_head;
1189 			grp_entry = (stmf_ge_ident_t *)obuf;
1190 			for (i = 0; i < n; i++) {
1191 				bcopy(id_entry->id_data, grp_entry->ident,
1192 				    id_entry->id_data_size);
1193 				grp_entry->ident_size = id_entry->id_data_size;
1194 				id_entry = id_entry->id_next;
1195 				grp_entry++;
1196 			}
1197 		}
1198 		mutex_exit(&stmf_state.stmf_lock);
1199 		break;
1200 
1201 	case STMF_IOCTL_GET_VE_LIST:
1202 		n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1203 		mutex_enter(&stmf_state.stmf_lock);
1204 		ve = (stmf_view_op_entry_t *)obuf;
1205 		for (id_entry = stmf_state.stmf_luid_list.idl_head;
1206 		    id_entry; id_entry = id_entry->id_next) {
1207 			for (view_entry = (stmf_view_entry_t *)
1208 			    id_entry->id_impl_specific; view_entry;
1209 			    view_entry = view_entry->ve_next) {
1210 				iocd->stmf_obuf_max_nentries++;
1211 				if (iocd->stmf_obuf_nentries >= n)
1212 					continue;
1213 				ve->ve_ndx_valid = 1;
1214 				ve->ve_ndx = view_entry->ve_id;
1215 				ve->ve_lu_number_valid = 1;
1216 				bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1217 				bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1218 				    view_entry->ve_luid->id_data_size);
1219 				if (view_entry->ve_hg->id_data[0] == '*') {
1220 					ve->ve_all_hosts = 1;
1221 				} else {
1222 					bcopy(view_entry->ve_hg->id_data,
1223 					    ve->ve_host_group.name,
1224 					    view_entry->ve_hg->id_data_size);
1225 					ve->ve_host_group.name_size =
1226 					    view_entry->ve_hg->id_data_size;
1227 				}
1228 
1229 				if (view_entry->ve_tg->id_data[0] == '*') {
1230 					ve->ve_all_targets = 1;
1231 				} else {
1232 					bcopy(view_entry->ve_tg->id_data,
1233 					    ve->ve_target_group.name,
1234 					    view_entry->ve_tg->id_data_size);
1235 					ve->ve_target_group.name_size =
1236 					    view_entry->ve_tg->id_data_size;
1237 				}
1238 				ve++;
1239 				iocd->stmf_obuf_nentries++;
1240 			}
1241 		}
1242 		mutex_exit(&stmf_state.stmf_lock);
1243 		break;
1244 
1245 	case STMF_IOCTL_LU_VE_LIST:
1246 		p_id = (uint8_t *)ibuf;
1247 		if ((iocd->stmf_ibuf_size != 16) ||
1248 		    (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
1249 			ret = EINVAL;
1250 			break;
1251 		}
1252 
1253 		n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1254 		mutex_enter(&stmf_state.stmf_lock);
1255 		ve = (stmf_view_op_entry_t *)obuf;
1256 		for (id_entry = stmf_state.stmf_luid_list.idl_head;
1257 		    id_entry; id_entry = id_entry->id_next) {
1258 			if (bcmp(id_entry->id_data, p_id, 16) != 0)
1259 				continue;
1260 			for (view_entry = (stmf_view_entry_t *)
1261 			    id_entry->id_impl_specific; view_entry;
1262 			    view_entry = view_entry->ve_next) {
1263 				iocd->stmf_obuf_max_nentries++;
1264 				if (iocd->stmf_obuf_nentries >= n)
1265 					continue;
1266 				ve->ve_ndx_valid = 1;
1267 				ve->ve_ndx = view_entry->ve_id;
1268 				ve->ve_lu_number_valid = 1;
1269 				bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1270 				bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1271 				    view_entry->ve_luid->id_data_size);
1272 				if (view_entry->ve_hg->id_data[0] == '*') {
1273 					ve->ve_all_hosts = 1;
1274 				} else {
1275 					bcopy(view_entry->ve_hg->id_data,
1276 					    ve->ve_host_group.name,
1277 					    view_entry->ve_hg->id_data_size);
1278 					ve->ve_host_group.name_size =
1279 					    view_entry->ve_hg->id_data_size;
1280 				}
1281 
1282 				if (view_entry->ve_tg->id_data[0] == '*') {
1283 					ve->ve_all_targets = 1;
1284 				} else {
1285 					bcopy(view_entry->ve_tg->id_data,
1286 					    ve->ve_target_group.name,
1287 					    view_entry->ve_tg->id_data_size);
1288 					ve->ve_target_group.name_size =
1289 					    view_entry->ve_tg->id_data_size;
1290 				}
1291 				ve++;
1292 				iocd->stmf_obuf_nentries++;
1293 			}
1294 			break;
1295 		}
1296 		mutex_exit(&stmf_state.stmf_lock);
1297 		break;
1298 
1299 	case STMF_IOCTL_LOAD_PP_DATA:
1300 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1301 			ret = EACCES;
1302 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1303 			break;
1304 		}
1305 		ppi = (stmf_ppioctl_data_t *)ibuf;
1306 		if ((ppi == NULL) ||
1307 		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1308 			ret = EINVAL;
1309 			break;
1310 		}
1311 		/* returned token */
1312 		ppi_token = (uint64_t *)obuf;
1313 		if ((ppi_token == NULL) ||
1314 		    (iocd->stmf_obuf_size < sizeof (uint64_t))) {
1315 			ret = EINVAL;
1316 			break;
1317 		}
1318 		ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
1319 		break;
1320 
1321 	case STMF_IOCTL_GET_PP_DATA:
1322 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1323 			ret = EACCES;
1324 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1325 			break;
1326 		}
1327 		ppi = (stmf_ppioctl_data_t *)ibuf;
1328 		if (ppi == NULL ||
1329 		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1330 			ret = EINVAL;
1331 			break;
1332 		}
1333 		ppi_out = (stmf_ppioctl_data_t *)obuf;
1334 		if ((ppi_out == NULL) ||
1335 		    (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
1336 			ret = EINVAL;
1337 			break;
1338 		}
1339 		ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
1340 		break;
1341 
1342 	case STMF_IOCTL_CLEAR_PP_DATA:
1343 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1344 			ret = EACCES;
1345 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1346 			break;
1347 		}
1348 		ppi = (stmf_ppioctl_data_t *)ibuf;
1349 		if ((ppi == NULL) ||
1350 		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1351 			ret = EINVAL;
1352 			break;
1353 		}
1354 		ret = stmf_delete_ppd_ioctl(ppi);
1355 		break;
1356 
1357 	case STMF_IOCTL_CLEAR_TRACE:
1358 		stmf_trace_clear();
1359 		break;
1360 
1361 	case STMF_IOCTL_ADD_TRACE:
1362 		if (iocd->stmf_ibuf_size && ibuf) {
1363 			((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0;
1364 			stmf_trace("\nstradm", "%s\n", ibuf);
1365 		}
1366 		break;
1367 
1368 	case STMF_IOCTL_GET_TRACE_POSITION:
1369 		if (obuf && (iocd->stmf_obuf_size > 3)) {
1370 			mutex_enter(&trace_buf_lock);
1371 			*((int *)obuf) = trace_buf_curndx;
1372 			mutex_exit(&trace_buf_lock);
1373 		} else {
1374 			ret = EINVAL;
1375 		}
1376 		break;
1377 
1378 	case STMF_IOCTL_GET_TRACE:
1379 		if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
1380 			ret = EINVAL;
1381 			break;
1382 		}
1383 		i = *((int *)ibuf);
1384 		if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
1385 		    trace_buf_size)) {
1386 			ret = EINVAL;
1387 			break;
1388 		}
1389 		mutex_enter(&trace_buf_lock);
1390 		bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
1391 		mutex_exit(&trace_buf_lock);
1392 		break;
1393 
1394 	default:
1395 		ret = ENOTTY;
1396 	}
1397 
1398 	if (ret == 0) {
1399 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1400 	} else if (iocd->stmf_error) {
1401 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1402 	}
1403 	if (obuf) {
1404 		kmem_free(obuf, iocd->stmf_obuf_size);
1405 		obuf = NULL;
1406 	}
1407 	if (ibuf) {
1408 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1409 		ibuf = NULL;
1410 	}
1411 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1412 	return (ret);
1413 }
1414 
1415 static int
1416 stmf_get_service_state()
1417 {
1418 	stmf_i_local_port_t *ilport;
1419 	stmf_i_lu_t *ilu;
1420 	int online = 0;
1421 	int offline = 0;
1422 	int onlining = 0;
1423 	int offlining = 0;
1424 
1425 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1426 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1427 	    ilport = ilport->ilport_next) {
1428 		if (ilport->ilport_state == STMF_STATE_OFFLINE)
1429 			offline++;
1430 		else if (ilport->ilport_state == STMF_STATE_ONLINE)
1431 			online++;
1432 		else if (ilport->ilport_state == STMF_STATE_ONLINING)
1433 			onlining++;
1434 		else if (ilport->ilport_state == STMF_STATE_OFFLINING)
1435 			offlining++;
1436 	}
1437 
1438 	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1439 	    ilu = ilu->ilu_next) {
1440 		if (ilu->ilu_state == STMF_STATE_OFFLINE)
1441 			offline++;
1442 		else if (ilu->ilu_state == STMF_STATE_ONLINE)
1443 			online++;
1444 		else if (ilu->ilu_state == STMF_STATE_ONLINING)
1445 			onlining++;
1446 		else if (ilu->ilu_state == STMF_STATE_OFFLINING)
1447 			offlining++;
1448 	}
1449 
1450 	if (stmf_state.stmf_service_running) {
1451 		if (onlining)
1452 			return (STMF_STATE_ONLINING);
1453 		else
1454 			return (STMF_STATE_ONLINE);
1455 	}
1456 
1457 	if (offlining) {
1458 		return (STMF_STATE_OFFLINING);
1459 	}
1460 
1461 	return (STMF_STATE_OFFLINE);
1462 }
1463 
/*
 * Move the whole STMF service between ONLINE and OFFLINE and manage the
 * configuration state (STMF_CONFIG_INIT / STMF_CONFIG_INIT_DONE) on
 * behalf of the STMF_IOCTL_SET_STMF_STATE ioctl.
 *
 * std->state must be STMF_STATE_ONLINE or STMF_STATE_OFFLINE.  Returns
 * 0 on success or an errno: EACCES (no exclusive open, or illegal
 * transition while online), EBUSY (inventory locked or a transition is
 * already in progress), EINVAL (bad state/config combination).
 */
static int
stmf_set_stmf_state(stmf_state_desc_t *std)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_state_change_info_t ssi;
	int svc_state;

	/* All port/LU state changes below are attributed to a user request */
	ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
	ssi.st_additional_info = NULL;

	mutex_enter(&stmf_state.stmf_lock);
	/* Only the exclusive opener (stmfsvc) may change the service state */
	if (!stmf_state.stmf_exclusive_open) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if ((std->state != STMF_STATE_ONLINE) &&
	    (std->state != STMF_STATE_OFFLINE)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EINVAL);
	}

	/* Refuse to start a new transition while one is in flight */
	svc_state = stmf_get_service_state();
	if ((svc_state == STMF_STATE_OFFLINING) ||
	    (svc_state == STMF_STATE_ONLINING)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if (svc_state == STMF_STATE_OFFLINE) {
		/*
		 * Entering config-init: wipe persistent provider data and
		 * the view configuration; service must stay offline.
		 */
		if (std->config_state == STMF_CONFIG_INIT) {
			if (std->state != STMF_STATE_OFFLINE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT;
			stmf_delete_all_ppds();
			stmf_view_clear_config();
			stmf_view_init();
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		/* Finishing config-init requires an explicit INIT_DONE */
		if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
		    (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
			if (std->config_state != STMF_CONFIG_INIT_DONE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
		}
		if (std->state == STMF_STATE_OFFLINE) {
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		/* Cannot go online while configuration is still being built */
		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * Going online: pin the inventory so ports/LUs cannot be
		 * added or removed, then drop the lock before issuing the
		 * (potentially blocking) stmf_ctl() callouts.
		 */
		stmf_state.stmf_inventory_locked = 1;
		stmf_state.stmf_service_running = 1;
		mutex_exit(&stmf_state.stmf_lock);

		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			if (stmf_state.stmf_default_lport_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
			    ilport->ilport_lport, &ssi);
		}

		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (stmf_state.stmf_default_lu_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
		}
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		return (0);
	}

	/* svc_state is STMF_STATE_ONLINE here */
	if ((std->state != STMF_STATE_OFFLINE) ||
	    (std->config_state == STMF_CONFIG_INIT)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	/* Going offline: same inventory-pinning protocol as above */
	stmf_state.stmf_inventory_locked = 1;
	stmf_state.stmf_service_running = 0;

	mutex_exit(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
		    ilport->ilport_lport, &ssi);
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
	}
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_inventory_locked = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}
1584 
1585 static int
1586 stmf_get_stmf_state(stmf_state_desc_t *std)
1587 {
1588 	mutex_enter(&stmf_state.stmf_lock);
1589 	std->state = stmf_get_service_state();
1590 	std->config_state = stmf_state.stmf_config_state;
1591 	mutex_exit(&stmf_state.stmf_lock);
1592 
1593 	return (0);
1594 }
1595 /*
1596  * handles registration message from pppt for a logical unit
1597  */
1598 stmf_status_t
1599 stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
1600 {
1601 	stmf_i_lu_provider_t	*ilp;
1602 	stmf_lu_provider_t	*lp;
1603 	mutex_enter(&stmf_state.stmf_lock);
1604 	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1605 		if (strcmp(msg->icrl_lu_provider_name,
1606 		    ilp->ilp_lp->lp_name) == 0) {
1607 			lp = ilp->ilp_lp;
1608 			mutex_exit(&stmf_state.stmf_lock);
1609 			lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
1610 			    msg->icrl_cb_arg_len, type);
1611 			return (STMF_SUCCESS);
1612 		}
1613 	}
1614 	mutex_exit(&stmf_state.stmf_lock);
1615 	return (STMF_SUCCESS);
1616 }
1617 
1618 /*
1619  * handles de-registration message from pppt for a logical unit
1620  */
1621 stmf_status_t
1622 stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
1623 {
1624 	stmf_i_lu_provider_t	*ilp;
1625 	stmf_lu_provider_t	*lp;
1626 	mutex_enter(&stmf_state.stmf_lock);
1627 	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1628 		if (strcmp(msg->icrl_lu_provider_name,
1629 		    ilp->ilp_lp->lp_name) == 0) {
1630 			lp = ilp->ilp_lp;
1631 			mutex_exit(&stmf_state.stmf_lock);
1632 			lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
1633 			    STMF_MSG_LU_DEREGISTER);
1634 			return (STMF_SUCCESS);
1635 		}
1636 	}
1637 	mutex_exit(&stmf_state.stmf_lock);
1638 	return (STMF_SUCCESS);
1639 }
1640 
1641 /*
1642  * helper function to find a task that matches a task_msgid
1643  */
1644 scsi_task_t *
1645 find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
1646 {
1647 	stmf_i_lu_t *ilu;
1648 	stmf_i_scsi_task_t *itask;
1649 
1650 	mutex_enter(&stmf_state.stmf_lock);
1651 	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
1652 		if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
1653 			break;
1654 		}
1655 	}
1656 
1657 	if (ilu == NULL) {
1658 		mutex_exit(&stmf_state.stmf_lock);
1659 		return (NULL);
1660 	}
1661 
1662 	mutex_enter(&ilu->ilu_task_lock);
1663 	for (itask = ilu->ilu_tasks; itask != NULL;
1664 	    itask = itask->itask_lu_next) {
1665 		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
1666 		    ITASK_BEING_ABORTED)) {
1667 			continue;
1668 		}
1669 		if (itask->itask_proxy_msg_id == task_msgid) {
1670 			break;
1671 		}
1672 	}
1673 	mutex_exit(&ilu->ilu_task_lock);
1674 	mutex_exit(&stmf_state.stmf_lock);
1675 
1676 	if (itask != NULL) {
1677 		return (itask->itask_task);
1678 	} else {
1679 		/* task not found. Likely already aborted. */
1680 		return (NULL);
1681 	}
1682 }
1683 
1684 /*
1685  * message received from pppt/ic
1686  */
1687 stmf_status_t
1688 stmf_msg_rx(stmf_ic_msg_t *msg)
1689 {
1690 	mutex_enter(&stmf_state.stmf_lock);
1691 	if (stmf_state.stmf_alua_state != 1) {
1692 		mutex_exit(&stmf_state.stmf_lock);
1693 		cmn_err(CE_WARN, "stmf alua state is disabled");
1694 		ic_msg_free(msg);
1695 		return (STMF_FAILURE);
1696 	}
1697 	mutex_exit(&stmf_state.stmf_lock);
1698 
1699 	switch (msg->icm_msg_type) {
1700 		case STMF_ICM_REGISTER_LUN:
1701 			(void) stmf_ic_lu_reg(
1702 			    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1703 			    STMF_MSG_LU_REGISTER);
1704 			break;
1705 		case STMF_ICM_LUN_ACTIVE:
1706 			(void) stmf_ic_lu_reg(
1707 			    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1708 			    STMF_MSG_LU_ACTIVE);
1709 			break;
1710 		case STMF_ICM_DEREGISTER_LUN:
1711 			(void) stmf_ic_lu_dereg(
1712 			    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
1713 			break;
1714 		case STMF_ICM_SCSI_DATA:
1715 			(void) stmf_ic_rx_scsi_data(
1716 			    (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
1717 			break;
1718 		case STMF_ICM_SCSI_STATUS:
1719 			(void) stmf_ic_rx_scsi_status(
1720 			    (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
1721 			break;
1722 		case STMF_ICM_STATUS:
1723 			(void) stmf_ic_rx_status(
1724 			    (stmf_ic_status_msg_t *)msg->icm_msg);
1725 			break;
1726 		default:
1727 			cmn_err(CE_WARN, "unknown message received %d",
1728 			    msg->icm_msg_type);
1729 			ic_msg_free(msg);
1730 			return (STMF_FAILURE);
1731 	}
1732 	ic_msg_free(msg);
1733 	return (STMF_SUCCESS);
1734 }
1735 
1736 stmf_status_t
1737 stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
1738 {
1739 	stmf_i_local_port_t *ilport;
1740 
1741 	if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
1742 		/* for now, ignore other message status */
1743 		return (STMF_SUCCESS);
1744 	}
1745 
1746 	if (msg->ics_status != STMF_SUCCESS) {
1747 		return (STMF_SUCCESS);
1748 	}
1749 
1750 	mutex_enter(&stmf_state.stmf_lock);
1751 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1752 	    ilport = ilport->ilport_next) {
1753 		if (msg->ics_msgid == ilport->ilport_reg_msgid) {
1754 			ilport->ilport_proxy_registered = 1;
1755 			break;
1756 		}
1757 	}
1758 	mutex_exit(&stmf_state.stmf_lock);
1759 	return (STMF_SUCCESS);
1760 }
1761 
1762 /*
1763  * handles scsi status message from pppt
1764  */
1765 stmf_status_t
1766 stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
1767 {
1768 	scsi_task_t *task;
1769 
1770 	/* is this a task management command */
1771 	if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
1772 		return (STMF_SUCCESS);
1773 	}
1774 
1775 	task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);
1776 
1777 	if (task == NULL) {
1778 		return (STMF_SUCCESS);
1779 	}
1780 
1781 	task->task_scsi_status = msg->icss_status;
1782 	task->task_sense_data = msg->icss_sense;
1783 	task->task_sense_length = msg->icss_sense_len;
1784 	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
1785 
1786 	return (STMF_SUCCESS);
1787 }
1788 
1789 /*
1790  * handles scsi data message from pppt
1791  */
1792 stmf_status_t
1793 stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
1794 {
1795 	stmf_i_scsi_task_t *itask;
1796 	scsi_task_t *task;
1797 	stmf_xfer_data_t *xd = NULL;
1798 	stmf_data_buf_t *dbuf;
1799 	uint32_t sz, minsz, xd_sz, asz;
1800 
1801 	/* is this a task management command */
1802 	if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
1803 		return (STMF_SUCCESS);
1804 	}
1805 
1806 	task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
1807 	if (task == NULL) {
1808 		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
1809 		static uint64_t data_msg_id;
1810 		stmf_status_t ic_ret = STMF_FAILURE;
1811 		mutex_enter(&stmf_state.stmf_lock);
1812 		data_msg_id = stmf_proxy_msg_id++;
1813 		mutex_exit(&stmf_state.stmf_lock);
1814 		/*
1815 		 * send xfer done status to pppt
1816 		 * for now, set the session id to 0 as we cannot
1817 		 * ascertain it since we cannot find the task
1818 		 */
1819 		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
1820 		    msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
1821 		if (ic_xfer_done_msg) {
1822 			ic_ret = ic_tx_msg(ic_xfer_done_msg);
1823 			if (ic_ret != STMF_IC_MSG_SUCCESS) {
1824 				cmn_err(CE_WARN, "unable to xmit proxy msg");
1825 			}
1826 		}
1827 		return (STMF_FAILURE);
1828 	}
1829 
1830 	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
1831 	dbuf = itask->itask_proxy_dbuf;
1832 
1833 	task->task_cmd_xfer_length += msg->icsd_data_len;
1834 
1835 	if (task->task_additional_flags &
1836 	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
1837 		task->task_expected_xfer_length =
1838 		    task->task_cmd_xfer_length;
1839 	}
1840 
1841 	sz = min(task->task_expected_xfer_length,
1842 	    task->task_cmd_xfer_length);
1843 
1844 	xd_sz = msg->icsd_data_len;
1845 	asz = xd_sz + sizeof (*xd) - 4;
1846 	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
1847 
1848 	if (xd == NULL) {
1849 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1850 		    STMF_ALLOC_FAILURE, NULL);
1851 		return (STMF_FAILURE);
1852 	}
1853 
1854 	xd->alloc_size = asz;
1855 	xd->size_left = xd_sz;
1856 	bcopy(msg->icsd_data, xd->buf, xd_sz);
1857 
1858 	sz = min(sz, xd->size_left);
1859 	xd->size_left = sz;
1860 	minsz = min(512, sz);
1861 
1862 	if (dbuf == NULL)
1863 		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
1864 	if (dbuf == NULL) {
1865 		kmem_free(xd, xd->alloc_size);
1866 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1867 		    STMF_ALLOC_FAILURE, NULL);
1868 		return (STMF_FAILURE);
1869 	}
1870 	dbuf->db_lu_private = xd;
1871 	dbuf->db_relative_offset = task->task_nbytes_transferred;
1872 	stmf_xd_to_dbuf(dbuf, 0);
1873 
1874 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
1875 	(void) stmf_xfer_data(task, dbuf, 0);
1876 	return (STMF_SUCCESS);
1877 }
1878 
/*
 * Forward a SCSI command (or task management function) to the peer
 * node over the ALUA interconnect via pppt.
 *
 * Returns STMF_SUCCESS once the proxy message has been handed to the
 * interconnect; STMF_FAILURE when ALUA is disabled, the local port is
 * not proxy-registered, the task is being aborted, or the message
 * cannot be allocated/transmitted.
 */
stmf_status_t
stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	stmf_ic_msg_t *ic_cmd_msg;
	stmf_ic_msg_status_t ic_ret;
	stmf_status_t ret = STMF_FAILURE;

	/* NOTE(review): stmf_alua_state is read without stmf_lock here */
	if (stmf_state.stmf_alua_state != 1) {
		cmn_err(CE_WARN, "stmf alua state is disabled");
		return (STMF_FAILURE);
	}

	if (ilport->ilport_proxy_registered == 0) {
		return (STMF_FAILURE);
	}

	/* stmf_proxy_msg_id is protected by stmf_lock */
	mutex_enter(&stmf_state.stmf_lock);
	itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
	mutex_exit(&stmf_state.stmf_lock);
	itask->itask_proxy_dbuf = dbuf;

	/*
	 * stmf will now take over the task handling for this task
	 * but it still needs to be treated differently from other
	 * default handled tasks, hence the ITASK_PROXY_TASK.
	 * If this is a task management function, we're really just
	 * duping the command to the peer. Set the TM bit so that
	 * we can recognize this on return since we won't be completing
	 * the proxied task in that case.
	 */
	if (task->task_mgmt_function) {
		itask->itask_proxy_msg_id |= MSG_ID_TM_BIT;
	} else {
		uint32_t new, old;
		/* CAS loop: set proxy flags unless an abort races in */
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_FAILURE);
			new |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}
	/* Include the first data segment when a dbuf is supplied */
	if (dbuf) {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
		    itask->itask_proxy_msg_id);
	} else {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, 0, NULL, itask->itask_proxy_msg_id);
	}
	if (ic_cmd_msg) {
		ic_ret = ic_tx_msg(ic_cmd_msg);
		if (ic_ret == STMF_IC_MSG_SUCCESS) {
			ret = STMF_SUCCESS;
		}
	}
	return (ret);
}
1940 
1941 
1942 stmf_status_t
1943 pppt_modload()
1944 {
1945 	int error;
1946 
1947 	if (pppt_mod == NULL && ((pppt_mod =
1948 	    ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
1949 		cmn_err(CE_WARN, "Unable to load pppt");
1950 		return (STMF_FAILURE);
1951 	}
1952 
1953 	if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
1954 	    (stmf_ic_reg_port_msg_alloc_func_t)
1955 	    ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
1956 	    &error)) == NULL)) {
1957 		cmn_err(CE_WARN,
1958 		    "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
1959 		return (STMF_FAILURE);
1960 	}
1961 
1962 
1963 	if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
1964 	    (stmf_ic_dereg_port_msg_alloc_func_t)
1965 	    ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
1966 	    &error)) == NULL)) {
1967 		cmn_err(CE_WARN,
1968 		    "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
1969 		return (STMF_FAILURE);
1970 	}
1971 
1972 	if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
1973 	    (stmf_ic_reg_lun_msg_alloc_func_t)
1974 	    ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
1975 	    &error)) == NULL)) {
1976 		cmn_err(CE_WARN,
1977 		    "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
1978 		return (STMF_FAILURE);
1979 	}
1980 
1981 	if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
1982 	    (stmf_ic_lun_active_msg_alloc_func_t)
1983 	    ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
1984 	    &error)) == NULL)) {
1985 		cmn_err(CE_WARN,
1986 		    "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
1987 		return (STMF_FAILURE);
1988 	}
1989 
1990 	if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
1991 	    (stmf_ic_dereg_lun_msg_alloc_func_t)
1992 	    ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
1993 	    &error)) == NULL)) {
1994 		cmn_err(CE_WARN,
1995 		    "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
1996 		return (STMF_FAILURE);
1997 	}
1998 
1999 	if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
2000 	    (stmf_ic_scsi_cmd_msg_alloc_func_t)
2001 	    ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
2002 	    &error)) == NULL)) {
2003 		cmn_err(CE_WARN,
2004 		    "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
2005 		return (STMF_FAILURE);
2006 	}
2007 
2008 	if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
2009 	    ((ic_scsi_data_xfer_done_msg_alloc =
2010 	    (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
2011 	    ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
2012 	    &error)) == NULL)) {
2013 		cmn_err(CE_WARN,
2014 		    "Unable to find symbol -"
2015 		    "stmf_ic_scsi_data_xfer_done_msg_alloc");
2016 		return (STMF_FAILURE);
2017 	}
2018 
2019 	if (ic_session_reg_msg_alloc == NULL &&
2020 	    ((ic_session_reg_msg_alloc =
2021 	    (stmf_ic_session_create_msg_alloc_func_t)
2022 	    ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
2023 	    &error)) == NULL)) {
2024 		cmn_err(CE_WARN,
2025 		    "Unable to find symbol -"
2026 		    "stmf_ic_session_create_msg_alloc");
2027 		return (STMF_FAILURE);
2028 	}
2029 
2030 	if (ic_session_dereg_msg_alloc == NULL &&
2031 	    ((ic_session_dereg_msg_alloc =
2032 	    (stmf_ic_session_destroy_msg_alloc_func_t)
2033 	    ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
2034 	    &error)) == NULL)) {
2035 		cmn_err(CE_WARN,
2036 		    "Unable to find symbol -"
2037 		    "stmf_ic_session_destroy_msg_alloc");
2038 		return (STMF_FAILURE);
2039 	}
2040 
2041 	if (ic_tx_msg == NULL && ((ic_tx_msg =
2042 	    (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
2043 	    &error)) == NULL)) {
2044 		cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
2045 		return (STMF_FAILURE);
2046 	}
2047 
2048 	if (ic_msg_free == NULL && ((ic_msg_free =
2049 	    (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
2050 	    &error)) == NULL)) {
2051 		cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
2052 		return (STMF_FAILURE);
2053 	}
2054 	return (STMF_SUCCESS);
2055 }
2056 
2057 static void
2058 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state)
2059 {
2060 	mutex_enter(&stmf_state.stmf_lock);
2061 	alua_state->alua_node = stmf_state.stmf_alua_node;
2062 	alua_state->alua_state = stmf_state.stmf_alua_state;
2063 	mutex_exit(&stmf_state.stmf_lock);
2064 }
2065 
2066 
/*
 * Enable or disable ALUA (proxy) mode.
 *
 * alua_state->alua_state: 1 to enable, 0 to disable.
 * alua_state->alua_node:  node id, 0 or 1; node 1 rebases the relative
 *                         target port id counter so the two nodes do not
 *                         collide.
 *
 * On enable this loads the pppt module, then walks the existing local
 * ports and active logical units, sending a proxy registration message
 * for each.  All of this runs under stmf_state.stmf_lock so the port/lu
 * lists cannot change mid-walk.
 *
 * Returns 0, EINVAL on bad arguments, or EIO if pppt cannot be loaded.
 */
static int
stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_lu_t *lu;
	stmf_ic_msg_status_t ic_ret;
	stmf_ic_msg_t *ic_reg_lun, *ic_reg_port;
	stmf_local_port_t *lport;
	int ret = 0;

	/* Only 0/1 are valid for both the state and the node id. */
	if (alua_state->alua_state > 1 || alua_state->alua_node > 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	if (alua_state->alua_state == 1) {
		/* Resolve the proxy entry points before using them below. */
		if (pppt_modload() == STMF_FAILURE) {
			ret = EIO;
			goto err;
		}
		if (alua_state->alua_node != 0) {
			/* reset existing rtpids to new base */
			stmf_rtpid_counter = 255;
		}
		stmf_state.stmf_alua_node = alua_state->alua_node;
		stmf_state.stmf_alua_state = 1;
		/* register existing local ports with ppp */
		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			/* skip standby ports and non-alua participants */
			if (ilport->ilport_standby == 1 ||
			    ilport->ilport_alua == 0) {
				continue;
			}
			if (alua_state->alua_node != 0) {
				/* node 1 gets rtpids above the new base */
				ilport->ilport_rtpid =
				    atomic_add_16_nv(&stmf_rtpid_counter, 1);
			}
			lport = ilport->ilport_lport;
			ic_reg_port = ic_reg_port_msg_alloc(
			    lport->lport_id, ilport->ilport_rtpid,
			    0, NULL, stmf_proxy_msg_id);
			if (ic_reg_port) {
				ic_ret = ic_tx_msg(ic_reg_port);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					/*
					 * Remember the msg id used for this
					 * registration, then advance it.
					 */
					ilport->ilport_reg_msgid =
					    stmf_proxy_msg_id++;
				} else {
					cmn_err(CE_WARN,
					    "error on port registration "
					    "port - %s",
					    ilport->ilport_kstat_tgt_name);
				}
			}
		}
		/* register existing logical units */
		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			/* only active LUs are proxied */
			if (ilu->ilu_access != STMF_LU_ACTIVE) {
				continue;
			}
			/* register with proxy module */
			lu = ilu->ilu_lu;
			/* the provider must be LPIF_REV_2 and support ALUA */
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_reg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		}
	} else {
		/* disable: no deregistration messages are sent here */
		stmf_state.stmf_alua_state = 0;
	}

err:
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}
2157 
2158 
/*
 * Every object handed out by stmf_alloc() is laid out as:
 *   [shared (public) struct][caller private][__istmf_t framework private]
 * __istmf_t lets stmf_free() recover the original allocation size, and
 * bp points back at the start of the whole allocation.
 */
typedef struct {
	void	*bp;	/* back pointer from internal struct to main struct */
	int	alloc_size;	/* total size passed to kmem_alloc/zalloc */
} __istmf_t;

typedef struct {
	__istmf_t	*fp;	/* Framework private */
	void		*cp;	/* Caller private */
	void		*ss;	/* struct specific */
} __stmf_t;

/*
 * Per-struct-id size table, indexed by stmf_struct_id_t.  Entry 0 is a
 * placeholder since valid struct ids start at 1 (see stmf_alloc()).
 * "shared" is the public structure visible to providers; "fw_private"
 * is the framework-private companion structure.
 */
static struct {
	int shared;
	int fw_private;
} stmf_sizes[] = { { 0, 0 },
	{ GET_STRUCT_SIZE(stmf_lu_provider_t),
		GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
	{ GET_STRUCT_SIZE(stmf_port_provider_t),
		GET_STRUCT_SIZE(stmf_i_port_provider_t) },
	{ GET_STRUCT_SIZE(stmf_local_port_t),
		GET_STRUCT_SIZE(stmf_i_local_port_t) },
	{ GET_STRUCT_SIZE(stmf_lu_t),
		GET_STRUCT_SIZE(stmf_i_lu_t) },
	{ GET_STRUCT_SIZE(stmf_scsi_session_t),
		GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
	{ GET_STRUCT_SIZE(scsi_task_t),
		GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
	{ GET_STRUCT_SIZE(stmf_data_buf_t),
		GET_STRUCT_SIZE(__istmf_t) },
	{ GET_STRUCT_SIZE(stmf_dbuf_store_t),
		GET_STRUCT_SIZE(__istmf_t) }

};
2192 
2193 void *
2194 stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
2195 {
2196 	int stmf_size;
2197 	int kmem_flag;
2198 	__stmf_t *sh;
2199 
2200 	if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
2201 		return (NULL);
2202 
2203 	if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
2204 		kmem_flag = KM_NOSLEEP;
2205 	} else {
2206 		kmem_flag = KM_SLEEP;
2207 	}
2208 
2209 	additional_size = (additional_size + 7) & (~7);
2210 	stmf_size = stmf_sizes[struct_id].shared +
2211 	    stmf_sizes[struct_id].fw_private + additional_size;
2212 
2213 	if (flags & AF_DONTZERO)
2214 		sh = (__stmf_t *)kmem_alloc(stmf_size, kmem_flag);
2215 	else
2216 		sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);
2217 
2218 	if (sh == NULL)
2219 		return (NULL);
2220 
2221 	/*
2222 	 * In principle, the implementation inside stmf_alloc should not
2223 	 * be changed anyway. But the original order of framework private
2224 	 * data and caller private data does not support sglist in the caller
2225 	 * private data.
2226 	 * To work around this, the memory segments of framework private
2227 	 * data and caller private data are re-ordered here.
2228 	 * A better solution is to provide a specific interface to allocate
2229 	 * the sglist, then we will not need this workaround any more.
2230 	 * But before the new interface is available, the memory segment
2231 	 * ordering should be kept as is.
2232 	 */
2233 	sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
2234 	sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
2235 	    stmf_sizes[struct_id].shared + additional_size);
2236 
2237 	sh->fp->bp = sh;
2238 	/* Just store the total size instead of storing additional size */
2239 	sh->fp->alloc_size = stmf_size;
2240 
2241 	return (sh);
2242 }
2243 
2244 void
2245 stmf_free(void *ptr)
2246 {
2247 	__stmf_t *sh = (__stmf_t *)ptr;
2248 
2249 	/*
2250 	 * So far we dont need any struct specific processing. If such
2251 	 * a need ever arises, then store the struct id in the framework
2252 	 * private section and get it here as sh->fp->struct_id.
2253 	 */
2254 	kmem_free(ptr, sh->fp->alloc_size);
2255 }
2256 
2257 /*
2258  * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
2259  * framework and returns a pointer to framework private data for the lu.
2260  * Returns NULL if the lu was not found.
2261  */
2262 stmf_i_lu_t *
2263 stmf_lookup_lu(stmf_lu_t *lu)
2264 {
2265 	stmf_i_lu_t *ilu;
2266 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2267 
2268 	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2269 		if (ilu->ilu_lu == lu)
2270 			return (ilu);
2271 	}
2272 	return (NULL);
2273 }
2274 
2275 /*
2276  * Given a pointer to stmf_local_port_t, verifies if this lport is registered
2277  * with the framework and returns a pointer to framework private data for
2278  * the lport.
2279  * Returns NULL if the lport was not found.
2280  */
2281 stmf_i_local_port_t *
2282 stmf_lookup_lport(stmf_local_port_t *lport)
2283 {
2284 	stmf_i_local_port_t *ilport;
2285 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2286 
2287 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2288 	    ilport = ilport->ilport_next) {
2289 		if (ilport->ilport_lport == lport)
2290 			return (ilport);
2291 	}
2292 	return (NULL);
2293 }
2294 
/*
 * Register a logical unit provider with the framework.  Only LPIF
 * revisions 1 and 2 are accepted.  If persistent provider data was
 * loaded earlier under this provider's name, link it up and deliver it
 * via the provider's callback.
 */
stmf_status_t
stmf_register_lu_provider(stmf_lu_provider_t *lp)
{
	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	/* Push onto the head of the provider list. */
	ilp->ilp_next = stmf_state.stmf_ilplist;
	stmf_state.stmf_ilplist = ilp;
	stmf_state.stmf_nlps++;

	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rlp_bail_out;
	}
	/* Cross-link provider and its persisted data. */
	ilp->ilp_ppd = ppd;
	ppd->ppd_provider = ilp;
	if (lp->lp_cb == NULL)
		goto rlp_bail_out;
	/*
	 * ilp_cb_in_progress blocks deregistration while the lock is
	 * dropped for the callback; the callback must not be invoked
	 * with stmf_lock held.
	 */
	ilp->ilp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_cb_in_progress = 0;

rlp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
2337 
2338 stmf_status_t
2339 stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
2340 {
2341 	stmf_i_lu_provider_t	**ppilp;
2342 	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2343 
2344 	mutex_enter(&stmf_state.stmf_lock);
2345 	if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) {
2346 		mutex_exit(&stmf_state.stmf_lock);
2347 		return (STMF_BUSY);
2348 	}
2349 	for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
2350 	    ppilp = &((*ppilp)->ilp_next)) {
2351 		if (*ppilp == ilp) {
2352 			*ppilp = ilp->ilp_next;
2353 			stmf_state.stmf_nlps--;
2354 			if (ilp->ilp_ppd) {
2355 				ilp->ilp_ppd->ppd_provider = NULL;
2356 				ilp->ilp_ppd = NULL;
2357 			}
2358 			mutex_exit(&stmf_state.stmf_lock);
2359 			return (STMF_SUCCESS);
2360 		}
2361 	}
2362 	mutex_exit(&stmf_state.stmf_lock);
2363 	return (STMF_NOT_FOUND);
2364 }
2365 
/*
 * Register a port provider with the framework.  Only PORTIF revision 1
 * is accepted.  If persistent provider data was loaded earlier under
 * this provider's name, link it up and deliver it via the provider's
 * callback.  Mirrors stmf_register_lu_provider().
 */
stmf_status_t
stmf_register_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (pp->pp_portif_rev != PORTIF_REV_1)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	/* Push onto the head of the provider list. */
	ipp->ipp_next = stmf_state.stmf_ipplist;
	stmf_state.stmf_ipplist = ipp;
	stmf_state.stmf_npps++;
	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rpp_bail_out;
	}
	/* Cross-link provider and its persisted data. */
	ipp->ipp_ppd = ppd;
	ppd->ppd_provider = ipp;
	if (pp->pp_cb == NULL)
		goto rpp_bail_out;
	/*
	 * ipp_cb_in_progress blocks deregistration while the lock is
	 * dropped for the callback; the callback must not be invoked
	 * with stmf_lock held.
	 */
	ipp->ipp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_cb_in_progress = 0;

rpp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
2408 
2409 stmf_status_t
2410 stmf_deregister_port_provider(stmf_port_provider_t *pp)
2411 {
2412 	stmf_i_port_provider_t *ipp =
2413 	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
2414 	stmf_i_port_provider_t **ppipp;
2415 
2416 	mutex_enter(&stmf_state.stmf_lock);
2417 	if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
2418 		mutex_exit(&stmf_state.stmf_lock);
2419 		return (STMF_BUSY);
2420 	}
2421 	for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
2422 	    ppipp = &((*ppipp)->ipp_next)) {
2423 		if (*ppipp == ipp) {
2424 			*ppipp = ipp->ipp_next;
2425 			stmf_state.stmf_npps--;
2426 			if (ipp->ipp_ppd) {
2427 				ipp->ipp_ppd->ppd_provider = NULL;
2428 				ipp->ipp_ppd = NULL;
2429 			}
2430 			mutex_exit(&stmf_state.stmf_lock);
2431 			return (STMF_SUCCESS);
2432 		}
2433 	}
2434 	mutex_exit(&stmf_state.stmf_lock);
2435 	return (STMF_NOT_FOUND);
2436 }
2437 
/*
 * Ioctl backend: store (or replace) persistent provider data for a
 * single lu- or port-provider.  The payload is an XDR-packed nvlist in
 * ppi->ppi_data.  On success the new write token is returned through
 * ppi_token, and if the named provider is currently registered it is
 * notified via its data callback.
 *
 * Returns 0 on success, EINVAL on bad arguments or a stale token
 * (*err_ret = STMF_IOCERR_PPD_UPDATED), ENOMEM, or an nvlist_unpack
 * error code.
 */
int
stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret)
{
	stmf_i_port_provider_t		*ipp;
	stmf_i_lu_provider_t		*ilp;
	stmf_pp_data_t			*ppd;
	nvlist_t			*nv;
	int				s;
	int				ret;

	*err_ret = 0;

	/* Exactly one of lu/port provider must be selected. */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	/* Look for an existing ppd of the same type and name. */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd == NULL) {
		/* New provider */
		s = strlen(ppi->ppi_name);
		if (s > 254) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * sizeof (stmf_pp_data_t) already includes space for part
		 * of the name; the -7 compensates for that overlap.
		 * NOTE(review): this presumes the ppd_name field layout in
		 * stmf_impl.h — verify if that structure ever changes.
		 */
		s += sizeof (stmf_pp_data_t) - 7;

		ppd = kmem_zalloc(s, KM_NOSLEEP);
		if (ppd == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			return (ENOMEM);
		}
		ppd->ppd_alloc_size = s;
		(void) strcpy(ppd->ppd_name, ppi->ppi_name);

		/* See if this provider already exists */
		if (ppi->ppi_lu_provider) {
			ppd->ppd_lu_provider = 1;
			for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
			    ilp = ilp->ilp_next) {
				if (strcmp(ppi->ppi_name,
				    ilp->ilp_lp->lp_name) == 0) {
					ppd->ppd_provider = ilp;
					ilp->ilp_ppd = ppd;
					break;
				}
			}
		} else {
			ppd->ppd_port_provider = 1;
			for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
			    ipp = ipp->ipp_next) {
				if (strcmp(ppi->ppi_name,
				    ipp->ipp_pp->pp_name) == 0) {
					ppd->ppd_provider = ipp;
					ipp->ipp_ppd = ppd;
					break;
				}
			}
		}

		/* Link this ppd in */
		ppd->ppd_next = stmf_state.stmf_ppdlist;
		stmf_state.stmf_ppdlist = ppd;
	}

	/*
	 * User is requesting that the token be checked.
	 * If there was another set after the user's get
	 * it's an error
	 */
	if (ppi->ppi_token_valid) {
		if (ppi->ppi_token != ppd->ppd_token) {
			*err_ret = STMF_IOCERR_PPD_UPDATED;
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
	}

	/* KM_NOSLEEP: we are holding stmf_lock here. */
	if ((ret = nvlist_unpack((char *)ppi->ppi_data,
	    (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
		mutex_exit(&stmf_state.stmf_lock);
		return (ret);
	}

	/* Free any existing lists and add this one to the ppd */
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);
	ppd->ppd_nv = nv;

	/* set the token for writes */
	ppd->ppd_token++;
	/* return token to caller */
	if (ppi_token) {
		*ppi_token = ppd->ppd_token;
	}

	/* If there is a provider registered, do the notifications */
	if (ppd->ppd_provider) {
		uint32_t cb_flags = 0;

		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
			cb_flags |= STMF_PCB_STMF_ONLINING;
		if (ppi->ppi_lu_provider) {
			ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
			if (ilp->ilp_lp->lp_cb == NULL)
				goto bail_out;
			/*
			 * cb_in_progress blocks deregistration while the
			 * lock is dropped around the provider callback.
			 */
			ilp->ilp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ilp->ilp_lp->lp_cb(ilp->ilp_lp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ilp->ilp_cb_in_progress = 0;
		} else {
			ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
			if (ipp->ipp_pp->pp_cb == NULL)
				goto bail_out;
			ipp->ipp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ipp->ipp_pp->pp_cb(ipp->ipp_pp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ipp->ipp_cb_in_progress = 0;
		}
	}

bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}
2580 
2581 void
2582 stmf_delete_ppd(stmf_pp_data_t *ppd)
2583 {
2584 	stmf_pp_data_t **pppd;
2585 
2586 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2587 	if (ppd->ppd_provider) {
2588 		if (ppd->ppd_lu_provider) {
2589 			((stmf_i_lu_provider_t *)
2590 			    ppd->ppd_provider)->ilp_ppd = NULL;
2591 		} else {
2592 			((stmf_i_port_provider_t *)
2593 			    ppd->ppd_provider)->ipp_ppd = NULL;
2594 		}
2595 		ppd->ppd_provider = NULL;
2596 	}
2597 
2598 	for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
2599 	    pppd = &((*pppd)->ppd_next)) {
2600 		if (*pppd == ppd)
2601 			break;
2602 	}
2603 
2604 	if (*pppd == NULL)
2605 		return;
2606 
2607 	*pppd = ppd->ppd_next;
2608 	if (ppd->ppd_nv)
2609 		nvlist_free(ppd->ppd_nv);
2610 
2611 	kmem_free(ppd, ppd->ppd_alloc_size);
2612 }
2613 
2614 int
2615 stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
2616 {
2617 	stmf_pp_data_t *ppd;
2618 	int ret = ENOENT;
2619 
2620 	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2621 		return (EINVAL);
2622 	}
2623 
2624 	mutex_enter(&stmf_state.stmf_lock);
2625 
2626 	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2627 		if (ppi->ppi_lu_provider) {
2628 			if (!ppd->ppd_lu_provider)
2629 				continue;
2630 		} else if (ppi->ppi_port_provider) {
2631 			if (!ppd->ppd_port_provider)
2632 				continue;
2633 		}
2634 		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2635 			break;
2636 	}
2637 
2638 	if (ppd) {
2639 		ret = 0;
2640 		stmf_delete_ppd(ppd);
2641 	}
2642 	mutex_exit(&stmf_state.stmf_lock);
2643 
2644 	return (ret);
2645 }
2646 
2647 int
2648 stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
2649     uint32_t *err_ret)
2650 {
2651 	stmf_pp_data_t *ppd;
2652 	size_t req_size;
2653 	int ret = ENOENT;
2654 	char *bufp = (char *)ppi_out->ppi_data;
2655 
2656 	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2657 		return (EINVAL);
2658 	}
2659 
2660 	mutex_enter(&stmf_state.stmf_lock);
2661 
2662 	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2663 		if (ppi->ppi_lu_provider) {
2664 			if (!ppd->ppd_lu_provider)
2665 				continue;
2666 		} else if (ppi->ppi_port_provider) {
2667 			if (!ppd->ppd_port_provider)
2668 				continue;
2669 		}
2670 		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2671 			break;
2672 	}
2673 
2674 	if (ppd && ppd->ppd_nv) {
2675 		ppi_out->ppi_token = ppd->ppd_token;
2676 		if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
2677 		    NV_ENCODE_XDR)) != 0) {
2678 			goto done;
2679 		}
2680 		ppi_out->ppi_data_size = req_size;
2681 		if (req_size > ppi->ppi_data_size) {
2682 			*err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
2683 			ret = EINVAL;
2684 			goto done;
2685 		}
2686 
2687 		if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
2688 		    NV_ENCODE_XDR, 0)) != 0) {
2689 			goto done;
2690 		}
2691 		ret = 0;
2692 	}
2693 
2694 done:
2695 	mutex_exit(&stmf_state.stmf_lock);
2696 
2697 	return (ret);
2698 }
2699 
2700 void
2701 stmf_delete_all_ppds()
2702 {
2703 	stmf_pp_data_t *ppd, *nppd;
2704 
2705 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2706 	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
2707 		nppd = ppd->ppd_next;
2708 		stmf_delete_ppd(ppd);
2709 	}
2710 }
2711 
2712 /*
2713  * 16 is the max string length of a protocol_ident, increase
2714  * the size if needed.
2715  */
2716 #define	STMF_KSTAT_LU_SZ	(STMF_GUID_INPUT + 1 + 256)
2717 #define	STMF_KSTAT_TGT_SZ	(256 * 2 + 16)
2718 
2719 /*
2720  * This array matches the Protocol Identifier in stmf_ioctl.h
2721  */
2722 #define	MAX_PROTO_STR_LEN	32
2723 
2724 char *protocol_ident[PROTOCOL_ANY] = {
2725 	"Fibre Channel",
2726 	"Parallel SCSI",
2727 	"SSA",
2728 	"IEEE_1394",
2729 	"SRP",
2730 	"iSCSI",
2731 	"SAS",
2732 	"ADT",
2733 	"ATAPI",
2734 	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
2735 };
2736 
2737 /*
2738  * Update the lun wait/run queue count
2739  */
2740 static void
2741 stmf_update_kstat_lu_q(scsi_task_t *task, void func())
2742 {
2743 	stmf_i_lu_t		*ilu;
2744 	kstat_io_t		*kip;
2745 
2746 	if (task->task_lu == dlun0)
2747 		return;
2748 	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2749 	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2750 		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2751 		if (kip != NULL) {
2752 			func(kip);
2753 		}
2754 	}
2755 }
2756 
2757 /*
2758  * Update the target(lport) wait/run queue count
2759  */
2760 static void
2761 stmf_update_kstat_lport_q(scsi_task_t *task, void func())
2762 {
2763 	stmf_i_local_port_t	*ilp;
2764 	kstat_io_t		*kip;
2765 
2766 	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2767 	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2768 		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2769 		if (kip != NULL) {
2770 			mutex_enter(ilp->ilport_kstat_io->ks_lock);
2771 			func(kip);
2772 			mutex_exit(ilp->ilport_kstat_io->ks_lock);
2773 		}
2774 	}
2775 }
2776 
2777 static void
2778 stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2779 {
2780 	stmf_i_local_port_t	*ilp;
2781 	kstat_io_t		*kip;
2782 
2783 	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2784 	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2785 		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2786 		if (kip != NULL) {
2787 			mutex_enter(ilp->ilport_kstat_io->ks_lock);
2788 			STMF_UPDATE_KSTAT_IO(kip, dbuf);
2789 			mutex_exit(ilp->ilport_kstat_io->ks_lock);
2790 		}
2791 	}
2792 }
2793 
2794 static void
2795 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2796 {
2797 	stmf_i_lu_t		*ilu;
2798 	kstat_io_t		*kip;
2799 
2800 	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2801 	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2802 		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2803 		if (kip != NULL) {
2804 			mutex_enter(ilu->ilu_kstat_io->ks_lock);
2805 			STMF_UPDATE_KSTAT_IO(kip, dbuf);
2806 			mutex_exit(ilu->ilu_kstat_io->ks_lock);
2807 		}
2808 	}
2809 }
2810 
/*
 * Create the two kstats for a logical unit: a named "misc" kstat
 * exposing the guid and alias, and an io kstat for queue/transfer
 * statistics.  Failures are logged and tolerated (the LU works without
 * kstats).  Kstat names embed the ilu pointer value to stay unique.
 */
static void
stmf_create_kstat_lu(stmf_i_lu_t *ilu)
{
	char				ks_nm[KSTAT_STRLEN];
	stmf_kstat_lu_info_t		*ks_lu;

	/* create kstat lun info */
	ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
	    KM_NOSLEEP);
	if (ks_lu == NULL) {
		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
		return;
	}

	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		kmem_free(ks_lu, STMF_KSTAT_LU_SZ);
		cmn_err(CE_WARN, "STMF: kstat_create lu failed");
		return;
	}

	/* KSTAT_FLAG_VIRTUAL: we supply (and own) the data buffer. */
	ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ;
	ilu->ilu_kstat_info->ks_data = ks_lu;

	kstat_named_init(&ks_lu->i_lun_guid, "lun-guid",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_lu->i_lun_alias, "lun-alias",
	    KSTAT_DATA_STRING);

	/* convert guid to hex string */
	int		i;
	uint8_t		*p = ilu->ilu_lu->lu_id->ident;
	bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid));
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]);
	}
	kstat_named_setstr(&ks_lu->i_lun_guid,
	    (const char *)ilu->ilu_ascii_hex_guid);
	/* NOTE(review): lu_alias may be NULL here — confirm setstr is ok */
	kstat_named_setstr(&ks_lu->i_lun_alias,
	    (const char *)ilu->ilu_lu->lu_alias);
	kstat_install(ilu->ilu_kstat_info);

	/* create kstat lun io */
	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		cmn_err(CE_WARN, "STMF: kstat_create lu_io failed");
		return;
	}
	/* updaters take ks_lock; see stmf_update_kstat_lu_io() */
	mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0);
	ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock;
	kstat_install(ilu->ilu_kstat_io);
}
2869 
2870 static void
2871 stmf_create_kstat_lport(stmf_i_local_port_t *ilport)
2872 {
2873 	char				ks_nm[KSTAT_STRLEN];
2874 	stmf_kstat_tgt_info_t		*ks_tgt;
2875 	int				id, len;
2876 
2877 	/* create kstat lport info */
2878 	ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ,
2879 	    KM_NOSLEEP);
2880 	if (ks_tgt == NULL) {
2881 		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2882 		return;
2883 	}
2884 
2885 	bzero(ks_nm, sizeof (ks_nm));
2886 	(void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport);
2887 	if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME,
2888 	    0, ks_nm, "misc", KSTAT_TYPE_NAMED,
2889 	    sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
2890 	    KSTAT_FLAG_VIRTUAL)) == NULL) {
2891 		kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
2892 		cmn_err(CE_WARN, "STMF: kstat_create target failed");
2893 		return;
2894 	}
2895 
2896 	ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
2897 	ilport->ilport_kstat_info->ks_data = ks_tgt;
2898 
2899 	kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
2900 	    KSTAT_DATA_STRING);
2901 	kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
2902 	    KSTAT_DATA_STRING);
2903 	kstat_named_init(&ks_tgt->i_protocol, "protocol",
2904 	    KSTAT_DATA_STRING);
2905 
2906 	/* ident might not be null terminated */
2907 	len = ilport->ilport_lport->lport_id->ident_length;
2908 	bcopy(ilport->ilport_lport->lport_id->ident,
2909 	    ilport->ilport_kstat_tgt_name, len);
2910 	ilport->ilport_kstat_tgt_name[len + 1] = NULL;
2911 	kstat_named_setstr(&ks_tgt->i_tgt_name,
2912 	    (const char *)ilport->ilport_kstat_tgt_name);
2913 	kstat_named_setstr(&ks_tgt->i_tgt_alias,
2914 	    (const char *)ilport->ilport_lport->lport_alias);
2915 	/* protocol */
2916 	if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) {
2917 		cmn_err(CE_WARN, "STMF: protocol_id out of bound");
2918 		id = PROTOCOL_ANY;
2919 	}
2920 	kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
2921 	kstat_install(ilport->ilport_kstat_info);
2922 
2923 	/* create kstat lport io */
2924 	bzero(ks_nm, sizeof (ks_nm));
2925 	(void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
2926 	if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2927 	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2928 		cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
2929 		return;
2930 	}
2931 	mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
2932 	ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
2933 	kstat_install(ilport->ilport_kstat_io);
2934 }
2935 
2936 /*
2937  * set the asymmetric access state for a logical unit
2938  * caller is responsible for establishing SCSI unit attention on
2939  * state change
2940  */
2941 stmf_status_t
2942 stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state)
2943 {
2944 	stmf_i_lu_t *ilu;
2945 	uint8_t *p1, *p2;
2946 
2947 	if ((access_state != STMF_LU_STANDBY) &&
2948 	    (access_state != STMF_LU_ACTIVE)) {
2949 		return (STMF_INVALID_ARG);
2950 	}
2951 
2952 	p1 = &lu->lu_id->ident[0];
2953 	mutex_enter(&stmf_state.stmf_lock);
2954 	if (stmf_state.stmf_inventory_locked) {
2955 		mutex_exit(&stmf_state.stmf_lock);
2956 		return (STMF_BUSY);
2957 	}
2958 
2959 	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2960 		p2 = &ilu->ilu_lu->lu_id->ident[0];
2961 		if (bcmp(p1, p2, 16) == 0) {
2962 			break;
2963 		}
2964 	}
2965 
2966 	if (!ilu) {
2967 		ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
2968 	} else {
2969 		/*
2970 		 * We're changing access state on an existing logical unit
2971 		 * Send the proxy registration message for this logical unit
2972 		 * if we're in alua mode.
2973 		 * If the requested state is STMF_LU_ACTIVE, we want to register
2974 		 * this logical unit.
2975 		 * If the requested state is STMF_LU_STANDBY, we're going to
2976 		 * abort all tasks for this logical unit.
2977 		 */
2978 		if (stmf_state.stmf_alua_state == 1 &&
2979 		    access_state == STMF_LU_ACTIVE) {
2980 			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
2981 			stmf_ic_msg_t *ic_reg_lun;
2982 			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
2983 			    lu->lu_lp->lp_alua_support) {
2984 				ilu->ilu_alua = 1;
2985 				/* allocate the register message */
2986 				ic_reg_lun = ic_lun_active_msg_alloc(p1,
2987 				    lu->lu_lp->lp_name,
2988 				    lu->lu_proxy_reg_arg_len,
2989 				    (uint8_t *)lu->lu_proxy_reg_arg,
2990 				    stmf_proxy_msg_id);
2991 				/* send the message */
2992 				if (ic_reg_lun) {
2993 					ic_ret = ic_tx_msg(ic_reg_lun);
2994 					if (ic_ret == STMF_IC_MSG_SUCCESS) {
2995 						stmf_proxy_msg_id++;
2996 					}
2997 				}
2998 			}
2999 		} else if (stmf_state.stmf_alua_state == 1 &&
3000 		    access_state == STMF_LU_STANDBY) {
3001 			/* abort all tasks for this lu */
3002 			stmf_task_lu_killall(lu, NULL, STMF_ABORTED);
3003 		}
3004 	}
3005 
3006 	ilu->ilu_access = access_state;
3007 
3008 	mutex_exit(&stmf_state.stmf_lock);
3009 	return (STMF_SUCCESS);
3010 }
3011 
3012 
/*
 * Register a logical unit with the framework.  The LU must carry a
 * 16-byte NAA identifier.  If ALUA is enabled and the LU is active, a
 * registration message is also sent to the proxy peer.  Finally the LU
 * is brought online when that is the configured default state.
 */
stmf_status_t
stmf_register_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	uint8_t *p1, *p2;
	stmf_state_change_info_t ssci;
	stmf_id_data_t *luid;

	/* Require an NAA (0x6...) identifier of exactly 16 bytes. */
	if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
	    (lu->lu_id->ident_length != 16) ||
	    ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
		return (STMF_INVALID_ARG);
	}
	p1 = &lu->lu_id->ident[0];
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* Reject duplicate registrations of the same identifier. */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_ALREADY);
		}
	}

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	/* Bind the persistent LU id entry to this LU, if one exists. */
	luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
	    lu->lu_id->ident_length, lu->lu_id->ident);
	if (luid) {
		luid->id_pt_to_object = (void *)ilu;
		ilu->ilu_luid = luid;
	}
	ilu->ilu_alias = NULL;

	/* Link into the global LU list (head insertion). */
	ilu->ilu_next = stmf_state.stmf_ilulist;
	ilu->ilu_prev = NULL;
	if (ilu->ilu_next)
		ilu->ilu_next->ilu_prev = ilu;
	stmf_state.stmf_ilulist = ilu;
	stmf_state.stmf_nlus++;
	if (lu->lu_lp) {
		((stmf_i_lu_provider_t *)
		    (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
	}
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
	STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
	stmf_create_kstat_lu(ilu);
	/*
	 * register with proxy module if available and logical unit
	 * is in active state
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilu->ilu_access == STMF_LU_ACTIVE) {
		stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
		stmf_ic_msg_t *ic_reg_lun;
		/* Proxy only for LPIF rev 2 providers with ALUA support. */
		if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
		    lu->lu_lp->lp_alua_support) {
			ilu->ilu_alua = 1;
			/* allocate the register message */
			ic_reg_lun = ic_reg_lun_msg_alloc(p1,
			    lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len,
			    (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id);
			/* send the message */
			if (ic_reg_lun) {
				ic_ret = ic_tx_msg(ic_reg_lun);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					/* id advances on success only */
					stmf_proxy_msg_id++;
				}
			}
		}
	}
	mutex_exit(&stmf_state.stmf_lock);

	/*  check the default state for lu */
	if (stmf_state.stmf_default_lu_state == STMF_STATE_OFFLINE) {
		ilu->ilu_prev_state = STMF_STATE_OFFLINE;
	} else {
		/* Bring the LU online now if the service is running. */
		ilu->ilu_prev_state = STMF_STATE_ONLINE;
		if (stmf_state.stmf_service_running) {
			ssci.st_rflags = 0;
			ssci.st_additional_info = NULL;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci);
		}
	}

	/* XXX: Generate event */
	return (STMF_SUCCESS);
}
3104 
/*
 * Remove a logical unit from the framework.  Succeeds only when the LU
 * is already offline (otherwise STMF_BUSY).  Frees the LU's cached
 * tasks, notifies the ALUA proxy peer when configured, unlinks the LU
 * from the global list and tears down its kstats.
 */
stmf_status_t
stmf_deregister_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	ilu = stmf_lookup_lu(lu);
	if (ilu == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_INVALID_ARG);
	}
	if (ilu->ilu_state == STMF_STATE_OFFLINE) {
		ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
		/* Wait for anyone stalling deregistration to finish. */
		while (ilu->ilu_flags & ILU_STALL_DEREGISTER) {
			cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock);
		}
		/* Free every cached task on this LU's task list. */
		if (ilu->ilu_ntasks) {
			stmf_i_scsi_task_t *itask, *nitask;

			nitask = ilu->ilu_tasks;
			do {
				itask = nitask;
				nitask = itask->itask_lu_next;
				lu->lu_task_free(itask->itask_task);
				stmf_free(itask->itask_task);
			} while (nitask != NULL);

			ilu->ilu_tasks = ilu->ilu_free_tasks = NULL;
			ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0;
		}
		/* de-register with proxy if available */
		if (ilu->ilu_access == STMF_LU_ACTIVE &&
		    stmf_state.stmf_alua_state == 1) {
			/* de-register with proxy module */
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_dereg_lun;
			/* Proxy only for LPIF rev 2 providers with ALUA. */
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the de-register message */
				ic_dereg_lun = ic_dereg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name, 0,
				    NULL, stmf_proxy_msg_id);
				/* send the message */
				if (ic_dereg_lun) {
					ic_ret = ic_tx_msg(ic_dereg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						/* advance id on success only */
						stmf_proxy_msg_id++;
					}
				}
			}
		}

		/* Unlink from the global LU list. */
		if (ilu->ilu_next)
			ilu->ilu_next->ilu_prev = ilu->ilu_prev;
		if (ilu->ilu_prev)
			ilu->ilu_prev->ilu_next = ilu->ilu_next;
		else
			stmf_state.stmf_ilulist = ilu->ilu_next;
		stmf_state.stmf_nlus--;

		/* Don't leave the svc thread's cursors pointing at us. */
		if (ilu == stmf_state.stmf_svc_ilu_draining) {
			stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		}
		if (ilu == stmf_state.stmf_svc_ilu_timing) {
			stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		}
		if (lu->lu_lp) {
			((stmf_i_lu_provider_t *)
			    (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
		}
		/* Detach from the persistent LU id entry, if bound. */
		if (ilu->ilu_luid) {
			((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
			    NULL;
			ilu->ilu_luid = NULL;
		}
		STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* Tear down this LU's kstats. */
	if (ilu->ilu_kstat_info) {
		kmem_free(ilu->ilu_kstat_info->ks_data,
		    ilu->ilu_kstat_info->ks_data_size);
		kstat_delete(ilu->ilu_kstat_info);
	}
	if (ilu->ilu_kstat_io) {
		kstat_delete(ilu->ilu_kstat_io);
		mutex_destroy(&ilu->ilu_kstat_lock);
	}
	stmf_delete_itl_kstat_by_guid(ilu->ilu_ascii_hex_guid);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3203 
3204 void
3205 stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
3206 {
3207 	stmf_i_local_port_t *ilport =
3208 	    (stmf_i_local_port_t *)lport->lport_stmf_private;
3209 	ilport->ilport_rtpid = rtpid;
3210 	ilport->ilport_standby = 1;
3211 }
3212 
3213 void
3214 stmf_set_port_alua(stmf_local_port_t *lport)
3215 {
3216 	stmf_i_local_port_t *ilport =
3217 	    (stmf_i_local_port_t *)lport->lport_stmf_private;
3218 	ilport->ilport_alua = 1;
3219 }
3220 
3221 stmf_status_t
3222 stmf_register_local_port(stmf_local_port_t *lport)
3223 {
3224 	stmf_i_local_port_t *ilport;
3225 	stmf_state_change_info_t ssci;
3226 	int start_workers = 0;
3227 
3228 	mutex_enter(&stmf_state.stmf_lock);
3229 	if (stmf_state.stmf_inventory_locked) {
3230 		mutex_exit(&stmf_state.stmf_lock);
3231 		return (STMF_BUSY);
3232 	}
3233 	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3234 	rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);
3235 
3236 	ilport->ilport_instance =
3237 	    id_alloc_nosleep(stmf_state.stmf_ilport_inst_space);
3238 	if (ilport->ilport_instance == -1) {
3239 		mutex_exit(&stmf_state.stmf_lock);
3240 		return (STMF_FAILURE);
3241 	}
3242 	ilport->ilport_next = stmf_state.stmf_ilportlist;
3243 	ilport->ilport_prev = NULL;
3244 	if (ilport->ilport_next)
3245 		ilport->ilport_next->ilport_prev = ilport;
3246 	stmf_state.stmf_ilportlist = ilport;
3247 	stmf_state.stmf_nlports++;
3248 	if (lport->lport_pp) {
3249 		((stmf_i_port_provider_t *)
3250 		    (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3251 	}
3252 	ilport->ilport_tg =
3253 	    stmf_lookup_group_for_target(lport->lport_id->ident,
3254 	    lport->lport_id->ident_length);
3255 
3256 	/*
3257 	 * rtpid will/must be set if this is a standby port
3258 	 * only register ports that are not standby (proxy) ports
3259 	 * and ports that are alua participants (ilport_alua == 1)
3260 	 */
3261 	if (ilport->ilport_standby == 0) {
3262 		ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1);
3263 	}
3264 
3265 	if (stmf_state.stmf_alua_state == 1 &&
3266 	    ilport->ilport_standby == 0 &&
3267 	    ilport->ilport_alua == 1) {
3268 		stmf_ic_msg_t *ic_reg_port;
3269 		stmf_ic_msg_status_t ic_ret;
3270 		stmf_local_port_t *lport;
3271 		lport = ilport->ilport_lport;
3272 		ic_reg_port = ic_reg_port_msg_alloc(
3273 		    lport->lport_id, ilport->ilport_rtpid,
3274 		    0, NULL, stmf_proxy_msg_id);
3275 		if (ic_reg_port) {
3276 			ic_ret = ic_tx_msg(ic_reg_port);
3277 			if (ic_ret == STMF_IC_MSG_SUCCESS) {
3278 				ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3279 			} else {
3280 				cmn_err(CE_WARN, "error on port registration "
3281 				"port - %s", ilport->ilport_kstat_tgt_name);
3282 			}
3283 		}
3284 	}
3285 	STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
3286 	stmf_create_kstat_lport(ilport);
3287 	if (stmf_workers_state == STMF_WORKERS_DISABLED) {
3288 		stmf_workers_state = STMF_WORKERS_ENABLING;
3289 		start_workers = 1;
3290 	}
3291 	mutex_exit(&stmf_state.stmf_lock);
3292 
3293 	if (start_workers)
3294 		stmf_worker_init();
3295 
3296 	/*  the default state of LPORT */
3297 
3298 	if (stmf_state.stmf_default_lport_state == STMF_STATE_OFFLINE) {
3299 		ilport->ilport_prev_state = STMF_STATE_OFFLINE;
3300 	} else {
3301 		ilport->ilport_prev_state = STMF_STATE_ONLINE;
3302 		if (stmf_state.stmf_service_running) {
3303 			ssci.st_rflags = 0;
3304 			ssci.st_additional_info = NULL;
3305 			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
3306 		}
3307 	}
3308 
3309 	/* XXX: Generate event */
3310 	return (STMF_SUCCESS);
3311 }
3312 
/*
 * Remove a local port from the framework.  Fails with STMF_BUSY if the
 * inventory is locked or the port still has sessions.  Notifies the
 * ALUA proxy peer when configured, unlinks the port and tears down its
 * kstats.
 */
stmf_status_t
stmf_deregister_local_port(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* dequeue all object requests from active queue */
	stmf_svc_kill_obj_requests(lport);

	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;

	/*
	 * deregister ports that are not standby (proxy)
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		stmf_ic_msg_t *ic_dereg_port;
		stmf_ic_msg_status_t ic_ret;
		ic_dereg_port = ic_dereg_port_msg_alloc(
		    lport->lport_id, 0, NULL, stmf_proxy_msg_id);
		if (ic_dereg_port) {
			ic_ret = ic_tx_msg(ic_dereg_port);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* id advances on success only */
				stmf_proxy_msg_id++;
			}
		}
	}

	/* Only a port with no remaining sessions may be removed. */
	if (ilport->ilport_nsessions == 0) {
		/* Unlink from the global port list. */
		if (ilport->ilport_next)
			ilport->ilport_next->ilport_prev = ilport->ilport_prev;
		if (ilport->ilport_prev)
			ilport->ilport_prev->ilport_next = ilport->ilport_next;
		else
			stmf_state.stmf_ilportlist = ilport->ilport_next;
		id_free(stmf_state.stmf_ilport_inst_space,
		    ilport->ilport_instance);
		rw_destroy(&ilport->ilport_lock);
		stmf_state.stmf_nlports--;
		if (lport->lport_pp) {
			((stmf_i_port_provider_t *)
			    (lport->lport_pp->pp_stmf_private))->ipp_npps--;
		}
		ilport->ilport_tg = NULL;
		STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* Tear down this port's kstats. */
	if (ilport->ilport_kstat_info) {
		kmem_free(ilport->ilport_kstat_info->ks_data,
		    ilport->ilport_kstat_info->ks_data_size);
		kstat_delete(ilport->ilport_kstat_info);
	}
	if (ilport->ilport_kstat_io) {
		kstat_delete(ilport->ilport_kstat_io);
		mutex_destroy(&ilport->ilport_kstat_lock);
	}
	stmf_delete_itl_kstat_by_lport(ilport->ilport_kstat_tgt_name);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3381 
3382 /*
3383  * Rport id/instance mappings remain valid until STMF is unloaded
3384  */
3385 static int
3386 stmf_irport_compare(const void *void_irport1, const void *void_irport2)
3387 {
3388 	const	stmf_i_remote_port_t	*irport1 = void_irport1;
3389 	const	stmf_i_remote_port_t	*irport2 = void_irport2;
3390 	int			result;
3391 
3392 	/* Sort by code set then ident */
3393 	if (irport1->irport_id->code_set <
3394 	    irport2->irport_id->code_set) {
3395 		return (-1);
3396 	} else if (irport1->irport_id->code_set >
3397 	    irport2->irport_id->code_set) {
3398 		return (1);
3399 	}
3400 
3401 	/* Next by ident length */
3402 	if (irport1->irport_id->ident_length <
3403 	    irport2->irport_id->ident_length) {
3404 		return (-1);
3405 	} else if (irport1->irport_id->ident_length >
3406 	    irport2->irport_id->ident_length) {
3407 		return (1);
3408 	}
3409 
3410 	/* Code set and ident length both match, now compare idents */
3411 	result = memcmp(irport1->irport_id->ident,
3412 	    irport2->irport_id->ident,
3413 	    irport1->irport_id->ident_length);
3414 
3415 	if (result < 0) {
3416 		return (-1);
3417 	} else if (result > 0) {
3418 		return (1);
3419 	}
3420 
3421 	return (0);
3422 }
3423 
3424 static stmf_i_remote_port_t *
3425 stmf_irport_create(scsi_devid_desc_t *rport_devid)
3426 {
3427 	int			alloc_len;
3428 	stmf_i_remote_port_t	*irport;
3429 
3430 	/*
3431 	 * Lookup will bump the refcnt if there's an existing rport
3432 	 * context for this identifier.
3433 	 */
3434 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3435 
3436 	alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3437 	    rport_devid->ident_length - 1;
3438 	irport = kmem_zalloc(alloc_len, KM_NOSLEEP);
3439 	if (irport == NULL) {
3440 		return (NULL);
3441 	}
3442 
3443 	irport->irport_instance =
3444 	    id_alloc_nosleep(stmf_state.stmf_irport_inst_space);
3445 	if (irport->irport_instance == -1) {
3446 		kmem_free(irport, alloc_len);
3447 		return (NULL);
3448 	}
3449 
3450 	irport->irport_id =
3451 	    (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */
3452 	bcopy(rport_devid, irport->irport_id,
3453 	    sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1);
3454 	irport->irport_refcnt = 1;
3455 	mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL);
3456 
3457 	return (irport);
3458 }
3459 
3460 static void
3461 stmf_irport_destroy(stmf_i_remote_port_t *irport)
3462 {
3463 	id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance);
3464 	mutex_destroy(&irport->irport_mutex);
3465 	kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3466 	    irport->irport_id->ident_length - 1);
3467 }
3468 
3469 static stmf_i_remote_port_t *
3470 stmf_irport_register(scsi_devid_desc_t *rport_devid)
3471 {
3472 	stmf_i_remote_port_t	*irport;
3473 
3474 	mutex_enter(&stmf_state.stmf_lock);
3475 
3476 	/*
3477 	 * Lookup will bump the refcnt if there's an existing rport
3478 	 * context for this identifier.
3479 	 */
3480 	if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) {
3481 		mutex_exit(&stmf_state.stmf_lock);
3482 		return (irport);
3483 	}
3484 
3485 	irport = stmf_irport_create(rport_devid);
3486 	if (irport == NULL) {
3487 		mutex_exit(&stmf_state.stmf_lock);
3488 		return (NULL);
3489 	}
3490 
3491 	avl_add(&stmf_state.stmf_irportlist, irport);
3492 	mutex_exit(&stmf_state.stmf_lock);
3493 
3494 	return (irport);
3495 }
3496 
3497 static stmf_i_remote_port_t *
3498 stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid)
3499 {
3500 	stmf_i_remote_port_t	*irport;
3501 	stmf_i_remote_port_t	tmp_irport;
3502 
3503 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3504 	tmp_irport.irport_id = rport_devid;
3505 	irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL);
3506 	if (irport != NULL) {
3507 		mutex_enter(&irport->irport_mutex);
3508 		irport->irport_refcnt++;
3509 		mutex_exit(&irport->irport_mutex);
3510 	}
3511 
3512 	return (irport);
3513 }
3514 
3515 static void
3516 stmf_irport_deregister(stmf_i_remote_port_t *irport)
3517 {
3518 	/*
3519 	 * If we were actually going to remove unreferenced remote ports
3520 	 * we would want to acquire stmf_state.stmf_lock before getting
3521 	 * the irport mutex.
3522 	 *
3523 	 * Instead we're just going to leave it there even if unreferenced.
3524 	 */
3525 	mutex_enter(&irport->irport_mutex);
3526 	irport->irport_refcnt--;
3527 	mutex_exit(&irport->irport_mutex);
3528 }
3529 
3530 /*
3531  * Port provider has to make sure that register/deregister session and
3532  * port are serialized calls.
3533  */
stmf_status_t
stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_scsi_session_t *iss;
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	uint8_t		lun[8];

	/*
	 * Port state has to be online to register a scsi session. It is
	 * possible that we started an offline operation and a new SCSI
	 * session started at the same time (in that case also we are going
	 * to fail the registeration). But any other state is simply
	 * a bad port provider implementation.
	 */
	if (ilport->ilport_state != STMF_STATE_ONLINE) {
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			stmf_trace(lport->lport_alias, "Port is trying to "
			    "register a session while the state is neither "
			    "online nor offlining");
		}
		return (STMF_FAILURE);
	}
	bzero(lun, 8);
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	/* Create or reference the remote port context for the initiator. */
	if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) {
		stmf_trace(lport->lport_alias, "Could not register "
		    "remote port during session registration");
		return (STMF_FAILURE);
	}

	iss->iss_flags |= ISS_BEING_CREATED;

	if (ss->ss_rport == NULL) {
		/* Synthesize a remote port from the devid for the caller. */
		iss->iss_flags |= ISS_NULL_TPTID;
		ss->ss_rport = stmf_scsilib_devid_to_remote_port(
		    ss->ss_rport_id);
		if (ss->ss_rport == NULL) {
			iss->iss_flags &= ~(ISS_NULL_TPTID | ISS_BEING_CREATED);
			stmf_trace(lport->lport_alias, "Device id to "
			    "remote port conversion failed");
			return (STMF_FAILURE);
		}
	} else {
		/* Caller supplied a remote port; sanity-check its tptid. */
		if (!stmf_scsilib_tptid_validate(ss->ss_rport->rport_tptid,
		    ss->ss_rport->rport_tptid_sz, NULL)) {
			iss->iss_flags &= ~ISS_BEING_CREATED;
			stmf_trace(lport->lport_alias, "Remote port "
			    "transport id validation failed");
			return (STMF_FAILURE);
		}
	}

	/* sessions use the ilport_lock. No separate lock is required */
	iss->iss_lockp = &ilport->ilport_lock;

	if (iss->iss_sm != NULL)
		cmn_err(CE_PANIC, "create lun map called with non NULL map");
	iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
	    KM_SLEEP);

	/* Build the session's LUN map and link it into the port. */
	mutex_enter(&stmf_state.stmf_lock);
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	(void) stmf_session_create_lun_map(ilport, iss);
	ilport->ilport_nsessions++;
	iss->iss_next = ilport->ilport_ss_list;
	ilport->ilport_ss_list = iss;
	rw_exit(&ilport->ilport_lock);
	mutex_exit(&stmf_state.stmf_lock);

	iss->iss_creation_time = ddi_get_time();
	/* Session ids come from a global monotonically increasing counter. */
	ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
	iss->iss_flags &= ~ISS_BEING_CREATED;
	/* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
	iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
	DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);
	return (STMF_SUCCESS);
}
3613 
/*
 * Tear down a SCSI session: waits out any active event delivery,
 * notifies the ALUA proxy peer when configured, unlinks the session
 * from the port, drops the remote port reference and destroys the
 * session's LUN map.  Panics if the session is not on the port's list.
 */
void
stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	stmf_i_scsi_session_t *iss, **ppss;
	int found = 0;
	stmf_ic_msg_t *ic_session_dereg;
	stmf_status_t ic_ret = STMF_FAILURE;

	DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);

	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	if (ss->ss_rport_alias) {
		ss->ss_rport_alias = NULL;
	}

try_dereg_ss_again:
	mutex_enter(&stmf_state.stmf_lock);
	atomic_and_32(&iss->iss_flags,
	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	/* Back off and retry while an event is being delivered. */
	if (iss->iss_flags & ISS_EVENT_ACTIVE) {
		mutex_exit(&stmf_state.stmf_lock);
		delay(1);
		goto try_dereg_ss_again;
	}

	/* dereg proxy session if not standby port */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		ic_session_dereg = ic_session_dereg_msg_alloc(
		    ss, stmf_proxy_msg_id);
		if (ic_session_dereg) {
			ic_ret = ic_tx_msg(ic_session_dereg);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* id advances on success only */
				stmf_proxy_msg_id++;
			}
		}
	}

	/* Unlink the session from the port's singly-linked list. */
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
	    ppss = &((*ppss)->iss_next)) {
		if (iss == (*ppss)) {
			*ppss = (*ppss)->iss_next;
			found = 1;
			break;
		}
	}
	if (!found) {
		cmn_err(CE_PANIC, "Deregister session called for non existent"
		    " session");
	}
	ilport->ilport_nsessions--;

	stmf_irport_deregister(iss->iss_irport);
	(void) stmf_session_destroy_lun_map(ilport, iss);
	rw_exit(&ilport->ilport_lock);
	mutex_exit(&stmf_state.stmf_lock);

	/* Free the remote port only if we synthesized it at registration. */
	if (iss->iss_flags & ISS_NULL_TPTID) {
		stmf_remote_port_free(ss->ss_rport);
	}
}
3680 
3681 stmf_i_scsi_session_t *
3682 stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
3683 {
3684 	stmf_i_local_port_t *ilport;
3685 	stmf_i_scsi_session_t *iss;
3686 
3687 	mutex_enter(&stmf_state.stmf_lock);
3688 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
3689 	    ilport = ilport->ilport_next) {
3690 		rw_enter(&ilport->ilport_lock, RW_WRITER);
3691 		for (iss = ilport->ilport_ss_list; iss != NULL;
3692 		    iss = iss->iss_next) {
3693 			if (iss->iss_ss->ss_session_id == session_id) {
3694 				if (!stay_locked)
3695 					rw_exit(&ilport->ilport_lock);
3696 				mutex_exit(&stmf_state.stmf_lock);
3697 				return (iss);
3698 			}
3699 		}
3700 		rw_exit(&ilport->ilport_lock);
3701 	}
3702 	mutex_exit(&stmf_state.stmf_lock);
3703 	return (NULL);
3704 }
3705 
3706 #define	MAX_ALIAS		128
3707 
3708 static int
3709 stmf_itl_kstat_compare(const void *itl_kstat_1, const void *itl_kstat_2)
3710 {
3711 	const	stmf_i_itl_kstat_t	*kstat_nm1 = itl_kstat_1;
3712 	const	stmf_i_itl_kstat_t	*kstat_nm2 = itl_kstat_2;
3713 	int	ret;
3714 
3715 	ret = strcmp(kstat_nm1->iitl_kstat_nm, kstat_nm2->iitl_kstat_nm);
3716 	if (ret < 0) {
3717 		return (-1);
3718 	} else if (ret > 0) {
3719 		return (1);
3720 	}
3721 	return (0);
3722 }
3723 
3724 static stmf_i_itl_kstat_t *
3725 stmf_itl_kstat_lookup(char *kstat_nm)
3726 {
3727 	stmf_i_itl_kstat_t	tmp;
3728 	stmf_i_itl_kstat_t	*itl_kstat;
3729 
3730 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3731 	(void) strcpy(tmp.iitl_kstat_nm, kstat_nm);
3732 	itl_kstat = avl_find(&stmf_state.stmf_itl_kstat_list, &tmp, NULL);
3733 	return (itl_kstat);
3734 }
3735 
3736 static void
3737 stmf_delete_itl_kstat_by_lport(char *tgt)
3738 {
3739 	stmf_i_itl_kstat_t	*ks_itl, *next;
3740 
3741 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3742 	ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3743 	for (; ks_itl != NULL; ks_itl = next) {
3744 		next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3745 		if (strcmp(ks_itl->iitl_kstat_lport, tgt) == 0) {
3746 			stmf_teardown_itl_kstats(ks_itl);
3747 			avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3748 			kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3749 		}
3750 	}
3751 }
3752 
3753 static void
3754 stmf_delete_itl_kstat_by_guid(char *guid)
3755 {
3756 	stmf_i_itl_kstat_t	*ks_itl, *next;
3757 
3758 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3759 	ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3760 	for (; ks_itl != NULL; ks_itl = next) {
3761 		next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3762 		if (strcmp(ks_itl->iitl_kstat_guid, guid) == 0) {
3763 			stmf_teardown_itl_kstats(ks_itl);
3764 			avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3765 			kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3766 		}
3767 	}
3768 }
3769 
/*
 * Add an entry for this ITL's kstats to the global AVL list so the
 * kstats can be shared across ITL nexus recreations and later torn
 * down by lport name or LU guid.  Returns the existing entry if one
 * with the same name is already present, NULL on allocation failure.
 * Caller holds stmf_lock.
 */
static stmf_i_itl_kstat_t *
stmf_itl_kstat_create(stmf_itl_data_t *itl, char *nm,
    scsi_devid_desc_t *lport, scsi_devid_desc_t *lun)
{
	stmf_i_itl_kstat_t	*ks_itl;
	int			i, len;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	if ((ks_itl = stmf_itl_kstat_lookup(nm)) != NULL)
		return (ks_itl);

	len = sizeof (stmf_i_itl_kstat_t);
	ks_itl = kmem_zalloc(len, KM_NOSLEEP);
	if (ks_itl == NULL)
		return (NULL);

	(void) strcpy(ks_itl->iitl_kstat_nm, nm);
	/* NUL-terminated copy of the target port's ident. */
	bcopy(lport->ident, ks_itl->iitl_kstat_lport, lport->ident_length);
	ks_itl->iitl_kstat_lport[lport->ident_length] = '\0';
	/* Render the LU guid as ascii hex for later guid-based lookup. */
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&ks_itl->iitl_kstat_guid[i * 2], "%02x",
		    lun->ident[i]);
	}
	/* Take over references to the ITL's already-created kstat objects. */
	ks_itl->iitl_kstat_strbuf = itl->itl_kstat_strbuf;
	ks_itl->iitl_kstat_strbuflen = itl->itl_kstat_strbuflen;
	ks_itl->iitl_kstat_info = itl->itl_kstat_info;
	ks_itl->iitl_kstat_taskq = itl->itl_kstat_taskq;
	ks_itl->iitl_kstat_lu_xfer = itl->itl_kstat_lu_xfer;
	ks_itl->iitl_kstat_lport_xfer = itl->itl_kstat_lport_xfer;
	avl_add(&stmf_state.stmf_itl_kstat_list, ks_itl);

	return (ks_itl);
}
3803 
3804 stmf_status_t
3805 stmf_setup_itl_kstats(stmf_itl_data_t *itl)
3806 {
3807 	char				ks_itl_id[32];
3808 	char				ks_nm[KSTAT_STRLEN];
3809 	char				ks_itl_nm[KSTAT_STRLEN];
3810 	stmf_kstat_itl_info_t		*ks_itl;
3811 	stmf_scsi_session_t		*ss;
3812 	stmf_i_scsi_session_t		*iss;
3813 	stmf_i_local_port_t		*ilport;
3814 	char				*strbuf;
3815 	int				id, len, i;
3816 	char				*rport_alias;
3817 	char				*lport_alias;
3818 	char				*lu_alias;
3819 	stmf_i_itl_kstat_t		*tmp_kstat;
3820 
3821 	/*
3822 	 * Allocate enough memory in the ITL to hold the relevant
3823 	 * identifiers.
3824 	 * rport and lport identifiers come from the stmf_scsi_session_t.
3825 	 * ident might not be null terminated.
3826 	 */
3827 	ss = itl->itl_session->iss_ss;
3828 	iss = ss->ss_stmf_private;
3829 	ilport = ss->ss_lport->lport_stmf_private;
3830 	(void) snprintf(ks_itl_id, 32, "%d.%d.%d",
3831 	    iss->iss_irport->irport_instance, ilport->ilport_instance,
3832 	    itl->itl_lun);
3833 
3834 	(void) snprintf(ks_itl_nm, KSTAT_STRLEN, "itl_%s", ks_itl_id);
3835 	/*
3836 	 * let's verify this itl_kstat already exist
3837 	 */
3838 	if ((tmp_kstat = stmf_itl_kstat_lookup(ks_itl_nm)) != NULL) {
3839 		itl->itl_kstat_strbuf = tmp_kstat->iitl_kstat_strbuf;
3840 		itl->itl_kstat_strbuflen = tmp_kstat->iitl_kstat_strbuflen;
3841 		itl->itl_kstat_info = tmp_kstat->iitl_kstat_info;
3842 		itl->itl_kstat_taskq = tmp_kstat->iitl_kstat_taskq;
3843 		itl->itl_kstat_lu_xfer = tmp_kstat->iitl_kstat_lu_xfer;
3844 		itl->itl_kstat_lport_xfer = tmp_kstat->iitl_kstat_lport_xfer;
3845 		return (STMF_SUCCESS);
3846 	}
3847 
3848 	/* New itl_kstat */
3849 	rport_alias = (ss->ss_rport_alias == NULL) ?
3850 	    "" : ss->ss_rport_alias;
3851 	lport_alias = (ss->ss_lport->lport_alias == NULL) ?
3852 	    "" : ss->ss_lport->lport_alias;
3853 	lu_alias = (itl->itl_ilu->ilu_lu->lu_alias == NULL) ?
3854 	    "" : itl->itl_ilu->ilu_lu->lu_alias;
3855 
3856 	itl->itl_kstat_strbuflen = (ss->ss_rport_id->ident_length + 1) +
3857 	    (strnlen(rport_alias, MAX_ALIAS) + 1) +
3858 	    (ss->ss_lport->lport_id->ident_length + 1) +
3859 	    (strnlen(lport_alias, MAX_ALIAS) + 1) +
3860 	    (STMF_GUID_INPUT + 1) +
3861 	    (strnlen(lu_alias, MAX_ALIAS) + 1) +
3862 	    MAX_PROTO_STR_LEN;
3863 	itl->itl_kstat_strbuf = kmem_zalloc(itl->itl_kstat_strbuflen,
3864 	    KM_NOSLEEP);
3865 	if (itl->itl_kstat_strbuf == NULL) {
3866 		return (STMF_ALLOC_FAILURE);
3867 	}
3868 
3869 	ks_itl = (stmf_kstat_itl_info_t *)kmem_zalloc(sizeof (*ks_itl),
3870 	    KM_NOSLEEP);
3871 	if (ks_itl == NULL) {
3872 		kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
3873 		return (STMF_ALLOC_FAILURE);
3874 	}
3875 
3876 	if ((itl->itl_kstat_info = kstat_create(STMF_MODULE_NAME,
3877 	    0, ks_itl_nm, "misc", KSTAT_TYPE_NAMED,
3878 	    sizeof (stmf_kstat_itl_info_t) / sizeof (kstat_named_t),
3879 	    KSTAT_FLAG_VIRTUAL)) == NULL) {
3880 		goto itl_kstat_cleanup;
3881 	}
3882 
3883 	itl->itl_kstat_info->ks_data_size += itl->itl_kstat_strbuflen;
3884 	itl->itl_kstat_info->ks_data = ks_itl;
3885 
3886 	kstat_named_init(&ks_itl->i_rport_name, "rport-name",
3887 	    KSTAT_DATA_STRING);
3888 	kstat_named_init(&ks_itl->i_rport_alias, "rport-alias",
3889 	    KSTAT_DATA_STRING);
3890 	kstat_named_init(&ks_itl->i_lport_name, "lport-name",
3891 	    KSTAT_DATA_STRING);
3892 	kstat_named_init(&ks_itl->i_lport_alias, "lport-alias",
3893 	    KSTAT_DATA_STRING);
3894 	kstat_named_init(&ks_itl->i_protocol, "protocol",
3895 	    KSTAT_DATA_STRING);
3896 	kstat_named_init(&ks_itl->i_lu_guid, "lu-guid",
3897 	    KSTAT_DATA_STRING);
3898 	kstat_named_init(&ks_itl->i_lu_alias, "lu-alias",
3899 	    KSTAT_DATA_STRING);
3900 	kstat_named_init(&ks_itl->i_lu_number, "lu-number",
3901 	    KSTAT_DATA_UINT64);
3902 	kstat_named_init(&ks_itl->i_task_waitq_elapsed, "task-waitq-elapsed",
3903 	    KSTAT_DATA_UINT64);
3904 	kstat_named_init(&ks_itl->i_task_read_elapsed, "task-read-elapsed",
3905 	    KSTAT_DATA_UINT64);
3906 	kstat_named_init(&ks_itl->i_task_write_elapsed, "task-write-elapsed",
3907 	    KSTAT_DATA_UINT64);
3908 	kstat_named_init(&ks_itl->i_lu_read_elapsed, "lu-read-elapsed",
3909 	    KSTAT_DATA_UINT64);
3910 	kstat_named_init(&ks_itl->i_lu_write_elapsed, "lu-write-elapsed",
3911 	    KSTAT_DATA_UINT64);
3912 	kstat_named_init(&ks_itl->i_lport_read_elapsed, "lport-read-elapsed",
3913 	    KSTAT_DATA_UINT64);
3914 	kstat_named_init(&ks_itl->i_lport_write_elapsed, "lport-write-elapsed",
3915 	    KSTAT_DATA_UINT64);
3916 
3917 	strbuf = itl->itl_kstat_strbuf;
3918 
3919 	/* Rport */
3920 	len = ss->ss_rport_id->ident_length;
3921 	bcopy(ss->ss_rport_id->ident, strbuf, len);
3922 	strbuf += len;
3923 	*strbuf = '\0';
3924 	kstat_named_setstr(&ks_itl->i_rport_name, strbuf - len);
3925 	strbuf++;
3926 
3927 	len = strnlen(rport_alias, MAX_ALIAS);
3928 	(void) strncpy(strbuf, rport_alias, len + 1);
3929 	kstat_named_setstr(&ks_itl->i_rport_alias, strbuf);
3930 	strbuf += len + 1;
3931 
3932 	/* Lport */
3933 	len = ss->ss_lport->lport_id->ident_length;
3934 	bcopy(ss->ss_lport->lport_id->ident, strbuf, len);
3935 	strbuf += len;
3936 	*strbuf = '\0';
3937 	kstat_named_setstr(&ks_itl->i_lport_name, strbuf - len);
3938 	strbuf++;
3939 
3940 	len = strnlen(lport_alias, MAX_ALIAS);
3941 	(void) strncpy(strbuf, lport_alias, len + 1);
3942 	kstat_named_setstr(&ks_itl->i_lport_alias, strbuf);
3943 	strbuf += len + 1;
3944 
3945 	id = (ss->ss_lport->lport_id->protocol_id > PROTOCOL_ANY) ?
3946 	    PROTOCOL_ANY : ss->ss_lport->lport_id->protocol_id;
3947 	kstat_named_setstr(&ks_itl->i_protocol, protocol_ident[id]);
3948 
3949 	/* LU */
3950 	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
3951 		(void) sprintf(&strbuf[i * 2], "%02x",
3952 		    itl->itl_ilu->ilu_lu->lu_id->ident[i]);
3953 	}
3954 	kstat_named_setstr(&ks_itl->i_lu_guid, strbuf);
3955 	strbuf += STMF_GUID_INPUT + 1;
3956 
3957 	len = strnlen(lu_alias, MAX_ALIAS);
3958 	(void) strncpy(strbuf, lu_alias, len + 1);
3959 	kstat_named_setstr(&ks_itl->i_lu_alias, strbuf);
3960 	strbuf += len + 1;
3961 
3962 	ks_itl->i_lu_number.value.ui64 = itl->itl_lun;
3963 
3964 	/* Now create the I/O kstats */
3965 	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_tasks_%s",  ks_itl_id);
3966 	if ((itl->itl_kstat_taskq = kstat_create(STMF_MODULE_NAME, 0,
3967 	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3968 		goto itl_kstat_cleanup;
3969 	}
3970 
3971 	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lu_%s",  ks_itl_id);
3972 	if ((itl->itl_kstat_lu_xfer = kstat_create(STMF_MODULE_NAME, 0,
3973 	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3974 		goto itl_kstat_cleanup;
3975 	}
3976 
3977 	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lport_%s",  ks_itl_id);
3978 	if ((itl->itl_kstat_lport_xfer = kstat_create(STMF_MODULE_NAME, 0,
3979 	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3980 		goto itl_kstat_cleanup;
3981 	}
3982 
3983 	/* Install all the kstats */
3984 	kstat_install(itl->itl_kstat_info);
3985 	kstat_install(itl->itl_kstat_taskq);
3986 	kstat_install(itl->itl_kstat_lu_xfer);
3987 	kstat_install(itl->itl_kstat_lport_xfer);
3988 
3989 	/* Add new itl_kstat to stmf_itl_kstat_list */
3990 	if (stmf_itl_kstat_create(itl, ks_itl_nm, ss->ss_lport->lport_id,
3991 	    itl->itl_ilu->ilu_lu->lu_id) != NULL)
3992 		return (STMF_SUCCESS);
3993 
3994 itl_kstat_cleanup:
3995 	if (itl->itl_kstat_taskq)
3996 		kstat_delete(itl->itl_kstat_taskq);
3997 	if (itl->itl_kstat_lu_xfer)
3998 		kstat_delete(itl->itl_kstat_lu_xfer);
3999 	if (itl->itl_kstat_lport_xfer)
4000 		kstat_delete(itl->itl_kstat_lport_xfer);
4001 	if (itl->itl_kstat_info)
4002 		kstat_delete(itl->itl_kstat_info);
4003 	kmem_free(ks_itl, sizeof (*ks_itl));
4004 	kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
4005 	cmn_err(CE_WARN, "STMF: kstat_create itl failed");
4006 	return (STMF_ALLOC_FAILURE);
4007 }
4008 
/*
 * Tear down the kstats that were created for a cached ITL nexus entry:
 * delete the three I/O kstats, free the named-kstat data area and the
 * string buffer that backed the KSTAT_DATA_STRING entries.
 */
static void
stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks)
{
	kstat_delete(ks->iitl_kstat_lport_xfer);
	kstat_delete(ks->iitl_kstat_lu_xfer);
	kstat_delete(ks->iitl_kstat_taskq);
	/*
	 * The info kstat's ks_data was allocated separately, so it is
	 * freed here before the kstat itself is deleted.
	 */
	kmem_free(ks->iitl_kstat_info->ks_data, sizeof (stmf_kstat_itl_info_t));
	kstat_delete(ks->iitl_kstat_info);
	kmem_free(ks->iitl_kstat_strbuf, ks->iitl_kstat_strbuflen);
}
4019 
/*
 * Unlink an ITL data structure from its LU's itl list and free it.
 * The caller must already have marked it STMF_ITL_BEING_TERMINATED
 * (asserted).  The LU is notified via its lu_abort entry point with
 * STMF_LU_ITL_HANDLE_REMOVED so it can release its per-ITL handle
 * state; itl_hdlrm_reason tells the LU why the handle went away.
 */
void
stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
{
	stmf_itl_data_t **itlpp;
	stmf_i_lu_t *ilu;

	ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	mutex_enter(&ilu->ilu_task_lock);
	/* Find the link that points at this itl so it can be unlinked. */
	for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
	    itlpp = &(*itlpp)->itl_next) {
		if ((*itlpp) == itl)
			break;
	}
	ASSERT((*itlpp) != NULL);	/* itl must be on the LU's list */
	*itlpp = itl->itl_next;
	mutex_exit(&ilu->ilu_task_lock);
	lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
	    (uint32_t)itl->itl_hdlrm_reason);

	kmem_free(itl, sizeof (*itl));
}
4043 
/*
 * Register an LU-provided itl_handle for the nexus (lu, lun, session).
 * The session may be passed directly via ss, or located by session_id.
 * On success a new stmf_itl_data_t is allocated, linked on the LU's itl
 * list and attached to the session's lun map entry.
 *
 * Returns:
 *	STMF_SUCCESS		handle registered
 *	STMF_NOT_FOUND		session or matching lun map entry not found
 *	STMF_ALREADY		this nexus already has an itl_handle
 *	STMF_ALLOC_FAILURE	itl allocation or kstat setup failed
 */
stmf_status_t
stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
	stmf_itl_data_t *itl;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_ent_t *lun_map_ent;
	stmf_i_lu_t *ilu;
	uint16_t n;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	if (ss == NULL) {
		/*
		 * NOTE(review): stmf_deregister_itl_handle() treats a
		 * second argument of 1 as "return with iss_lockp held",
		 * yet this path rw_enter()s the same lock again below and
		 * also takes stmf_lock after it (see the lock-order
		 * comment at the top of the file).  Confirm the locking
		 * contract of stmf_session_id_to_issptr() for this path.
		 */
		iss = stmf_session_id_to_issptr(session_id, 1);
		if (iss == NULL)
			return (STMF_NOT_FOUND);
	} else {
		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	}

	/*
	 * Acquire stmf_lock for stmf_itl_kstat_lookup.
	 */
	mutex_enter(&stmf_state.stmf_lock);
	rw_enter(iss->iss_lockp, RW_WRITER);
	/* Decode the 14-bit LUN number from the SCSI LUN field */
	n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	lun_map_ent = (stmf_lun_map_ent_t *)
	    stmf_get_ent_from_map(iss->iss_sm, n);
	if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_NOT_FOUND);
	}
	if (lun_map_ent->ent_itl_datap != NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALREADY);
	}

	itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
	if (itl == NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALLOC_FAILURE);
	}

	itl->itl_ilu = ilu;
	itl->itl_session = iss;
	itl->itl_counter = 1;	/* the registration's own reference */
	itl->itl_lun = n;
	itl->itl_handle = itl_handle;

	if (stmf_setup_itl_kstats(itl) != STMF_SUCCESS) {
		kmem_free(itl, sizeof (*itl));
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALLOC_FAILURE);
	}

	/* Link onto the LU's itl list and publish in the lun map entry */
	mutex_enter(&ilu->ilu_task_lock);
	itl->itl_next = ilu->ilu_itl_list;
	ilu->ilu_itl_list = itl;
	mutex_exit(&ilu->ilu_task_lock);
	lun_map_ent->ent_itl_datap = itl;
	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
4112 
/*
 * Drop the registration's reference on an ITL and mark it as being
 * terminated.  Only the first caller to set STMF_ITL_BEING_TERMINATED
 * proceeds; a later caller returns immediately.  If the reference count
 * drops to zero here the handle is released via
 * stmf_release_itl_handle(); hdlrm_reason is recorded for the LU.
 */
void
stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
{
	uint8_t old, new;

	/* Atomically set BEING_TERMINATED; losing the race means done */
	do {
		old = new = itl->itl_flags;
		if (old & STMF_ITL_BEING_TERMINATED)
			return;
		new |= STMF_ITL_BEING_TERMINATED;
	} while (atomic_cas_8(&itl->itl_flags, old, new) != old);
	itl->itl_hdlrm_reason = hdlrm_reason;

	ASSERT(itl->itl_counter);

	/* Drop the registration's reference; tasks may still hold more */
	if (atomic_add_32_nv(&itl->itl_counter, -1))
		return;

	/*
	 * NOTE(review): this brief wait-and-recheck appears intended to
	 * let a racing reference holder take over the release; it is a
	 * heuristic, not a guaranteed synchronization -- confirm.
	 */
	drv_usecwait(10);
	if (itl->itl_counter)
		return;

	stmf_release_itl_handle(lu, itl);
}
4137 
/*
 * Deregister every itl handle currently associated with the given LU.
 * All local ports and their sessions are scanned for lun map entries
 * pointing at lu; their itl data pointers are detached under the locks
 * and collected, then each one is deregistered with no locks held.
 * If the LU's reference count changes while the holding array is being
 * allocated (KM_SLEEP can block), the whole scan is restarted.
 *
 * Returns STMF_NOT_FOUND if the LU has no references, else STMF_SUCCESS.
 */
stmf_status_t
stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *ent;
	uint32_t nmaps, nu;
	stmf_itl_data_t **itl_list;
	int i;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

dereg_itl_start:;
	nmaps = ilu->ilu_ref_cnt;
	if (nmaps == 0)
		return (STMF_NOT_FOUND);
	/* Sleeping allocation is done before taking stmf_lock */
	itl_list = (stmf_itl_data_t **)kmem_zalloc(
	    nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP);
	mutex_enter(&stmf_state.stmf_lock);
	if (nmaps != ilu->ilu_ref_cnt) {
		/* Something changed, start all over */
		mutex_exit(&stmf_state.stmf_lock);
		kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
		goto dereg_itl_start;
	}
	nu = 0;
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			lm = iss->iss_sm;
			if (!lm)
				continue;
			for (i = 0; i < lm->lm_nentries; i++) {
				if (lm->lm_plus[i] == NULL)
					continue;
				ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
				if ((ent->ent_lu == lu) &&
				    (ent->ent_itl_datap)) {
					/* Detach now, deregister later */
					itl_list[nu++] = ent->ent_itl_datap;
					ent->ent_itl_datap = NULL;
					if (nu == nmaps) {
						rw_exit(&ilport->ilport_lock);
						goto dai_scan_done;
					}
				}
			} /* lun table for a session */
		} /* sessions */
		rw_exit(&ilport->ilport_lock);
	} /* ports */

dai_scan_done:
	mutex_exit(&stmf_state.stmf_lock);

	/* Deregister the collected handles with no locks held */
	for (i = 0; i < nu; i++) {
		stmf_do_itl_dereg(lu, itl_list[i],
		    STMF_ITL_REASON_DEREG_REQUEST);
	}
	kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));

	return (STMF_SUCCESS);
}
4203 
/*
 * Deregister the itl handle for a single nexus on (lu, session).  The
 * session is given directly via ss or looked up by session_id.  The lun
 * map entry is found by decoding lun, or -- when lun is NULL -- by
 * scanning the session's lun map for the entry whose itl data carries
 * itl_handle.  The itl data is detached under the session lock and then
 * deregistered via stmf_do_itl_dereg().
 *
 * Returns STMF_SUCCESS, STMF_INVALID_ARG or STMF_NOT_FOUND.
 */
stmf_status_t
stmf_deregister_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
	stmf_i_scsi_session_t *iss;
	stmf_itl_data_t *itl;
	stmf_lun_map_ent_t *ent;
	stmf_lun_map_t *lm;
	int i;
	uint16_t n;

	if (ss == NULL) {
		if (session_id == STMF_SESSION_ID_NONE)
			return (STMF_INVALID_ARG);
		/* 2nd arg == 1: returned iss has iss_lockp held */
		iss = stmf_session_id_to_issptr(session_id, 1);
		if (iss == NULL)
			return (STMF_NOT_FOUND);
	} else {
		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
		rw_enter(iss->iss_lockp, RW_WRITER);
	}
	lm = iss->iss_sm;
	if (lm == NULL) {
		rw_exit(iss->iss_lockp);
		return (STMF_NOT_FOUND);
	}

	if (lun) {
		/* Decode the 14-bit LUN number from the SCSI LUN field */
		n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
		ent = (stmf_lun_map_ent_t *)
		    stmf_get_ent_from_map(iss->iss_sm, n);
	} else {
		if (itl_handle == NULL) {
			rw_exit(iss->iss_lockp);
			return (STMF_INVALID_ARG);
		}
		/* No LUN given: search the map for the matching handle */
		ent = NULL;
		for (i = 0; i < lm->lm_nentries; i++) {
			if (lm->lm_plus[i] == NULL)
				continue;
			ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
			if (ent->ent_itl_datap &&
			    (ent->ent_itl_datap->itl_handle == itl_handle)) {
				break;
			}
		}
	}
	/*
	 * NOTE(review): if the scan above falls through without a match,
	 * ent is left pointing at the last entry examined; the checks
	 * below are the only guard -- verify a same-LU entry with a
	 * different handle cannot slip past them.
	 */
	if ((ent == NULL) || (ent->ent_lu != lu) ||
	    (ent->ent_itl_datap == NULL)) {
		rw_exit(iss->iss_lockp);
		return (STMF_NOT_FOUND);
	}
	itl = ent->ent_itl_datap;
	ent->ent_itl_datap = NULL;	/* detach before deregistering */
	rw_exit(iss->iss_lockp);
	stmf_do_itl_dereg(lu, itl, STMF_ITL_REASON_DEREG_REQUEST);

	return (STMF_SUCCESS);
}
4263 
4264 stmf_status_t
4265 stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss,
4266     uint64_t session_id, void **itl_handle_retp)
4267 {
4268 	stmf_i_scsi_session_t *iss;
4269 	stmf_lun_map_ent_t *ent;
4270 	stmf_lun_map_t *lm;
4271 	stmf_status_t ret;
4272 	int i;
4273 	uint16_t n;
4274 
4275 	if (ss == NULL) {
4276 		iss = stmf_session_id_to_issptr(session_id, 1);
4277 		if (iss == NULL)
4278 			return (STMF_NOT_FOUND);
4279 	} else {
4280 		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4281 		rw_enter(iss->iss_lockp, RW_WRITER);
4282 	}
4283 
4284 	ent = NULL;
4285 	if (lun == NULL) {
4286 		lm = iss->iss_sm;
4287 		for (i = 0; i < lm->lm_nentries; i++) {
4288 			if (lm->lm_plus[i] == NULL)
4289 				continue;
4290 			ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4291 			if (ent->ent_lu == lu)
4292 				break;
4293 		}
4294 	} else {
4295 		n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4296 		ent = (stmf_lun_map_ent_t *)
4297 		    stmf_get_ent_from_map(iss->iss_sm, n);
4298 		if (lu && (ent->ent_lu != lu))
4299 			ent = NULL;
4300 	}
4301 	if (ent && ent->ent_itl_datap) {
4302 		*itl_handle_retp = ent->ent_itl_datap->itl_handle;
4303 		ret = STMF_SUCCESS;
4304 	} else {
4305 		ret = STMF_NOT_FOUND;
4306 	}
4307 
4308 	rw_exit(iss->iss_lockp);
4309 	return (ret);
4310 }
4311 
4312 stmf_data_buf_t *
4313 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
4314     uint32_t flags)
4315 {
4316 	stmf_i_scsi_task_t *itask =
4317 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4318 	stmf_local_port_t *lport = task->task_lport;
4319 	stmf_data_buf_t *dbuf;
4320 	uint8_t ndx;
4321 
4322 	ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4323 	if (ndx == 0xff)
4324 		return (NULL);
4325 	dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf(
4326 	    task, size, pminsize, flags);
4327 	if (dbuf) {
4328 		task->task_cur_nbufs++;
4329 		itask->itask_allocated_buf_map |= (1 << ndx);
4330 		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
4331 		dbuf->db_handle = ndx;
4332 		return (dbuf);
4333 	}
4334 
4335 	return (NULL);
4336 }
4337 
4338 stmf_status_t
4339 stmf_setup_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t flags)
4340 {
4341 	stmf_i_scsi_task_t *itask =
4342 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4343 	stmf_local_port_t *lport = task->task_lport;
4344 	uint8_t ndx;
4345 	stmf_status_t ret;
4346 
4347 	ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4348 	ASSERT(lport->lport_ds->ds_setup_dbuf != NULL);
4349 	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4350 
4351 	if ((task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF) == 0)
4352 		return (STMF_FAILURE);
4353 	if (lport->lport_ds->ds_setup_dbuf == NULL)
4354 		return (STMF_FAILURE);
4355 
4356 	ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4357 	if (ndx == 0xff)
4358 		return (STMF_FAILURE);
4359 	ret = lport->lport_ds->ds_setup_dbuf(task, dbuf, flags);
4360 	if (ret == STMF_FAILURE)
4361 		return (STMF_FAILURE);
4362 	itask->itask_dbufs[ndx] = dbuf;
4363 	task->task_cur_nbufs++;
4364 	itask->itask_allocated_buf_map |= (1 << ndx);
4365 	dbuf->db_handle = ndx;
4366 
4367 	return (STMF_SUCCESS);
4368 }
4369 
/*
 * Release an LU-provided data buffer's dbuf slot and have the port's
 * data store tear down its side of the buffer.  Counterpart of
 * stmf_setup_dbuf(); only valid for DB_LU_DATA_BUF buffers (asserted).
 */
void
stmf_teardown_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_local_port_t *lport = task->task_lport;

	ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
	ASSERT(lport->lport_ds->ds_teardown_dbuf != NULL);
	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	/* Free the slot before handing the buffer back to the port */
	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
	task->task_cur_nbufs--;
	lport->lport_ds->ds_teardown_dbuf(lport->lport_ds, dbuf);
}
4385 
/*
 * Free a data buffer obtained via stmf_alloc_dbuf(): release its dbuf
 * slot and return the buffer to the port's data store.
 */
void
stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_local_port_t *lport = task->task_lport;

	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
	task->task_cur_nbufs--;
	lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf);
}
4397 
4398 stmf_data_buf_t *
4399 stmf_handle_to_buf(scsi_task_t *task, uint8_t h)
4400 {
4401 	stmf_i_scsi_task_t *itask;
4402 
4403 	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4404 	if (h > 3)
4405 		return (NULL);
4406 	return (itask->itask_dbufs[h]);
4407 }
4408 
/*
 * Allocate a scsi_task_t for a new command arriving on the given
 * session/port, reusing a task from the LU's free list when one with a
 * large enough CDB buffer is available.  The target LU is chosen from
 * the session's lun map entry for the decoded LUN number; if no entry
 * exists the command is routed to dlun0.  Returns NULL if the LU is
 * resetting or allocation fails.  The returned task holds a reference
 * on the lun map entry's itl data (if any) and is charged against the
 * LU's current task counter.
 */
/* ARGSUSED */
struct scsi_task *
stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
			uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id)
{
	stmf_lu_t *lu;
	stmf_i_scsi_session_t *iss;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_task_t *itask;
	stmf_i_scsi_task_t **ppitask;
	scsi_task_t *task;
	uint8_t	*l;
	stmf_lun_map_ent_t *lun_map_ent;
	uint16_t cdb_length;
	uint16_t luNbr;
	uint8_t new_task = 0;

	/*
	 * We allocate 7 extra bytes for CDB to provide a cdb pointer which
	 * is guaranteed to be 8 byte aligned. Some LU providers like OSD
	 * depend upon this alignment.
	 */
	if (cdb_length_in >= 16)
		cdb_length = cdb_length_in + 7;
	else
		cdb_length = 16 + 7;
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	/* Decode the 14-bit LUN number from the SCSI LUN field */
	luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	rw_enter(iss->iss_lockp, RW_READER);
	lun_map_ent =
	    (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
	if (!lun_map_ent) {
		/* Unknown LUN: route the command to dlun0 */
		lu = dlun0;
	} else {
		lu = lun_map_ent->ent_lu;
	}
	ilu = lu->lu_stmf_private;
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		return (NULL);
	}
	/* Try to reuse a free task whose CDB buffer is large enough */
	do {
		if (ilu->ilu_free_tasks == NULL) {
			new_task = 1;
			break;
		}
		mutex_enter(&ilu->ilu_task_lock);
		for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
		    ((*ppitask)->itask_cdb_buf_size < cdb_length);
		    ppitask = &((*ppitask)->itask_lu_free_next))
			;
		if (*ppitask) {
			itask = *ppitask;
			*ppitask = (*ppitask)->itask_lu_free_next;
			ilu->ilu_ntasks_free--;
			/* Track the low-water mark for free-list trimming */
			if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
				ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		} else {
			new_task = 1;
		}
		mutex_exit(&ilu->ilu_task_lock);
	/* CONSTCOND */
	} while (0);

	if (!new_task) {
		/*
		 * Save the task_cdb pointer and zero per cmd fields.
		 * We know the task_cdb_length is large enough by task
		 * selection process above.
		 */
		uint8_t *save_cdb;
		uintptr_t t_start, t_end;

		task = itask->itask_task;
		save_cdb = task->task_cdb;	/* save */
		t_start = (uintptr_t)&task->task_flags;
		t_end = (uintptr_t)&task->task_extended_cmd;
		bzero((void *)t_start, (size_t)(t_end - t_start));
		task->task_cdb = save_cdb;	/* restore */
		itask->itask_ncmds = 0;
	} else {
		task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
		    cdb_length, AF_FORCE_NOSLEEP);
		if (task == NULL) {
			rw_exit(iss->iss_lockp);
			return (NULL);
		}
		task->task_lu = lu;
		l = task->task_lun_no;
		l[0] = lun[0];
		l[1] = lun[1];
		l[2] = lun[2];
		l[3] = lun[3];
		l[4] = lun[4];
		l[5] = lun[5];
		l[6] = lun[6];
		l[7] = lun[7];
		/* 8-byte align the cdb pointer within port private space */
		task->task_cdb = (uint8_t *)task->task_port_private;
		if ((ulong_t)(task->task_cdb) & 7ul) {
			task->task_cdb = (uint8_t *)(((ulong_t)
			    (task->task_cdb) + 7ul) & ~(7ul));
		}
		itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
		itask->itask_cdb_buf_size = cdb_length;
		mutex_init(&itask->itask_audit_mutex, NULL, MUTEX_DRIVER, NULL);
	}
	/* (Re)initialize the per-command fields */
	task->task_session = ss;
	task->task_lport = lport;
	task->task_cdb_length = cdb_length_in;
	itask->itask_flags = ITASK_IN_TRANSITION;
	itask->itask_waitq_time = 0;
	itask->itask_lu_read_time = itask->itask_lu_write_time = 0;
	itask->itask_lport_read_time = itask->itask_lport_write_time = 0;
	itask->itask_read_xfer = itask->itask_write_xfer = 0;
	itask->itask_audit_index = 0;

	if (new_task) {
		if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		/* Link the new task onto the LU's task list */
		mutex_enter(&ilu->ilu_task_lock);
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/* Reset started while we were allocating; bail */
			mutex_exit(&ilu->ilu_task_lock);
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		itask->itask_lu_next = ilu->ilu_tasks;
		if (ilu->ilu_tasks)
			ilu->ilu_tasks->itask_lu_prev = itask;
		ilu->ilu_tasks = itask;
		/* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
		ilu->ilu_ntasks++;
		mutex_exit(&ilu->ilu_task_lock);
	}

	/* Charge the task against the LU's current task counter */
	itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
	atomic_add_32(itask->itask_ilu_task_cntr, 1);
	itask->itask_start_time = ddi_get_lbolt();

	/* The task holds a reference on the itl data for its lifetime */
	if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
	    lun_map_ent->ent_itl_datap) != NULL)) {
		atomic_add_32(&itask->itask_itl_datap->itl_counter, 1);
		task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
	} else {
		itask->itask_itl_datap = NULL;
		task->task_lu_itl_handle = NULL;
	}

	rw_exit(iss->iss_lockp);
	return (task);
}
4563 
/*
 * Return a completed task to its LU's free list and drop it from the
 * LU's active task count.  Called with the session lock held
 * (asserted).  The task remains linked on the LU's ilu_tasks list so
 * it can be reused by stmf_task_alloc() or trimmed later.
 */
static void
stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	ASSERT(rw_lock_held(iss->iss_lockp));
	itask->itask_flags = ITASK_IN_FREE_LIST;
	itask->itask_proxy_msg_id = 0;
	mutex_enter(&ilu->ilu_task_lock);
	/* Push onto the head of the LU's free list */
	itask->itask_lu_free_next = ilu->ilu_free_tasks;
	ilu->ilu_free_tasks = itask;
	ilu->ilu_ntasks_free++;
	mutex_exit(&ilu->ilu_task_lock);
	/* Uncharge the per-LU task counter this task was allocated under */
	atomic_add_32(itask->itask_ilu_task_cntr, -1);
}
4581 
/*
 * Trim an LU's free task list: release half of the minimum number of
 * tasks observed free since the last trim (ilu_ntasks_min_free).  Each
 * released task is handed to the LU's lu_task_free entry point (with
 * ilu_task_lock dropped), unlinked from the LU's task list and freed.
 */
void
stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
{
	uint32_t	num_to_release, ndx;
	stmf_i_scsi_task_t *itask;
	stmf_lu_t	*lu = ilu->ilu_lu;

	ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);

	/* free half of the minimal free of the free tasks */
	num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
	if (!num_to_release) {
		return;
	}
	for (ndx = 0; ndx < num_to_release; ndx++) {
		/* Pop the head of the free list */
		mutex_enter(&ilu->ilu_task_lock);
		itask = ilu->ilu_free_tasks;
		if (itask == NULL) {
			mutex_exit(&ilu->ilu_task_lock);
			break;
		}
		ilu->ilu_free_tasks = itask->itask_lu_free_next;
		ilu->ilu_ntasks_free--;
		mutex_exit(&ilu->ilu_task_lock);

		/* Let the LU release its per-task state, lock dropped */
		lu->lu_task_free(itask->itask_task);
		/* Unlink from the LU's task list and free the task */
		mutex_enter(&ilu->ilu_task_lock);
		if (itask->itask_lu_next)
			itask->itask_lu_next->itask_lu_prev =
			    itask->itask_lu_prev;
		if (itask->itask_lu_prev)
			itask->itask_lu_prev->itask_lu_next =
			    itask->itask_lu_next;
		else
			ilu->ilu_tasks = itask->itask_lu_next;

		ilu->ilu_ntasks--;
		mutex_exit(&ilu->ilu_task_lock);
		stmf_free(itask->itask_task);
	}
}
4623 
4624 /*
4625  * Called with stmf_lock held
4626  */
4627 void
4628 stmf_check_freetask()
4629 {
4630 	stmf_i_lu_t *ilu;
4631 	clock_t	endtime = ddi_get_lbolt() + drv_usectohz(10000);
4632 
4633 	/* stmf_svc_ilu_draining may get changed after stmf_lock is released */
4634 	while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
4635 		stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
4636 		if (!ilu->ilu_ntasks_min_free) {
4637 			ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4638 			continue;
4639 		}
4640 		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
4641 		mutex_exit(&stmf_state.stmf_lock);
4642 		stmf_task_lu_check_freelist(ilu);
4643 		/*
4644 		 * we do not care about the accuracy of
4645 		 * ilu_ntasks_min_free, so we don't lock here
4646 		 */
4647 		ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4648 		mutex_enter(&stmf_state.stmf_lock);
4649 		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
4650 		cv_broadcast(&stmf_state.stmf_cv);
4651 		if (ddi_get_lbolt() >= endtime)
4652 			break;
4653 	}
4654 }
4655 
/*
 * Abort any task on the LU that has exceeded its timeout
 * (task_timeout seconds, or stmf_default_task_timeout when the task
 * specifies none).  Tasks already on the free list or already being
 * aborted are skipped.
 */
void
stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
{
	clock_t l = ddi_get_lbolt();
	clock_t ps = drv_usectohz(1000000);	/* ticks per second */
	stmf_i_scsi_task_t *itask;
	scsi_task_t *task;
	uint32_t to;

	mutex_enter(&ilu->ilu_task_lock);
	for (itask = ilu->ilu_tasks; itask != NULL;
	    itask = itask->itask_lu_next) {
		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
		    ITASK_BEING_ABORTED)) {
			continue;
		}
		task = itask->itask_task;
		if (task->task_timeout == 0)
			to = stmf_default_task_timeout;
		else
			to = task->task_timeout;
		/* Deadline not reached yet? */
		if ((itask->itask_start_time + (to * ps)) > l)
			continue;
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_TIMEOUT, NULL);
	}
	mutex_exit(&ilu->ilu_task_lock);
}
4684 
4685 /*
4686  * Called with stmf_lock held
4687  */
4688 void
4689 stmf_check_ilu_timing()
4690 {
4691 	stmf_i_lu_t *ilu;
4692 	clock_t	endtime = ddi_get_lbolt() + drv_usectohz(10000);
4693 
4694 	/* stmf_svc_ilu_timing may get changed after stmf_lock is released */
4695 	while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
4696 		stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
4697 		if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
4698 			if (ilu->ilu_task_cntr2 == 0) {
4699 				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
4700 				continue;
4701 			}
4702 		} else {
4703 			if (ilu->ilu_task_cntr1 == 0) {
4704 				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
4705 				continue;
4706 			}
4707 		}
4708 		/*
4709 		 * If we are here then it means that there is some slowdown
4710 		 * in tasks on this lu. We need to check.
4711 		 */
4712 		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
4713 		mutex_exit(&stmf_state.stmf_lock);
4714 		stmf_do_ilu_timeouts(ilu);
4715 		mutex_enter(&stmf_state.stmf_lock);
4716 		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
4717 		cv_broadcast(&stmf_state.stmf_cv);
4718 		if (ddi_get_lbolt() >= endtime)
4719 			break;
4720 	}
4721 }
4722 
4723 /*
4724  * Kills all tasks on a lu except tm_task
4725  */
4726 void
4727 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
4728 {
4729 	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4730 	stmf_i_scsi_task_t *itask;
4731 
4732 	mutex_enter(&ilu->ilu_task_lock);
4733 
4734 	for (itask = ilu->ilu_tasks; itask != NULL;
4735 	    itask = itask->itask_lu_next) {
4736 		if (itask->itask_flags & ITASK_IN_FREE_LIST)
4737 			continue;
4738 		if (itask->itask_task == tm_task)
4739 			continue;
4740 		stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
4741 	}
4742 	mutex_exit(&ilu->ilu_task_lock);
4743 }
4744 
/*
 * Free every data buffer still allocated to a task.  For each slot set
 * in the allocation bitmap: close out transfer accounting if a port
 * transfer was in flight, then either have the LU free the buffer
 * (DB_LU_DATA_BUF) or return it to the port's data store.
 */
void
stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
{
	int i;
	uint8_t map;

	if ((map = itask->itask_allocated_buf_map) == 0)
		return;
	for (i = 0; i < 4; i++) {
		if (map & 1) {
			stmf_data_buf_t *dbuf;

			dbuf = itask->itask_dbufs[i];
			/* Finish xfer timing if a transfer was started */
			if (dbuf->db_xfer_start_timestamp) {
				stmf_lport_xfer_done(itask, dbuf);
			}
			if (dbuf->db_flags & DB_LU_DATA_BUF) {
				/*
				 * LU needs to clean up buffer.
				 * LU is required to free the buffer
				 * in the xfer_done handler.
				 */
				scsi_task_t *task = itask->itask_task;
				stmf_lu_t *lu = task->task_lu;

				lu->lu_dbuf_free(task, dbuf);
				ASSERT(((itask->itask_allocated_buf_map>>i)
				    & 1) == 0); /* must be gone */
			} else {
				ASSERT(dbuf->db_lu_private == NULL);
				dbuf->db_lu_private = NULL;
				lport->lport_ds->ds_free_data_buf(
				    lport->lport_ds, dbuf);
			}
		}
		map >>= 1;
	}
	itask->itask_allocated_buf_map = 0;
}
4784 
/*
 * Final disposal of a completed task: free any remaining data buffers,
 * close out ITL timing, drop the task's reference on its itl data
 * (releasing the itl handle if this was the last reference), let the
 * port free its per-task state, and finally put the task back on the
 * LU's free list.
 */
void
stmf_task_free(scsi_task_t *task)
{
	stmf_local_port_t *lport = task->task_lport;
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;

	stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);

	stmf_free_task_bufs(itask, lport);
	stmf_itl_task_done(itask);
	DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
	    hrtime_t,
	    itask->itask_done_timestamp - itask->itask_start_timestamp);
	if (itask->itask_itl_datap) {
		/* Last reference also releases the itl handle */
		if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter,
		    -1) == 0) {
			stmf_release_itl_handle(task->task_lu,
			    itask->itask_itl_datap);
		}
	}

	rw_enter(iss->iss_lockp, RW_READER);
	lport->lport_task_free(task);
	if (itask->itask_worker) {
		/* Undo the counts taken when the task was posted */
		atomic_add_32(&stmf_cur_ntasks, -1);
		atomic_add_32(&itask->itask_worker->worker_ref_count, -1);
	}
	/*
	 * After calling stmf_task_lu_free, the task pointer can no longer
	 * be trusted.
	 */
	stmf_task_lu_free(task, iss);
	rw_exit(iss->iss_lockp);
}
4822 
/*
 * Queue a freshly allocated task (with optional initial dbuf) on a
 * worker thread.  A worker is picked round-robin, with a one-step
 * look-ahead to the next worker if it has a shorter queue.  The task's
 * flags move from IN_TRANSITION to KNOWN_TO_TGT_PORT|IN_WORKER_QUEUE
 * (with default handling flagged for target resets and REPORT LUNS),
 * and the worker is signalled if idle.  If the LU went into reset while
 * the task was being set up, the task is immediately aborted.
 */
void
stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int nv;
	uint32_t old, new;
	uint32_t ct;
	stmf_worker_t *w, *w1;
	uint8_t tm;

	if (task->task_max_nbufs > 4)
		task->task_max_nbufs = 4;
	task->task_cur_nbufs = 0;
	/* Latest value of currently running tasks */
	ct = atomic_add_32_nv(&stmf_cur_ntasks, 1);

	/* Select the next worker using round robin */
	nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1);
	if (nv >= stmf_nworkers_accepting_cmds) {
		int s = nv;
		/* Wrap the counter back into range */
		do {
			nv -= stmf_nworkers_accepting_cmds;
		} while (nv >= stmf_nworkers_accepting_cmds);
		if (nv < 0)
			nv = 0;
		/* Its ok if this cas fails */
		(void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter,
		    s, nv);
	}
	w = &stmf_workers[nv];

	/*
	 * A worker can be pinned by interrupt. So select the next one
	 * if it has lower load.
	 */
	if ((nv + 1) >= stmf_nworkers_accepting_cmds) {
		w1 = stmf_workers;
	} else {
		w1 = &stmf_workers[nv + 1];
	}
	if (w1->worker_queue_depth < w->worker_queue_depth)
		w = w1;

	mutex_enter(&w->worker_lock);
	if (((w->worker_flags & STMF_WORKER_STARTED) == 0) ||
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		/*
		 * Maybe we are in the middle of a change. Just go to
		 * the 1st worker.
		 */
		mutex_exit(&w->worker_lock);
		w = stmf_workers;
		mutex_enter(&w->worker_lock);
	}
	itask->itask_worker = w;
	/*
	 * Track max system load inside the worker as we already have the
	 * worker lock (no point implementing another lock). The service
	 * thread will do the comparisons and figure out the max overall
	 * system load.
	 */
	if (w->worker_max_sys_qdepth_pu < ct)
		w->worker_max_sys_qdepth_pu = ct;

	/* Move task flags out of IN_TRANSITION under a CAS loop */
	do {
		old = new = itask->itask_flags;
		new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE;
		if (task->task_mgmt_function) {
			tm = task->task_mgmt_function;
			if ((tm == TM_TARGET_RESET) ||
			    (tm == TM_TARGET_COLD_RESET) ||
			    (tm == TM_TARGET_WARM_RESET)) {
				new |= ITASK_DEFAULT_HANDLING;
			}
		} else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
			new |= ITASK_DEFAULT_HANDLING;
		}
		new &= ~ITASK_IN_TRANSITION;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	stmf_itl_task_start(itask);

	/* Append the task to the worker's queue */
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	/* Measure task waitq time */
	itask->itask_waitq_enter_timestamp = gethrtime();
	atomic_add_32(&w->worker_ref_count, 1);
	itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
	itask->itask_ncmds = 1;
	stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
	if (dbuf) {
		itask->itask_allocated_buf_map = 1;
		itask->itask_dbufs[0] = dbuf;
		dbuf->db_handle = 0;
	} else {
		itask->itask_allocated_buf_map = 0;
		itask->itask_dbufs[0] = NULL;
	}

	/* Wake the worker if it is not already running */
	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) {
		w->worker_signal_timestamp = gethrtime();
		DTRACE_PROBE2(worker__signal, stmf_worker_t *, w,
		    scsi_task_t *, task);
		cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	/*
	 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
	 * was set between checking of ILU_RESET_ACTIVE and clearing of the
	 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
	 */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
	}
}
4949 
4950 static void
4951 stmf_task_audit(stmf_i_scsi_task_t *itask,
4952     task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf)
4953 {
4954 	stmf_task_audit_rec_t *ar;
4955 
4956 	mutex_enter(&itask->itask_audit_mutex);
4957 	ar = &itask->itask_audit_records[itask->itask_audit_index++];
4958 	itask->itask_audit_index &= (ITASK_TASK_AUDIT_DEPTH - 1);
4959 	ar->ta_event = te;
4960 	ar->ta_cmd_or_iof = cmd_or_iof;
4961 	ar->ta_itask_flags = itask->itask_flags;
4962 	ar->ta_dbuf = dbuf;
4963 	gethrestime(&ar->ta_timestamp);
4964 	mutex_exit(&itask->itask_audit_mutex);
4965 }
4966 
4967 
4968 /*
4969  * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
4970  * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU can be reset already
4971  * i.e. before ITASK_BEING_ABORTED being set. But if it was not, it cannot
4972  * be reset until the LU explicitly calls stmf_task_lu_aborted(). Of course
4973  * the LU will make this call only if we call the LU's abort entry point.
4974  * we will only call that entry point if ITASK_KNOWN_TO_LU was set.
4975  *
4976  * Same logic applies for the port.
4977  *
4978  * Also ITASK_BEING_ABORTED will not be allowed to set if both KNOWN_TO_LU
4979  * and KNOWN_TO_TGT_PORT are reset.
4980  *
4981  * +++++++++++++++++++++++++++++++++++++++++++++++
4982  */
4983 
/*
 * Start a data transfer for "task" using buffer "dbuf" via the task's
 * local port.  Called by the LU.  Returns STMF_ABORTED if the task is
 * being aborted, STMF_SUCCESS for a stats-only call, otherwise the
 * lport's xfer_data() result.
 */
stmf_status_t
stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	stmf_status_t ret = STMF_SUCCESS;

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf);

	if (ioflags & STMF_IOF_LU_DONE) {
		uint32_t new, old;
		/* LU is done with this task; atomically drop KNOWN_TO_LU. */
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}
	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);
#ifdef	DEBUG
	/*
	 * Debug aid: while stmf_drop_buf_counter is armed, count it down and
	 * at the trigger value claim success without handing the buffer to
	 * the lport (i.e. deliberately drop the transfer).
	 */
	if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
		if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) ==
		    1)
			return (STMF_SUCCESS);
	}
#endif

	stmf_update_kstat_lu_io(task, dbuf);
	stmf_update_kstat_lport_io(task, dbuf);
	stmf_lport_xfer_start(itask, dbuf);
	if (ioflags & STMF_IOF_STATS_ONLY) {
		/* Caller only wants the stats updated; no real transfer. */
		stmf_lport_xfer_done(itask, dbuf);
		return (STMF_SUCCESS);
	}

	dbuf->db_flags |= DB_LPORT_XFER_ACTIVE;
	ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);

	/*
	 * Port provider may have already called the buffer callback in
	 * which case dbuf->db_xfer_start_timestamp will be 0.
	 */
	if (ret != STMF_SUCCESS) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
		if (dbuf->db_xfer_start_timestamp != 0)
			stmf_lport_xfer_done(itask, dbuf);
	}

	return (ret);
}
5036 
/*
 * Completion callback for stmf_xfer_data(), invoked by the port provider
 * when the data phase for "dbuf" finishes.  If the LU still owns the
 * task, a DATA_XFER_DONE command is pushed for the worker thread (and
 * the task queued to the worker if it isn't queued already); if nobody
 * references the task any more it is freed.
 */
void
stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t update_queue_flags, free_it, queue_it;

	stmf_lport_xfer_done(itask, dbuf);

	stmf_task_audit(itask, TE_XFER_DONE, iof, dbuf);

	/* Guard against unexpected completions from the lport */
	if (dbuf->db_flags & DB_LPORT_XFER_ACTIVE) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
	} else {
		/*
		 * This should never happen.
		 */
		ilport = task->task_lport->lport_stmf_private;
		ilport->ilport_unexpected_comp++;
		cmn_err(CE_PANIC, "Unexpected xfer completion task %p dbuf %p",
		    (void *)task, (void *)dbuf);
		return;
	}

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			/* Target port is finished with this task. */
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			task->task_completion_status = dbuf->db_xfer_status;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			update_queue_flags = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				queue_it = 0;
			} else {
				queue_it = 1;
				new |= ITASK_IN_WORKER_QUEUE;
			}
		} else {
			update_queue_flags = 0;
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (update_queue_flags) {
		/* Encode the buffer handle alongside the worker command. */
		uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;

		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
		if (queue_it) {
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
			/* Measure task waitq time */
			itask->itask_waitq_enter_timestamp = gethrtime();
			if (++(w->worker_queue_depth) >
			    w->worker_max_qdepth_pu) {
				w->worker_max_qdepth_pu = w->worker_queue_depth;
			}
			if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
				cv_signal(&w->worker_cv);
		}
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		/* Free only when no one references the task any more. */
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		}
	}
}
5134 
/*
 * Send the SCSI status for "task" through its local port.  Computes the
 * over/underrun residual before handing off.  Returns STMF_ABORTED if
 * the task is being aborted, STMF_SUCCESS if the target port no longer
 * knows the task, otherwise the lport's send_status() result.
 */
stmf_status_t
stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
{
	DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_SEND_STATUS, ioflags, NULL);

	if (ioflags & STMF_IOF_LU_DONE) {
		uint32_t new, old;
		/* LU is done with this task; atomically drop KNOWN_TO_LU. */
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}

	if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
		return (STMF_SUCCESS);
	}

	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);

	/* Work out the residual count and over/underrun indication. */
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	} else if (task->task_cmd_xfer_length >
	    task->task_expected_xfer_length) {
		/* Command calls for more data than the initiator expects. */
		task->task_status_ctrl = TASK_SCTRL_OVER;
		task->task_resid = task->task_cmd_xfer_length -
		    task->task_expected_xfer_length;
	} else if (task->task_nbytes_transferred <
	    task->task_expected_xfer_length) {
		/* Fewer bytes were moved than the initiator expected. */
		task->task_status_ctrl = TASK_SCTRL_UNDER;
		task->task_resid = task->task_expected_xfer_length -
		    task->task_nbytes_transferred;
	} else {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	}
	return (task->task_lport->lport_send_status(task, ioflags));
}
5181 
/*
 * Completion callback for stmf_send_scsi_status(), invoked by the port
 * provider once the status has gone out with result "s".  If the LU
 * still owns the task it is queued back to its worker for completion
 * processing; otherwise the task is freed.  A status completion for a
 * task already sitting in the worker queue is a contract violation and
 * panics.
 */
void
stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t free_it, queue_it;

	stmf_task_audit(itask, TE_SEND_STATUS_DONE, iof, NULL);

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			/* Target port is finished with this task. */
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			queue_it = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				cmn_err(CE_PANIC, "status completion received"
				    " when task is already in worker queue "
				    " task = %p", (void *)task);
			}
			new |= ITASK_IN_WORKER_QUEUE;
		} else {
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;


	if (queue_it) {
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] =
		    ITASK_CMD_STATUS_DONE;
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		/* Measure task waitq time */
		itask->itask_waitq_enter_timestamp = gethrtime();
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		/* LPORT done and LU not involved: task must be fully done. */
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		} else {
			cmn_err(CE_PANIC, "LU is done with the task but LPORT "
			    " is not done, itask %p itask_flags %x",
			    (void *)itask, itask->itask_flags);
		}
	}
}
5262 
/*
 * Called by the LU when it is completely done with a task for which no
 * status is to be sent.  Clears ITASK_KNOWN_TO_LU and frees the task; by
 * contract this must be the last stage, so a task still referenced
 * elsewhere (or already in the worker queue) is a panic.
 */
void
stmf_task_lu_done(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		if (old & ITASK_IN_WORKER_QUEUE) {
			cmn_err(CE_PANIC, "task_lu_done received"
			    " when task is in worker queue "
			    " task = %p", (void *)task);
		}
		new &= ~ITASK_KNOWN_TO_LU;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	mutex_exit(&w->worker_lock);

	if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
	    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
	    ITASK_BEING_ABORTED)) == 0) {
		stmf_task_free(task);
	} else {
		cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but "
		    " the task is still not done, task = %p", (void *)task);
	}
}
5297 
/*
 * Mark "task" as being aborted with completion status "s" and, if the
 * task already has a worker and is not in transition, queue it to that
 * worker so the abort gets processed.  A task that is already aborting,
 * or that neither the LU nor the target port knows about, is left alone.
 */
void
stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w;
	uint32_t old, new;

	stmf_task_audit(itask, TE_TASK_ABORT, CMD_OR_IOF_NA, NULL);

	do {
		old = new = itask->itask_flags;
		if ((old & ITASK_BEING_ABORTED) ||
		    ((old & (ITASK_KNOWN_TO_TGT_PORT |
		    ITASK_KNOWN_TO_LU)) == 0)) {
			return;
		}
		new |= ITASK_BEING_ABORTED;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;
	/* Start time is used later to detect abort timeouts. */
	itask->itask_start_time = ddi_get_lbolt();

	if (((w = itask->itask_worker) == NULL) ||
	    (itask->itask_flags & ITASK_IN_TRANSITION)) {
		return;
	}

	/* Queue it and get out */
	mutex_enter(&w->worker_lock);
	if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
		mutex_exit(&w->worker_lock);
		return;
	}
	atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
		cv_signal(&w->worker_cv);
	mutex_exit(&w->worker_lock);
}
5346 
/*
 * Abort dispatch.  STMF_QUEUE_ABORT_LU kills every task of an LU,
 * STMF_QUEUE_TASK_ABORT starts the abort of a single task, and the
 * STMF_REQUEUE_TASK_ABORT_LPORT/_LU variants clear the corresponding
 * "abort already called" marker so stmf_do_task_abort() will invoke the
 * LU/lport abort entry point again.
 */
void
stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
{
	stmf_i_scsi_task_t *itask = NULL;
	uint32_t old, new, f, rf;

	DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
	    stmf_status_t, s);

	switch (abort_cmd) {
	case STMF_QUEUE_ABORT_LU:
		stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
		return;
	case STMF_QUEUE_TASK_ABORT:
		stmf_queue_task_for_abort(task, s);
		return;
	case STMF_REQUEUE_TASK_ABORT_LPORT:
		rf = ITASK_TGT_PORT_ABORT_CALLED;
		f = ITASK_KNOWN_TO_TGT_PORT;
		break;
	case STMF_REQUEUE_TASK_ABORT_LU:
		rf = ITASK_LU_ABORT_CALLED;
		f = ITASK_KNOWN_TO_LU;
		break;
	default:
		return;
	}
	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	/* Requeue only applies to a task that is aborting and still known. */
	f |= ITASK_BEING_ABORTED | rf;
	do {
		old = new = itask->itask_flags;
		if ((old & f) != f) {
			return;
		}
		new &= ~rf;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
}
5384 
5385 void
5386 stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
5387 {
5388 	char			 info[STMF_CHANGE_INFO_LEN];
5389 	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
5390 	unsigned long long	st;
5391 
5392 	stmf_task_audit(itask, TE_TASK_LU_ABORTED, iof, NULL);
5393 
5394 	st = s;	/* gcc fix */
5395 	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
5396 		(void) snprintf(info, sizeof (info),
5397 		    "task %p, lu failed to abort ret=%llx", (void *)task, st);
5398 	} else if ((iof & STMF_IOF_LU_DONE) == 0) {
5399 		(void) snprintf(info, sizeof (info),
5400 		    "Task aborted but LU is not finished, task ="
5401 		    "%p, s=%llx, iof=%x", (void *)task, st, iof);
5402 	} else {
5403 		/*
5404 		 * LU abort successfully
5405 		 */
5406 		atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
5407 		return;
5408 	}
5409 
5410 	stmf_abort_task_offline(task, 1, info);
5411 }
5412 
/*
 * Called by the port provider in response to an abort request.  On a
 * clean abort (STMF_ABORT_SUCCESS or STMF_NOT_FOUND with
 * STMF_IOF_LPORT_DONE set) the task's KNOWN_TO_TGT_PORT flag is cleared;
 * any other outcome offlines the lport via stmf_abort_task_offline().
 */
void
stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char			info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
	unsigned long long	st;
	uint32_t		old, new;

	stmf_task_audit(itask, TE_TASK_LPORT_ABORTED, iof, NULL);

	st = s;
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, tgt port failed to abort ret=%llx", (void *)task,
		    st);
	} else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but tgt port is not finished, "
		    "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LPORT abort successfully
		 */
		do {
			old = new = itask->itask_flags;
			if (!(old & ITASK_KNOWN_TO_TGT_PORT))
				return;
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
		return;
	}

	stmf_abort_task_offline(task, 0, info);
}
5447 
/*
 * Arrange for the worker thread to call the LU's polling entry point for
 * this task after "timeout" milliseconds (or on the next tick for
 * ITASK_DEFAULT_POLL_TIMEOUT).  Returns STMF_BUSY if the task's command
 * stack is full, STMF_SUCCESS otherwise (including when a poll request
 * is already pending).
 */
stmf_status_t
stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	int i;

	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU);
	mutex_enter(&w->worker_lock);
	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
		mutex_exit(&w->worker_lock);
		return (STMF_BUSY);
	}
	/* If an LU poll is already pending, don't queue another one. */
	for (i = 0; i < itask->itask_ncmds; i++) {
		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) {
			mutex_exit(&w->worker_lock);
			return (STMF_SUCCESS);
		}
	}
	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU;
	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
	} else {
		/* Convert ms to ticks; poll at least one tick out. */
		clock_t t = drv_usectohz(timeout * 1000);
		if (t == 0)
			t = 1;
		itask->itask_poll_timeout = ddi_get_lbolt() + t;
	}
	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);
	return (STMF_SUCCESS);
}
5495 
5496 stmf_status_t
5497 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout)
5498 {
5499 	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5500 	    task->task_stmf_private;
5501 	stmf_worker_t *w = itask->itask_worker;
5502 	int i;
5503 
5504 	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT);
5505 	mutex_enter(&w->worker_lock);
5506 	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5507 		mutex_exit(&w->worker_lock);
5508 		return (STMF_BUSY);
5509 	}
5510 	for (i = 0; i < itask->itask_ncmds; i++) {
5511 		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) {
5512 			mutex_exit(&w->worker_lock);
5513 			return (STMF_SUCCESS);
5514 		}
5515 	}
5516 	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT;
5517 	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5518 		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5519 	} else {
5520 		clock_t t = drv_usectohz(timeout * 1000);
5521 		if (t == 0)
5522 			t = 1;
5523 		itask->itask_poll_timeout = ddi_get_lbolt() + t;
5524 	}
5525 	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5526 		itask->itask_worker_next = NULL;
5527 		if (w->worker_task_tail) {
5528 			w->worker_task_tail->itask_worker_next = itask;
5529 		} else {
5530 			w->worker_task_head = itask;
5531 		}
5532 		w->worker_task_tail = itask;
5533 		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5534 			w->worker_max_qdepth_pu = w->worker_queue_depth;
5535 		}
5536 		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5537 			cv_signal(&w->worker_cv);
5538 	}
5539 	mutex_exit(&w->worker_lock);
5540 	return (STMF_SUCCESS);
5541 }
5542 
/*
 * Worker-side abort processing: invoke the LU and lport abort entry
 * points (each at most once at a time, tracked via the *_ABORT_CALLED
 * flags) and offline the respective side if its abort fails outright or
 * does not finish within the abort timeout.
 */
void
stmf_do_task_abort(scsi_task_t *task)
{
	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
	stmf_lu_t		*lu;
	stmf_local_port_t	*lport;
	unsigned long long	 ret;
	uint32_t		 old, new;
	uint8_t			 call_lu_abort, call_port_abort;
	char			 info[STMF_CHANGE_INFO_LEN];

	lu = task->task_lu;
	lport = task->task_lport;
	/* Claim the right to call the LU abort entry point exactly once. */
	do {
		old = new = itask->itask_flags;
		if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) ==
		    ITASK_KNOWN_TO_LU) {
			new |= ITASK_LU_ABORT_CALLED;
			call_lu_abort = 1;
		} else {
			call_lu_abort = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (call_lu_abort) {
		/* Tasks under default handling are aborted through dlun0. */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) {
			ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		} else {
			ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		}
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE);
		} else if (ret == STMF_BUSY) {
			/* LU busy; allow the abort call to be retried. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_LU_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by LU %p, ret %llx", (void *)lu, ret);
			stmf_abort_task_offline(task, 1, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		/* Abort already in flight; check for a timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lu->lu_abort_timeout?
		    lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lu abort timed out");
			stmf_abort_task_offline(itask->itask_task, 1, info);
		}
	}

	/* Same dance for the target port side. */
	do {
		old = new = itask->itask_flags;
		if ((old & (ITASK_KNOWN_TO_TGT_PORT |
		    ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) {
			new |= ITASK_TGT_PORT_ABORT_CALLED;
			call_port_abort = 1;
		} else {
			call_port_abort = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	if (call_port_abort) {
		ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0);
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE);
		} else if (ret == STMF_BUSY) {
			/* Port busy; allow the abort call to be retried. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_TGT_PORT_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by tgt port %p ret %llx",
			    (void *)lport, ret);
			stmf_abort_task_offline(task, 0, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) {
		/* Abort already in flight; check for a timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lport->lport_abort_timeout?
		    lport->lport_abort_timeout :
		    ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lport abort timed out");
			stmf_abort_task_offline(itask->itask_task, 0, info);
		}
	}
}
5627 
/*
 * State-change control for LUs and local ports.  ONLINE/OFFLINE requests
 * are validated against the object's current state, the state is moved to
 * the transitional ONLINING/OFFLINING value, and the actual work is handed
 * to the stmf service thread via stmf_svc_queue().  The *_COMPLETE
 * commands are the provider's acknowledgment that the transition finished
 * (st_completion_status tells whether it succeeded).
 */
stmf_status_t
stmf_ctl(int cmd, void *obj, void *arg)
{
	stmf_status_t			ret;
	stmf_i_lu_t			*ilu;
	stmf_i_local_port_t		*ilport;
	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;

	mutex_enter(&stmf_state.stmf_lock);
	ret = STMF_INVALID_ARG;
	if (cmd & STMF_CMD_LU_OP) {
		ilu = stmf_lookup_lu((stmf_lu_t *)obj);
		if (ilu == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lu__state__change,
		    stmf_lu_t *, ilu->ilu_lu,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else if (cmd & STMF_CMD_LPORT_OP) {
		ilport = stmf_lookup_lport((stmf_local_port_t *)obj);
		if (ilport == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lport__state__change,
		    stmf_local_port_t *, ilport->ilport_lport,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else {
		goto stmf_ctl_lock_exit;
	}

	switch (cmd) {
	case STMF_CMD_LU_ONLINE:
		/* Online is valid only from the OFFLINE state. */
		switch (ilu->ilu_state) {
			case STMF_STATE_OFFLINE:
				ret = STMF_SUCCESS;
				break;
			case STMF_STATE_ONLINE:
			case STMF_STATE_ONLINING:
				ret = STMF_ALREADY;
				break;
			case STMF_STATE_OFFLINING:
				ret = STMF_BUSY;
				break;
			default:
				ret = STMF_BADSTATE;
				break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		ilu->ilu_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_ONLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_ONLINE;
			/* Drop the lock around the provider callback. */
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		} else {
			/* XXX: should log a message and record more data */
			ilu->ilu_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LU_OFFLINE:
		/* Offline is valid only from the ONLINE state. */
		switch (ilu->ilu_state) {
			case STMF_STATE_ONLINE:
				ret = STMF_SUCCESS;
				break;
			case STMF_STATE_OFFLINE:
			case STMF_STATE_OFFLINING:
				ret = STMF_ALREADY;
				break;
			case STMF_STATE_ONLINING:
				ret = STMF_BUSY;
				break;
			default:
				ret = STMF_BADSTATE;
				break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;
		ilu->ilu_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_OFFLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_OFFLINE;
			/* Drop the lock around the provider callback. */
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			/* Offline failed; the LU remains online. */
			ilu->ilu_state = STMF_STATE_ONLINE;
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	/*
	 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
	 * It's related with hardware disable/enable.
	 */
	case STMF_CMD_LPORT_ONLINE:
		switch (ilport->ilport_state) {
			case STMF_STATE_OFFLINE:
				ret = STMF_SUCCESS;
				break;
			case STMF_STATE_ONLINE:
			case STMF_STATE_ONLINING:
				ret = STMF_ALREADY;
				break;
			case STMF_STATE_OFFLINING:
				ret = STMF_BUSY;
				break;
			default:
				ret = STMF_BADSTATE;
				break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		/*
		 * Only user request can recover the port from the
		 * FORCED_OFFLINE state
		 */
		if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) {
			if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
				ret = STMF_FAILURE;
				goto stmf_ctl_lock_exit;
			}
		}

		/*
		 * Avoid too frequent request to online
		 */
		if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
			ilport->ilport_online_times = 0;
			ilport->ilport_avg_interval = 0;
		}
		if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) &&
		    (ilport->ilport_online_times >= 4)) {
			ret = STMF_FAILURE;
			ilport->ilport_flags |= ILPORT_FORCED_OFFLINE;
			stmf_trace(NULL, "stmf_ctl: too frequent request to "
			    "online the port");
			cmn_err(CE_WARN, "stmf_ctl: too frequent request to "
			    "online the port, set FORCED_OFFLINE now");
			goto stmf_ctl_lock_exit;
		}
		/* Keep a running average of the interval between onlines. */
		if (ilport->ilport_online_times > 0) {
			if (ilport->ilport_online_times == 1) {
				ilport->ilport_avg_interval = ddi_get_lbolt() -
				    ilport->ilport_last_online_clock;
			} else {
				ilport->ilport_avg_interval =
				    (ilport->ilport_avg_interval +
				    ddi_get_lbolt() -
				    ilport->ilport_last_online_clock) >> 1;
			}
		}
		ilport->ilport_last_online_clock = ddi_get_lbolt();
		ilport->ilport_online_times++;

		/*
		 * Submit online service request
		 */
		ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE;
		ilport->ilport_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_ONLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_ONLINE;
			/* Drop the lock around the provider callback. */
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			ilport->ilport_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LPORT_OFFLINE:
		switch (ilport->ilport_state) {
			case STMF_STATE_ONLINE:
				ret = STMF_SUCCESS;
				break;
			case STMF_STATE_OFFLINE:
			case STMF_STATE_OFFLINING:
				ret = STMF_ALREADY;
				break;
			case STMF_STATE_ONLINING:
				ret = STMF_BUSY;
				break;
			default:
				ret = STMF_BADSTATE;
				break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		ilport->ilport_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_OFFLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_OFFLINE;
			/* Drop the lock around the provider callback. */
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			/* Offline failed; the port remains online. */
			ilport->ilport_state = STMF_STATE_ONLINE;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	default:
		cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd);
		ret = STMF_INVALID_ARG;
		goto stmf_ctl_lock_exit;
	}

	return (STMF_SUCCESS);

stmf_ctl_lock_exit:;
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}
5893 
/*
 * Handler for stmf's own information class (SI_STMF); no queries are
 * implemented, so every request is refused.
 */
/* ARGSUSED */
stmf_status_t
stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
						uint32_t *bufsizep)
{
	return (STMF_NOT_SUPPORTED);
}
5901 
5902 /* ARGSUSED */
5903 stmf_status_t
5904 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5905 						uint32_t *bufsizep)
5906 {
5907 	uint32_t cl = SI_GET_CLASS(cmd);
5908 
5909 	if (cl == SI_STMF) {
5910 		return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep));
5911 	}
5912 	if (cl == SI_LPORT) {
5913 		return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1,
5914 		    arg2, buf, bufsizep));
5915 	} else if (cl == SI_LU) {
5916 		return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf,
5917 		    bufsizep));
5918 	}
5919 
5920 	return (STMF_NOT_SUPPORTED);
5921 }
5922 
5923 /*
5924  * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by
5925  * stmf to register local ports. The ident should have 20 bytes in buffer
5926  * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string.
5927  */
5928 void
5929 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn,
5930     uint8_t protocol_id)
5931 {
5932 	char wwn_str[20+1];
5933 
5934 	sdid->protocol_id = protocol_id;
5935 	sdid->piv = 1;
5936 	sdid->code_set = CODE_SET_ASCII;
5937 	sdid->association = ID_IS_TARGET_PORT;
5938 	sdid->ident_length = 20;
5939 	/* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */
5940 	(void) snprintf(wwn_str, sizeof (wwn_str),
5941 	    "wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
5942 	    wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
5943 	bcopy(wwn_str, (char *)sdid->ident, 20);
5944 }
5945 
5946 
5947 stmf_xfer_data_t *
5948 stmf_prepare_tpgs_data(uint8_t ilu_alua)
5949 {
5950 	stmf_xfer_data_t *xd;
5951 	stmf_i_local_port_t *ilport;
5952 	uint8_t *p;
5953 	uint32_t sz, asz, nports = 0, nports_standby = 0;
5954 
5955 	mutex_enter(&stmf_state.stmf_lock);
5956 	/* check if any ports are standby and create second group */
5957 	for (ilport = stmf_state.stmf_ilportlist; ilport;
5958 	    ilport = ilport->ilport_next) {
5959 		if (ilport->ilport_standby == 1) {
5960 			nports_standby++;
5961 		} else {
5962 			nports++;
5963 		}
5964 	}
5965 
5966 	/* The spec only allows for 255 ports to be reported per group */
5967 	nports = min(nports, 255);
5968 	nports_standby = min(nports_standby, 255);
5969 	sz = (nports * 4) + 12;
5970 	if (nports_standby && ilu_alua) {
5971 		sz += (nports_standby * 4) + 8;
5972 	}
5973 	asz = sz + sizeof (*xd) - 4;
5974 	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
5975 	if (xd == NULL) {
5976 		mutex_exit(&stmf_state.stmf_lock);
5977 		return (NULL);
5978 	}
5979 	xd->alloc_size = asz;
5980 	xd->size_left = sz;
5981 
5982 	p = xd->buf;
5983 
5984 	*((uint32_t *)p) = BE_32(sz - 4);
5985 	p += 4;
5986 	p[0] = 0x80;	/* PREF */
5987 	p[1] = 5;	/* AO_SUP, S_SUP */
5988 	if (stmf_state.stmf_alua_node == 1) {
5989 		p[3] = 1;	/* Group 1 */
5990 	} else {
5991 		p[3] = 0;	/* Group 0 */
5992 	}
5993 	p[7] = nports & 0xff;
5994 	p += 8;
5995 	for (ilport = stmf_state.stmf_ilportlist; ilport;
5996 	    ilport = ilport->ilport_next) {
5997 		if (ilport->ilport_standby == 1) {
5998 			continue;
5999 		}
6000 		((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
6001 		p += 4;
6002 	}
6003 	if (nports_standby && ilu_alua) {
6004 		p[0] = 0x02;	/* Non PREF, Standby */
6005 		p[1] = 5;	/* AO_SUP, S_SUP */
6006 		if (stmf_state.stmf_alua_node == 1) {
6007 			p[3] = 0;	/* Group 0 */
6008 		} else {
6009 			p[3] = 1;	/* Group 1 */
6010 		}
6011 		p[7] = nports_standby & 0xff;
6012 		p += 8;
6013 		for (ilport = stmf_state.stmf_ilportlist; ilport;
6014 		    ilport = ilport->ilport_next) {
6015 			if (ilport->ilport_standby == 0) {
6016 				continue;
6017 			}
6018 			((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
6019 			p += 4;
6020 		}
6021 	}
6022 
6023 	mutex_exit(&stmf_state.stmf_lock);
6024 
6025 	return (xd);
6026 }
6027 
6028 struct scsi_devid_desc *
6029 stmf_scsilib_get_devid_desc(uint16_t rtpid)
6030 {
6031 	scsi_devid_desc_t *devid = NULL;
6032 	stmf_i_local_port_t *ilport;
6033 
6034 	mutex_enter(&stmf_state.stmf_lock);
6035 
6036 	for (ilport = stmf_state.stmf_ilportlist; ilport;
6037 	    ilport = ilport->ilport_next) {
6038 		if (ilport->ilport_rtpid == rtpid) {
6039 			scsi_devid_desc_t *id = ilport->ilport_lport->lport_id;
6040 			uint32_t id_sz = sizeof (scsi_devid_desc_t) +
6041 			    id->ident_length;
6042 			devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz,
6043 			    KM_NOSLEEP);
6044 			if (devid != NULL) {
6045 				bcopy(id, devid, id_sz);
6046 			}
6047 			break;
6048 		}
6049 	}
6050 
6051 	mutex_exit(&stmf_state.stmf_lock);
6052 	return (devid);
6053 }
6054 
6055 uint16_t
6056 stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid)
6057 {
6058 	stmf_i_local_port_t	*ilport;
6059 	scsi_devid_desc_t	*id;
6060 	uint16_t		rtpid = 0;
6061 
6062 	mutex_enter(&stmf_state.stmf_lock);
6063 	for (ilport = stmf_state.stmf_ilportlist; ilport;
6064 	    ilport = ilport->ilport_next) {
6065 		id = ilport->ilport_lport->lport_id;
6066 		if ((devid->ident_length == id->ident_length) &&
6067 		    (memcmp(devid->ident, id->ident, id->ident_length) == 0)) {
6068 			rtpid = ilport->ilport_rtpid;
6069 			break;
6070 		}
6071 	}
6072 	mutex_exit(&stmf_state.stmf_lock);
6073 	return (rtpid);
6074 }
6075 
/* Generation counter mixed into generated LU ids for extra uniqueness. */
static uint16_t stmf_lu_id_gen_number = 0;
6077 
/*
 * Generate a unique LU identifier for the given IEEE company id.
 * Convenience wrapper around stmf_scsilib_uniq_lu_id2() with a host id
 * of 0, which lets the implementation derive the host portion itself.
 */
stmf_status_t
stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id)
{
	return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id));
}
6083 
/*
 * Generate a 16-byte NAA LU identifier into lu_id->ident, built from
 * the IEEE company id, a host identifier (MAC address or hostid), a
 * timestamp and a generation counter.  lu_id->ident_length must already
 * be 0x10; host_id of 0 means "derive the host portion here".
 */
stmf_status_t
stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
    scsi_devid_desc_t *lu_id)
{
	uint8_t *p;
	struct timeval32 timestamp32;
	uint32_t *t = (uint32_t *)&timestamp32;
	struct ether_addr mac;
	uint8_t *e = (uint8_t *)&mac;
	int hid = (int)host_id;

	if (company_id == COMPANY_ID_NONE)
		company_id = COMPANY_ID_SUN;

	if (lu_id->ident_length != 0x10)
		return (STMF_INVALID_ARG);

	/* p overlays the descriptor header and its ident payload. */
	p = (uint8_t *)lu_id;

	/*
	 * NOTE(review): the counter is incremented atomically but read
	 * back non-atomically below (p[18]/p[19]); concurrent callers
	 * could observe each other's increments -- confirm this is an
	 * acceptable (uniqueness is still preserved) trade-off.
	 */
	atomic_add_16(&stmf_lu_id_gen_number, 1);

	/* Descriptor header bytes, then NAA type 6 + 24-bit company id. */
	p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
	p[4] = ((company_id >> 20) & 0xf) | 0x60;
	p[5] = (company_id >> 12) & 0xff;
	p[6] = (company_id >> 4) & 0xff;
	p[7] = (company_id << 4) & 0xf0;
	/* No explicit host id: prefer the MAC, fall back to the hostid. */
	if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
		hid = BE_32((int)zone_get_hostid(NULL));
	}
	if (hid != 0) {
		e[0] = (hid >> 24) & 0xff;
		e[1] = (hid >> 16) & 0xff;
		e[2] = (hid >> 8) & 0xff;
		e[3] = hid & 0xff;
		e[4] = e[5] = 0;
	}
	/*
	 * NOTE(review): if localetheraddr() failed AND the hostid was 0,
	 * mac was never filled in and uninitialized stack bytes are
	 * copied here -- confirm whether that combination can occur.
	 */
	bcopy(e, p+8, 6);
	/* Timestamp (big-endian) and generation counter fill the tail. */
	uniqtime32(&timestamp32);
	*t = BE_32(*t);
	bcopy(t, p+14, 4);
	p[18] = (stmf_lu_id_gen_number >> 8) & 0xff;
	p[19] = stmf_lu_id_gen_number & 0xff;

	return (STMF_SUCCESS);
}
6129 
6130 /*
6131  * saa is sense key, ASC, ASCQ
6132  */
6133 void
6134 stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa)
6135 {
6136 	uint8_t sd[18];
6137 	task->task_scsi_status = st;
6138 	if (st == 2) {
6139 		bzero(sd, 18);
6140 		sd[0] = 0x70;
6141 		sd[2] = (saa >> 16) & 0xf;
6142 		sd[7] = 10;
6143 		sd[12] = (saa >> 8) & 0xff;
6144 		sd[13] = saa & 0xff;
6145 		task->task_sense_data = sd;
6146 		task->task_sense_length = 18;
6147 	} else {
6148 		task->task_sense_data = NULL;
6149 		task->task_sense_length = 0;
6150 	}
6151 	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
6152 }
6153 
/*
 * Build an INQUIRY VPD page 0x83 (device identification) response into
 * "page".  vpd_mask selects which STMF_VPD_* descriptors to emit and
 * byte0 becomes the first byte (peripheral qualifier/device type).
 * Returns the number of bytes written to page (at most page_len, never
 * more than 65535); page[2..3] always carry the total length of all
 * requested descriptors, even when the output was truncated.
 */
uint32_t
stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page,
    uint32_t page_len, uint8_t byte0, uint32_t vpd_mask)
{
	uint8_t		*p = NULL;
	uint8_t		small_buf[32];
	uint32_t	sz = 0;
	uint32_t	n = 4;
	uint32_t	m = 0;
	uint32_t	last_bit = 0;

	if (page_len < 4)
		return (0);
	if (page_len > 65535)
		page_len = 65535;

	page[0] = byte0;
	page[1] = 0x83;

	/*
	 * Each pass first appends the descriptor chosen on the previous
	 * pass (p/sz), then selects the next descriptor from vpd_mask.
	 * m accumulates the full descriptor length while n counts bytes
	 * actually copied (n stops growing once page_len is reached).
	 */
	/* CONSTCOND */
	while (1) {
		m += sz;
		if (sz && (page_len > n)) {
			uint32_t copysz;
			copysz = page_len > (n + sz) ? sz : page_len - n;
			bcopy(p, page + n, copysz);
			n += copysz;
		}
		vpd_mask &= ~last_bit;
		if (vpd_mask == 0)
			break;

		if (vpd_mask & STMF_VPD_LU_ID) {
			/* LU devid descriptor (4-byte header + ident). */
			last_bit = STMF_VPD_LU_ID;
			sz = task->task_lu->lu_id->ident_length + 4;
			p = (uint8_t *)task->task_lu->lu_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TARGET_ID) {
			/* Target port devid descriptor. */
			last_bit = STMF_VPD_TARGET_ID;
			sz = task->task_lport->lport_id->ident_length + 4;
			p = (uint8_t *)task->task_lport->lport_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TP_GROUP) {
			/* Target port group descriptor, built in small_buf. */
			stmf_i_local_port_t *ilport;
			last_bit = STMF_VPD_TP_GROUP;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;
			p[1] = 0x15;
			p[3] = 4;
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			/*
			 * Group 1 is reported when this port participates
			 * in ALUA (or is standby) with a relative target
			 * port id above 255, or when this node is ALUA
			 * node 1 and the port is not standby; all other
			 * ports report group 0.
			 */
			if ((stmf_state.stmf_alua_state &&
			    (ilport->ilport_alua || ilport->ilport_standby) &&
			    ilport->ilport_rtpid > 255) ||
			    (stmf_state.stmf_alua_node == 1 &&
			    ilport->ilport_standby != 1)) {
				p[7] = 1;	/* Group 1 */
			}
			sz = 8;
			continue;
		} else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) {
			/* Relative target port id descriptor. */
			stmf_i_local_port_t *ilport;

			last_bit = STMF_VPD_RELATIVE_TP_ID;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;
			p[1] = 0x14;
			p[3] = 4;
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			p[6] = (ilport->ilport_rtpid >> 8) & 0xff;
			p[7] = ilport->ilport_rtpid & 0xff;
			sz = 8;
			continue;
		} else {
			cmn_err(CE_WARN, "Invalid vpd_mask");
			break;
		}
	}

	/* PAGE LENGTH: full descriptor bytes, not the truncated count. */
	page[2] = (m >> 8) & 0xff;
	page[3] = m & 0xff;

	return (n);
}
6248 
/*
 * Default handler for REPORT TARGET PORT GROUPS.  Validates the CDB
 * allocation length, builds the response via stmf_prepare_tpgs_data()
 * and starts the data-in transfer.  dbuf may be NULL, in which case a
 * buffer is allocated here.
 */
void
stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu =
	    (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	stmf_xfer_data_t *xd;
	uint32_t sz, minsz;

	/* Route further completions for this task to dlun0's handlers. */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING;
	/* ALLOCATION LENGTH lives in CDB bytes 6-9 (big-endian). */
	task->task_cmd_xfer_length =
	    ((((uint32_t)task->task_cdb[6]) << 24) |
	    (((uint32_t)task->task_cdb[7]) << 16) |
	    (((uint32_t)task->task_cdb[8]) << 8) |
	    ((uint32_t)task->task_cdb[9]));

	if (task->task_additional_flags &
	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length =
		    task->task_cmd_xfer_length;
	}

	if (task->task_cmd_xfer_length == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	/* Less than the 4-byte length header cannot be expressed. */
	if (task->task_cmd_xfer_length < 4) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	sz = min(task->task_expected_xfer_length,
	    task->task_cmd_xfer_length);

	xd = stmf_prepare_tpgs_data(ilu->ilu_alua);

	if (xd == NULL) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return;
	}

	/* Truncate the response to what the initiator asked for. */
	sz = min(sz, xd->size_left);
	xd->size_left = sz;
	minsz = min(512, sz);

	if (dbuf == NULL)
		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
	if (dbuf == NULL) {
		kmem_free(xd, xd->alloc_size);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return;
	}
	/* xd ownership moves to the dbuf; stmf_xd_to_dbuf() frees it. */
	dbuf->db_lu_private = xd;
	stmf_xd_to_dbuf(dbuf, 1);

	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);

}
6312 
6313 void
6314 stmf_scsilib_handle_task_mgmt(scsi_task_t *task)
6315 {
6316 
6317 	switch (task->task_mgmt_function) {
6318 	/*
6319 	 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET
6320 	 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state
6321 	 * in these cases. This needs to be changed to abort only the required
6322 	 * set.
6323 	 */
6324 	case TM_ABORT_TASK:
6325 	case TM_ABORT_TASK_SET:
6326 	case TM_CLEAR_TASK_SET:
6327 	case TM_LUN_RESET:
6328 		stmf_handle_lun_reset(task);
6329 		/* issue the reset to the proxy node as well */
6330 		if (stmf_state.stmf_alua_state == 1) {
6331 			(void) stmf_proxy_scsi_cmd(task, NULL);
6332 		}
6333 		return;
6334 	case TM_TARGET_RESET:
6335 	case TM_TARGET_COLD_RESET:
6336 	case TM_TARGET_WARM_RESET:
6337 		stmf_handle_target_reset(task);
6338 		return;
6339 	default:
6340 		/* We dont support this task mgmt function */
6341 		stmf_scsilib_send_status(task, STATUS_CHECK,
6342 		    STMF_SAA_INVALID_FIELD_IN_CMD_IU);
6343 		return;
6344 	}
6345 }
6346 
/*
 * Default LUN reset handling (also used for the abort-class TMFs, see
 * stmf_scsilib_handle_task_mgmt()).  Marks the LU as having a reset in
 * progress, initiates aborts on every other task on the LU and then
 * polls this task until those aborts drain.
 */
void
stmf_handle_lun_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with target reset, grab this lock. The LU is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	/* Only one reset may be in progress on a LU at a time. */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * Mark this task as the one causing LU reset so that we know who
	 * was responsible for setting the ILU_RESET_ACTIVE. In case this
	 * task itself gets aborted, we will clear ILU_RESET_ACTIVE.
	 */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET;

	/* Initiatiate abort on all commands on this LU except this one */
	stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu);

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6389 
/*
 * Default target reset handling.  Under the state lock and the session
 * lock, verifies that no reset is already active on the session or on
 * any LU mapped into it, marks them all ILU_RESET_ACTIVE, initiates
 * aborts on every mapped LU and then polls this task until they drain.
 */
void
stmf_handle_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	int i, lf;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with LUN reset, grab this lock. The session is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	/* Grab the session lock as a writer to prevent any changes in it */
	rw_enter(iss->iss_lockp, RW_WRITER);

	if (iss->iss_flags & ISS_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE);

	/*
	 * Now go through each LUN in this session and make sure all of them
	 * can be reset.
	 */
	lm = iss->iss_sm;
	for (i = 0, lf = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lf++;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/* Some other reset beat us to it; back out. */
			atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
			rw_exit(iss->iss_lockp);
			mutex_exit(&stmf_state.stmf_lock);
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_OPERATION_IN_PROGRESS);
			return;
		}
	}
	if (lf == 0) {
		/* No luns in this session */
		atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/* ok, start the damage */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING |
	    ITASK_CAUSING_TARGET_RESET;
	/* First mark every mapped LU as reset-active... */
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	}

	/* ... then kick off the aborts on each of them. */
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED,
		    lm_ent->ent_lu);
	}

	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6481 
6482 int
6483 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask)
6484 {
6485 	scsi_task_t *task = itask->itask_task;
6486 	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
6487 	    task->task_session->ss_stmf_private;
6488 
6489 	rw_enter(iss->iss_lockp, RW_WRITER);
6490 	if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) ||
6491 	    (task->task_cdb[0] == SCMD_INQUIRY)) {
6492 		rw_exit(iss->iss_lockp);
6493 		return (0);
6494 	}
6495 	atomic_and_32(&iss->iss_flags,
6496 	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
6497 	rw_exit(iss->iss_lockp);
6498 
6499 	if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
6500 		return (0);
6501 	}
6502 	stmf_scsilib_send_status(task, STATUS_CHECK,
6503 	    STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED);
6504 	return (1);
6505 }
6506 
/*
 * One-time setup of the worker thread pool: clamps the pool-size
 * tunables, allocates the worker array and waits for
 * stmf_worker_mgmt() to start the first worker.
 */
void
stmf_worker_init()
{
	uint32_t i;

	/* Make local copy of global tunables */
	stmf_i_max_nworkers = stmf_max_nworkers;
	stmf_i_min_nworkers = stmf_min_nworkers;

	ASSERT(stmf_workers == NULL);
	/* Enforce a sane floor and keep max >= min. */
	if (stmf_i_min_nworkers < 4) {
		stmf_i_min_nworkers = 4;
	}
	if (stmf_i_max_nworkers < stmf_i_min_nworkers) {
		stmf_i_max_nworkers = stmf_i_min_nworkers;
	}
	stmf_workers = (stmf_worker_t *)kmem_zalloc(
	    sizeof (stmf_worker_t) * stmf_i_max_nworkers, KM_SLEEP);
	for (i = 0; i < stmf_i_max_nworkers; i++) {
		stmf_worker_t *w = &stmf_workers[i];
		mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL);
		cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL);
	}
	/* Run the management cycle fast during startup. */
	stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
	stmf_workers_state = STMF_WORKERS_ENABLED;

	/* Workers will be started by stmf_worker_mgmt() */

	/* Lets wait for atleast one worker to start */
	while (stmf_nworkers_cur == 0)
		delay(drv_usectohz(20 * 1000));
	/* Back off to the normal (slow) management cadence. */
	stmf_worker_mgmt_delay = drv_usectohz(3 * 1000 * 1000);
}
6540 
/*
 * Tear down the worker pool: disable it, wake the svc thread so
 * stmf_worker_mgmt() signals the workers to exit, and wait up to 10
 * seconds for them to drain.  Returns STMF_BUSY (re-enabling the pool)
 * if they do not exit in time, STMF_SUCCESS otherwise.
 */
stmf_status_t
stmf_worker_fini()
{
	int i;
	clock_t sb;

	if (stmf_workers_state == STMF_WORKERS_DISABLED)
		return (STMF_SUCCESS);
	ASSERT(stmf_workers);
	stmf_workers_state = STMF_WORKERS_DISABLED;
	/* Speed up the management cycle so terminations happen quickly. */
	stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
	cv_signal(&stmf_state.stmf_cv);

	/* 10-second deadline for all workers to exit. */
	sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000);
	/* Wait for all the threads to die */
	while (stmf_nworkers_cur != 0) {
		if (ddi_get_lbolt() > sb) {
			stmf_workers_state = STMF_WORKERS_ENABLED;
			return (STMF_BUSY);
		}
		delay(drv_usectohz(100 * 1000));
	}
	for (i = 0; i < stmf_i_max_nworkers; i++) {
		stmf_worker_t *w = &stmf_workers[i];
		mutex_destroy(&w->worker_lock);
		cv_destroy(&w->worker_cv);
	}
	kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers);
	stmf_workers = NULL;

	return (STMF_SUCCESS);
}
6573 
/*
 * Body of one worker thread.  Drains the worker's task queue, demuxing
 * each queued command to the LU/lport entry points; tasks that must be
 * retried later (polling, aborts still referenced) are parked on the
 * timed wait queue.  The thread sleeps (timed or untimed) when idle and
 * exits once STMF_WORKER_TERMINATE is set and no references remain.
 */
void
stmf_worker_task(void *arg)
{
	stmf_worker_t *w;
	stmf_i_scsi_session_t *iss;
	scsi_task_t *task;
	stmf_i_scsi_task_t *itask;
	stmf_data_buf_t *dbuf;
	stmf_lu_t *lu;
	clock_t wait_timer = 0;
	clock_t wait_ticks, wait_delta = 0;
	uint32_t old, new;
	uint8_t curcmd;
	uint8_t abort_free;
	uint8_t wait_queue;
	uint8_t dec_qdepth;

	w = (stmf_worker_t *)arg;
	wait_ticks = drv_usectohz(10000);

	DTRACE_PROBE1(worker__create, stmf_worker_t, w);
	mutex_enter(&w->worker_lock);
	w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE;
	/* Top of the run loop; always re-entered with worker_lock held. */
stmf_worker_loop:;
	if ((w->worker_ref_count == 0) &&
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		/* Asked to terminate and no task references us: exit. */
		w->worker_flags &= ~(STMF_WORKER_STARTED |
		    STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE);
		w->worker_tid = NULL;
		mutex_exit(&w->worker_lock);
		DTRACE_PROBE1(worker__destroy, stmf_worker_t, w);
		thread_exit();
	}
	/* CONSTCOND */
	while (1) {
		dec_qdepth = 0;
		if (wait_timer && (ddi_get_lbolt() >= wait_timer)) {
			/* Wait period expired: splice the wait queue back. */
			wait_timer = 0;
			wait_delta = 0;
			if (w->worker_wait_head) {
				ASSERT(w->worker_wait_tail);
				if (w->worker_task_head == NULL)
					w->worker_task_head =
					    w->worker_wait_head;
				else
					w->worker_task_tail->itask_worker_next =
					    w->worker_wait_head;
				w->worker_task_tail = w->worker_wait_tail;
				w->worker_wait_head = w->worker_wait_tail =
				    NULL;
			}
		}
		if ((itask = w->worker_task_head) == NULL) {
			break;
		}
		task = itask->itask_task;
		DTRACE_PROBE2(worker__active, stmf_worker_t, w,
		    scsi_task_t *, task);
		/* Dequeue the task from the head of the run queue. */
		w->worker_task_head = itask->itask_worker_next;
		if (w->worker_task_head == NULL)
			w->worker_task_tail = NULL;

		wait_queue = 0;
		abort_free = 0;
		if (itask->itask_ncmds > 0) {
			curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1];
		} else {
			/*
			 * NOTE(review): curcmd is not assigned on this
			 * path; the CAS loop below is expected to set it
			 * via the ITASK_BEING_ABORTED branch -- confirm
			 * that flag is always set when ncmds == 0.
			 */
			ASSERT(itask->itask_flags & ITASK_BEING_ABORTED);
		}
		/* Lock-free itask_flags update via compare-and-swap. */
		do {
			old = itask->itask_flags;
			if (old & ITASK_BEING_ABORTED) {
				itask->itask_ncmds = 1;
				curcmd = itask->itask_cmd_stack[0] =
				    ITASK_CMD_ABORT;
				goto out_itask_flag_loop;
			} else if ((curcmd & ITASK_CMD_MASK) ==
			    ITASK_CMD_NEW_TASK) {
				/*
				 * set ITASK_KSTAT_IN_RUNQ, this flag
				 * will not reset until task completed
				 */
				new = old | ITASK_KNOWN_TO_LU |
				    ITASK_KSTAT_IN_RUNQ;
			} else {
				goto out_itask_flag_loop;
			}
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

out_itask_flag_loop:

		/*
		 * Decide if this task needs to go to a queue and/or if
		 * we can decrement the itask_cmd_stack.
		 */
		if (curcmd == ITASK_CMD_ABORT) {
			if (itask->itask_flags & (ITASK_KNOWN_TO_LU |
			    ITASK_KNOWN_TO_TGT_PORT)) {
				/* Providers still hold it; retry later. */
				wait_queue = 1;
			} else {
				/* Nobody references it; free outright. */
				abort_free = 1;
			}
		} else if ((curcmd & ITASK_CMD_POLL) &&
		    (itask->itask_poll_timeout > ddi_get_lbolt())) {
			/* Poll interval has not elapsed yet. */
			wait_queue = 1;
		}

		if (wait_queue) {
			/* Park on the wait queue; arm the wait timer. */
			itask->itask_worker_next = NULL;
			if (w->worker_wait_tail) {
				w->worker_wait_tail->itask_worker_next = itask;
			} else {
				w->worker_wait_head = itask;
			}
			w->worker_wait_tail = itask;
			if (wait_timer == 0) {
				wait_timer = ddi_get_lbolt() + wait_ticks;
				wait_delta = wait_ticks;
			}
		} else if ((--(itask->itask_ncmds)) != 0) {
			/* More stacked commands: requeue at the tail. */
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
		} else {
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_IN_WORKER_QUEUE);
			/*
			 * This is where the queue depth should go down by
			 * one but we delay that on purpose to account for
			 * the call into the provider. The actual decrement
			 * happens after the worker has done its job.
			 */
			dec_qdepth = 1;
			itask->itask_waitq_time +=
			    gethrtime() - itask->itask_waitq_enter_timestamp;
		}

		/* We made it here means we are going to call LU */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
			lu = task->task_lu;
		else
			lu = dlun0;
		dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
		/* Provider calls are made without holding worker_lock. */
		mutex_exit(&w->worker_lock);
		curcmd &= ITASK_CMD_MASK;
		stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf);
		switch (curcmd) {
		case ITASK_CMD_NEW_TASK:
			iss = (stmf_i_scsi_session_t *)
			    task->task_session->ss_stmf_private;
			stmf_itl_lu_new_task(itask);
			if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
				/* May complete the task with a UA. */
				if (stmf_handle_cmd_during_ic(itask))
					break;
			}
#ifdef	DEBUG
			if (stmf_drop_task_counter > 0) {
				/* Debug hook: deliberately drop a task. */
				if (atomic_add_32_nv(
				    (uint32_t *)&stmf_drop_task_counter,
				    -1) == 1) {
					break;
				}
			}
#endif
			DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
			lu->lu_new_task(task, dbuf);
			break;
		case ITASK_CMD_DATA_XFER_DONE:
			lu->lu_dbuf_xfer_done(task, dbuf);
			break;
		case ITASK_CMD_STATUS_DONE:
			lu->lu_send_status_done(task);
			break;
		case ITASK_CMD_ABORT:
			if (abort_free) {
				stmf_task_free(task);
			} else {
				stmf_do_task_abort(task);
			}
			break;
		case ITASK_CMD_POLL_LU:
			if (!wait_queue) {
				lu->lu_task_poll(task);
			}
			break;
		case ITASK_CMD_POLL_LPORT:
			if (!wait_queue)
				task->task_lport->lport_task_poll(task);
			break;
		case ITASK_CMD_SEND_STATUS:
		/* case ITASK_CMD_XFER_DATA: */
			break;
		}
		mutex_enter(&w->worker_lock);
		if (dec_qdepth) {
			w->worker_queue_depth--;
		}
	}
	if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) {
		if (w->worker_ref_count == 0)
			goto stmf_worker_loop;
		else {
			/* Still referenced: re-check after a short nap. */
			wait_timer = ddi_get_lbolt() + 1;
			wait_delta = 1;
		}
	}
	w->worker_flags &= ~STMF_WORKER_ACTIVE;
	if (wait_timer) {
		DTRACE_PROBE1(worker__timed__sleep, stmf_worker_t, w);
		(void) cv_reltimedwait(&w->worker_cv, &w->worker_lock,
		    wait_delta, TR_CLOCK_TICK);
	} else {
		DTRACE_PROBE1(worker__sleep, stmf_worker_t, w);
		cv_wait(&w->worker_cv, &w->worker_lock);
	}
	DTRACE_PROBE1(worker__wakeup, stmf_worker_t, w);
	w->worker_flags |= STMF_WORKER_ACTIVE;
	goto stmf_worker_loop;
}
6797 
/*
 * Worker pool manager, called periodically from the stmf svc thread.
 * First completes any pool-size transition already in flight, then
 * decides a new target size: at shutdown shrink to zero, at startup
 * grow to the minimum, otherwise track the peak queue depth observed
 * over the last one-second sampling interval (scale-down is deferred
 * by stmf_worker_scale_down_delay seconds to avoid thrashing).
 */
void
stmf_worker_mgmt()
{
	int i;
	int workers_needed;
	uint32_t qd;
	clock_t tps, d = 0;
	uint32_t cur_max_ntasks = 0;
	stmf_worker_t *w;

	/* Check if we are trying to increase the # of threads */
	for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) {
		if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) {
			stmf_nworkers_cur++;
			stmf_nworkers_accepting_cmds++;
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are trying to decrease the # of workers */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) {
			stmf_nworkers_cur--;
			/*
			 * stmf_nworkers_accepting_cmds has already been
			 * updated by the request to reduce the # of workers.
			 */
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are being asked to quit */
	if (stmf_workers_state != STMF_WORKERS_ENABLED) {
		if (stmf_nworkers_cur) {
			workers_needed = 0;
			goto worker_mgmt_trigger_change;
		}
		return;
	}
	/* Check if we are starting */
	if (stmf_nworkers_cur < stmf_i_min_nworkers) {
		workers_needed = stmf_i_min_nworkers;
		goto worker_mgmt_trigger_change;
	}

	/* Sample the per-worker peak queue depths once per second. */
	tps = drv_usectohz(1 * 1000 * 1000);
	if ((stmf_wm_last != 0) &&
	    ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) {
		qd = 0;
		for (i = 0; i < stmf_nworkers_accepting_cmds; i++) {
			qd += stmf_workers[i].worker_max_qdepth_pu;
			stmf_workers[i].worker_max_qdepth_pu = 0;
			if (stmf_workers[i].worker_max_sys_qdepth_pu >
			    cur_max_ntasks) {
				cur_max_ntasks =
				    stmf_workers[i].worker_max_sys_qdepth_pu;
			}
			stmf_workers[i].worker_max_sys_qdepth_pu = 0;
		}
	}
	stmf_wm_last = ddi_get_lbolt();
	if (d <= tps) {
		/* still ramping up */
		return;
	}
	/* max qdepth cannot be more than max tasks */
	if (qd > cur_max_ntasks)
		qd = cur_max_ntasks;

	/* See if we have more workers */
	if (qd < stmf_nworkers_accepting_cmds) {
		/*
		 * Since we dont reduce the worker count right away, monitor
		 * the highest load during the scale_down_delay.
		 */
		if (qd > stmf_worker_scale_down_qd)
			stmf_worker_scale_down_qd = qd;
		if (stmf_worker_scale_down_timer == 0) {
			stmf_worker_scale_down_timer = ddi_get_lbolt() +
			    drv_usectohz(stmf_worker_scale_down_delay *
			    1000 * 1000);
			return;
		}
		if (ddi_get_lbolt() < stmf_worker_scale_down_timer) {
			return;
		}
		/* Its time to reduce the workers */
		if (stmf_worker_scale_down_qd < stmf_i_min_nworkers)
			stmf_worker_scale_down_qd = stmf_i_min_nworkers;
		if (stmf_worker_scale_down_qd > stmf_i_max_nworkers)
			stmf_worker_scale_down_qd = stmf_i_max_nworkers;
		if (stmf_worker_scale_down_qd == stmf_nworkers_cur)
			return;
		workers_needed = stmf_worker_scale_down_qd;
		stmf_worker_scale_down_qd = 0;
		goto worker_mgmt_trigger_change;
	}
	/* Load went up again; cancel any pending scale-down. */
	stmf_worker_scale_down_qd = 0;
	stmf_worker_scale_down_timer = 0;
	/* Clamp the new target to [min, max] before growing. */
	if (qd > stmf_i_max_nworkers)
		qd = stmf_i_max_nworkers;
	if (qd < stmf_i_min_nworkers)
		qd = stmf_i_min_nworkers;
	if (qd == stmf_nworkers_cur)
		return;
	workers_needed = qd;
	goto worker_mgmt_trigger_change;

	/* NOTREACHED */
	return;

worker_mgmt_trigger_change:
	ASSERT(workers_needed != stmf_nworkers_cur);
	if (workers_needed > stmf_nworkers_cur) {
		stmf_nworkers_needed = workers_needed;
		for (i = stmf_nworkers_cur; i < workers_needed; i++) {
			w = &stmf_workers[i];
			w->worker_tid = thread_create(NULL, 0, stmf_worker_task,
			    (void *)&stmf_workers[i], 0, &p0, TS_RUN,
			    minclsyspri);
		}
		return;
	}
	/* At this point we know that we are decreasing the # of workers */
	stmf_nworkers_accepting_cmds = workers_needed;
	stmf_nworkers_needed = workers_needed;
	/* Signal the workers that its time to quit */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		w = &stmf_workers[i];
		ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED));
		mutex_enter(&w->worker_lock);
		w->worker_flags |= STMF_WORKER_TERMINATE;
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
		mutex_exit(&w->worker_lock);
	}
}
6937 
6938 /*
6939  * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
6940  * If all the data has been filled out, frees the xd and makes
6941  * db_lu_private NULL.
6942  */
6943 void
6944 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off)
6945 {
6946 	stmf_xfer_data_t *xd;
6947 	uint8_t *p;
6948 	int i;
6949 	uint32_t s;
6950 
6951 	xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
6952 	dbuf->db_data_size = 0;
6953 	if (set_rel_off)
6954 		dbuf->db_relative_offset = xd->size_done;
6955 	for (i = 0; i < dbuf->db_sglist_length; i++) {
6956 		s = min(xd->size_left, dbuf->db_sglist[i].seg_length);
6957 		p = &xd->buf[xd->size_done];
6958 		bcopy(p, dbuf->db_sglist[i].seg_addr, s);
6959 		xd->size_left -= s;
6960 		xd->size_done += s;
6961 		dbuf->db_data_size += s;
6962 		if (xd->size_left == 0) {
6963 			kmem_free(xd, xd->alloc_size);
6964 			dbuf->db_lu_private = NULL;
6965 			return;
6966 		}
6967 	}
6968 }
6969 
/*
 * dlun0 task-allocation entry point.  dlun0 keeps no per-task private
 * state, so allocation trivially succeeds.
 */
/* ARGSUSED */
stmf_status_t
stmf_dlun0_task_alloc(scsi_task_t *task)
{
	return (STMF_SUCCESS);
}
6976 
6977 void
6978 stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
6979 {
6980 	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
6981 	stmf_i_scsi_session_t *iss;
6982 	uint32_t sz, minsz;
6983 	uint8_t *p;
6984 	stmf_xfer_data_t *xd;
6985 	uint8_t inq_page_length = 31;
6986 
6987 	if (task->task_mgmt_function) {
6988 		stmf_scsilib_handle_task_mgmt(task);
6989 		return;
6990 	}
6991 
6992 	switch (cdbp[0]) {
6993 	case SCMD_INQUIRY:
6994 		/*
6995 		 * Basic protocol checks.  In addition, only reply to
6996 		 * standard inquiry.  Otherwise, the LU provider needs
6997 		 * to respond.
6998 		 */
6999 
7000 		if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) {
7001 			stmf_scsilib_send_status(task, STATUS_CHECK,
7002 			    STMF_SAA_INVALID_FIELD_IN_CDB);
7003 			return;
7004 		}
7005 
7006 		task->task_cmd_xfer_length =
7007 		    (((uint32_t)cdbp[3]) << 8) | cdbp[4];
7008 
7009 		if (task->task_additional_flags &
7010 		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
7011 			task->task_expected_xfer_length =
7012 			    task->task_cmd_xfer_length;
7013 		}
7014 
7015 		sz = min(task->task_expected_xfer_length,
7016 		    min(36, task->task_cmd_xfer_length));
7017 		minsz = 36;
7018 
7019 		if (sz == 0) {
7020 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7021 			return;
7022 		}
7023 
7024 		if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) {
7025 			/*
7026 			 * Ignore any preallocated dbuf if the size is less
7027 			 * than 36. It will be freed during the task_free.
7028 			 */
7029 			dbuf = NULL;
7030 		}
7031 		if (dbuf == NULL)
7032 			dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0);
7033 		if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) {
7034 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7035 			    STMF_ALLOC_FAILURE, NULL);
7036 			return;
7037 		}
7038 		dbuf->db_lu_private = NULL;
7039 
7040 		p = dbuf->db_sglist[0].seg_addr;
7041 
7042 		/*
7043 		 * Standard inquiry handling only.
7044 		 */
7045 
7046 		bzero(p, inq_page_length + 5);
7047 
7048 		p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN;
7049 		p[2] = 5;
7050 		p[3] = 0x12;
7051 		p[4] = inq_page_length;
7052 		p[6] = 0x80;
7053 
7054 		(void) strncpy((char *)p+8, "SUN     ", 8);
7055 		(void) strncpy((char *)p+16, "COMSTAR	       ", 16);
7056 		(void) strncpy((char *)p+32, "1.0 ", 4);
7057 
7058 		dbuf->db_data_size = sz;
7059 		dbuf->db_relative_offset = 0;
7060 		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
7061 		(void) stmf_xfer_data(task, dbuf, 0);
7062 
7063 		return;
7064 
7065 	case SCMD_REPORT_LUNS:
7066 		task->task_cmd_xfer_length =
7067 		    ((((uint32_t)task->task_cdb[6]) << 24) |
7068 		    (((uint32_t)task->task_cdb[7]) << 16) |
7069 		    (((uint32_t)task->task_cdb[8]) << 8) |
7070 		    ((uint32_t)task->task_cdb[9]));
7071 
7072 		if (task->task_additional_flags &
7073 		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
7074 			task->task_expected_xfer_length =
7075 			    task->task_cmd_xfer_length;
7076 		}
7077 
7078 		sz = min(task->task_expected_xfer_length,
7079 		    task->task_cmd_xfer_length);
7080 
7081 		if (sz < 16) {
7082 			stmf_scsilib_send_status(task, STATUS_CHECK,
7083 			    STMF_SAA_INVALID_FIELD_IN_CDB);
7084 			return;
7085 		}
7086 
7087 		iss = (stmf_i_scsi_session_t *)
7088 		    task->task_session->ss_stmf_private;
7089 		rw_enter(iss->iss_lockp, RW_WRITER);
7090 		xd = stmf_session_prepare_report_lun_data(iss->iss_sm);
7091 		rw_exit(iss->iss_lockp);
7092 
7093 		if (xd == NULL) {
7094 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7095 			    STMF_ALLOC_FAILURE, NULL);
7096 			return;
7097 		}
7098 
7099 		sz = min(sz, xd->size_left);
7100 		xd->size_left = sz;
7101 		minsz = min(512, sz);
7102 
7103 		if (dbuf == NULL)
7104 			dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
7105 		if (dbuf == NULL) {
7106 			kmem_free(xd, xd->alloc_size);
7107 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7108 			    STMF_ALLOC_FAILURE, NULL);
7109 			return;
7110 		}
7111 		dbuf->db_lu_private = xd;
7112 		stmf_xd_to_dbuf(dbuf, 1);
7113 
7114 		atomic_and_32(&iss->iss_flags,
7115 		    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
7116 		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
7117 		(void) stmf_xfer_data(task, dbuf, 0);
7118 		return;
7119 	}
7120 
7121 	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
7122 }
7123 
/*
 * Data-transfer completion callback for dlun0 tasks.  Continues
 * multi-buffer transfers while xfer data remains attached to the dbuf
 * (db_lu_private non-NULL); once everything has moved, sends final
 * status -- except for proxied tasks, which are completed by pppt.
 */
void
stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		/* Transfer failed; abort the task with the xfer status. */
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	if (dbuf->db_lu_private) {
		/* There is more */
		stmf_xd_to_dbuf(dbuf, 1);
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}

	stmf_free_dbuf(task, dbuf);
	/*
	 * If this is a proxy task, it will need to be completed from the
	 * proxy port provider. This message lets pppt know that the xfer
	 * is complete. When we receive the status from pppt, we will
	 * then relay that status back to the lport.
	 */
	if (itask->itask_flags & ITASK_PROXY_TASK) {
		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
		stmf_status_t ic_ret = STMF_FAILURE;
		uint64_t session_msg_id;
		/* Proxy message ids are allocated under the state lock. */
		mutex_enter(&stmf_state.stmf_lock);
		session_msg_id = stmf_proxy_msg_id++;
		mutex_exit(&stmf_state.stmf_lock);
		/* send xfer done status to pppt */
		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
		    itask->itask_proxy_msg_id,
		    task->task_session->ss_session_id,
		    STMF_SUCCESS, session_msg_id);
		if (ic_xfer_done_msg) {
			ic_ret = ic_tx_msg(ic_xfer_done_msg);
			if (ic_ret != STMF_IC_MSG_SUCCESS) {
				cmn_err(CE_WARN, "unable to xmit session msg");
			}
		}
		/* task will be completed from pppt */
		return;
	}
	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
7173 
/*
 * Status-sent callback for dlun0: nothing to clean up.
 */
/* ARGSUSED */
void
stmf_dlun0_status_done(scsi_task_t *task)
{
}
7179 
/*
 * Task-free callback for dlun0: no per-task resources to release.
 */
/* ARGSUSED */
void
stmf_dlun0_task_free(scsi_task_t *task)
{
}
7185 
7186 /* ARGSUSED */
7187 stmf_status_t
7188 stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
7189 {
7190 	scsi_task_t *task = (scsi_task_t *)arg;
7191 	stmf_i_scsi_task_t *itask =
7192 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
7193 	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7194 	int i;
7195 	uint8_t map;
7196 
7197 	if ((task->task_mgmt_function) && (itask->itask_flags &
7198 	    (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) {
7199 		switch (task->task_mgmt_function) {
7200 		case TM_ABORT_TASK:
7201 		case TM_ABORT_TASK_SET:
7202 		case TM_CLEAR_TASK_SET:
7203 		case TM_LUN_RESET:
7204 			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7205 			break;
7206 		case TM_TARGET_RESET:
7207 		case TM_TARGET_COLD_RESET:
7208 		case TM_TARGET_WARM_RESET:
7209 			stmf_abort_target_reset(task);
7210 			break;
7211 		}
7212 		return (STMF_ABORT_SUCCESS);
7213 	}
7214 
7215 	/*
7216 	 * OK so its not a task mgmt. Make sure we free any xd sitting
7217 	 * inside any dbuf.
7218 	 */
7219 	if ((map = itask->itask_allocated_buf_map) != 0) {
7220 		for (i = 0; i < 4; i++) {
7221 			if ((map & 1) &&
7222 			    ((itask->itask_dbufs[i])->db_lu_private)) {
7223 				stmf_xfer_data_t *xd;
7224 				stmf_data_buf_t *dbuf;
7225 
7226 				dbuf = itask->itask_dbufs[i];
7227 				xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
7228 				dbuf->db_lu_private = NULL;
7229 				kmem_free(xd, xd->alloc_size);
7230 			}
7231 			map >>= 1;
7232 		}
7233 	}
7234 	return (STMF_ABORT_SUCCESS);
7235 }
7236 
7237 void
7238 stmf_dlun0_task_poll(struct scsi_task *task)
7239 {
7240 	/* Right now we only do this for handling task management functions */
7241 	ASSERT(task->task_mgmt_function);
7242 
7243 	switch (task->task_mgmt_function) {
7244 	case TM_ABORT_TASK:
7245 	case TM_ABORT_TASK_SET:
7246 	case TM_CLEAR_TASK_SET:
7247 	case TM_LUN_RESET:
7248 		(void) stmf_lun_reset_poll(task->task_lu, task, 0);
7249 		return;
7250 	case TM_TARGET_RESET:
7251 	case TM_TARGET_COLD_RESET:
7252 	case TM_TARGET_WARM_RESET:
7253 		stmf_target_reset_poll(task);
7254 		return;
7255 	}
7256 }
7257 
/*
 * Control entry point for dlun0.  dlun0 is never brought online or
 * offline through this path, so a call here indicates a bug; log it.
 */
/* ARGSUSED */
void
stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg)
{
	/* This function will never be called */
	cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd);
}
7265 
/*
 * Allocates and wires up dlun0, the pseudo LU that answers INQUIRY and
 * REPORT LUNS for unmapped LUNs, installing all of its entry points.
 */
void
stmf_dlun_init()
{
	stmf_i_lu_t *ilu;

	dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0);
	dlun0->lu_task_alloc = stmf_dlun0_task_alloc;
	dlun0->lu_new_task = stmf_dlun0_new_task;
	dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done;
	dlun0->lu_send_status_done = stmf_dlun0_status_done;
	dlun0->lu_task_free = stmf_dlun0_task_free;
	dlun0->lu_abort = stmf_dlun0_abort;
	dlun0->lu_task_poll = stmf_dlun0_task_poll;
	dlun0->lu_ctl = stmf_dlun0_ctl;

	/* Start task accounting on the first counter. */
	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
}
7284 
7285 stmf_status_t
7286 stmf_dlun_fini()
7287 {
7288 	stmf_i_lu_t *ilu;
7289 
7290 	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
7291 
7292 	ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
7293 	if (ilu->ilu_ntasks) {
7294 		stmf_i_scsi_task_t *itask, *nitask;
7295 
7296 		nitask = ilu->ilu_tasks;
7297 		do {
7298 			itask = nitask;
7299 			nitask = itask->itask_lu_next;
7300 			dlun0->lu_task_free(itask->itask_task);
7301 			stmf_free(itask->itask_task);
7302 		} while (nitask != NULL);
7303 
7304 	}
7305 	stmf_free(dlun0);
7306 	return (STMF_SUCCESS);
7307 }
7308 
/*
 * Cancels an in-progress target reset for the task's session: clears
 * ILU_RESET_ACTIVE on every LU mapped into the session and then clears
 * the session's own ISS_RESET_ACTIVE flag, all under the session lock.
 */
void
stmf_abort_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	int i;

	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		/* Sparse map: skip empty slots. */
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
		}
	}
	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
	rw_exit(iss->iss_lockp);
}
7333 
/*
 * The return value is only used by function managing target reset.
 */
/*
 * Polls a LUN reset for completion.  When all tasks on the LU have
 * drained (or only the TM task itself remains), calls the LU's abort
 * entry with STMF_LU_RESET_STATE and completes or re-arms the poll as
 * appropriate.  With target_reset set, status handling is left to the
 * target-reset caller and the result is returned instead.
 */
stmf_status_t
stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	int ntasks_pending;

	ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free;
	/*
	 * This function is also used during Target reset. The idea is that
	 * once all the commands are aborted, call the LU's reset entry
	 * point (abort entry point with a reset flag). But if this Task
	 * mgmt is running on this LU then all the tasks cannot be aborted.
	 * one task (this task) will still be running which is OK.
	 */
	if ((ntasks_pending == 0) || ((task->task_lu == lu) &&
	    (ntasks_pending == 1))) {
		stmf_status_t ret;

		if ((task->task_mgmt_function == TM_LUN_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_WARM_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_COLD_RESET)) {
			ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0);
		} else {
			ret = STMF_SUCCESS;
		}
		if (ret == STMF_SUCCESS) {
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
		}
		if (target_reset) {
			/* Caller (target reset) decides what happens next. */
			return (ret);
		}
		if (ret == STMF_SUCCESS) {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			return (ret);
		}
		if (ret != STMF_BUSY) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL);
			return (ret);
		}
		/* STMF_BUSY: fall through and poll again. */
	}

	if (target_reset) {
		/* Tell target reset polling code that we are not done */
		return (STMF_BUSY);
	}

	/* Re-arm the poll; abort the task if that cannot be scheduled. */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_SUCCESS);
	}

	return (STMF_SUCCESS);
}
7393 
/*
 * Polls a target reset for completion by polling the LUN reset on every
 * mapped LU that still has ILU_RESET_ACTIVE set.  The session lock is
 * dropped around each LUN poll (which may send status or block) and
 * reacquired afterwards.  When every LU is done, the session reset flag
 * is cleared and GOOD status is sent; otherwise the poll is re-armed.
 */
void
stmf_target_reset_poll(struct scsi_task *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	stmf_status_t ret;
	int i;
	int not_done = 0;

	ASSERT(iss->iss_flags & ISS_RESET_ACTIVE);

	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/* Drop the lock while polling this LU. */
			rw_exit(iss->iss_lockp);
			ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1);
			rw_enter(iss->iss_lockp, RW_READER);
			if (ret == STMF_SUCCESS)
				continue;
			not_done = 1;
			if (ret != STMF_BUSY) {
				/* Hard failure; abort the whole reset. */
				rw_exit(iss->iss_lockp);
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ABORTED, NULL);
				return;
			}
		}
	}
	rw_exit(iss->iss_lockp);

	if (not_done) {
		/* At least one LU is still busy: schedule another poll. */
		if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
		    != STMF_SUCCESS) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		return;
	}

	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);

	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
7446 
7447 stmf_status_t
7448 stmf_lu_add_event(stmf_lu_t *lu, int eventid)
7449 {
7450 	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7451 
7452 	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7453 		return (STMF_INVALID_ARG);
7454 	}
7455 
7456 	STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid);
7457 	return (STMF_SUCCESS);
7458 }
7459 
7460 stmf_status_t
7461 stmf_lu_remove_event(stmf_lu_t *lu, int eventid)
7462 {
7463 	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7464 
7465 	if (eventid == STMF_EVENT_ALL) {
7466 		STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
7467 		return (STMF_SUCCESS);
7468 	}
7469 
7470 	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7471 		return (STMF_INVALID_ARG);
7472 	}
7473 
7474 	STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
7475 	return (STMF_SUCCESS);
7476 }
7477 
7478 stmf_status_t
7479 stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
7480 {
7481 	stmf_i_local_port_t *ilport =
7482 	    (stmf_i_local_port_t *)lport->lport_stmf_private;
7483 
7484 	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7485 		return (STMF_INVALID_ARG);
7486 	}
7487 
7488 	STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
7489 	return (STMF_SUCCESS);
7490 }
7491 
7492 stmf_status_t
7493 stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
7494 {
7495 	stmf_i_local_port_t *ilport =
7496 	    (stmf_i_local_port_t *)lport->lport_stmf_private;
7497 
7498 	if (eventid == STMF_EVENT_ALL) {
7499 		STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
7500 		return (STMF_SUCCESS);
7501 	}
7502 
7503 	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7504 		return (STMF_INVALID_ARG);
7505 	}
7506 
7507 	STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
7508 	return (STMF_SUCCESS);
7509 }
7510 
7511 void
7512 stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
7513 {
7514 	if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
7515 	    (ilu->ilu_lu->lu_event_handler != NULL)) {
7516 		ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
7517 	}
7518 }
7519 
7520 void
7521 stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
7522 				uint32_t flags)
7523 {
7524 	if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
7525 	    (ilport->ilport_lport->lport_event_handler != NULL)) {
7526 		ilport->ilport_lport->lport_event_handler(
7527 		    ilport->ilport_lport, eventid, arg, flags);
7528 	}
7529 }
7530 
/*
 * With the possibility of having multiple itl sessions pointing to the
 * same itl_kstat_info, the ilu_kstat_lock mutex is used to synchronize
 * the kstat update of the ilu_kstat_io, itl_kstat_taskq and itl_kstat_lu_xfer
 * statistics.
 */
/*
 * kstat bookkeeping for the start of a task: record the start
 * timestamp and put the task on the taskq/LU wait queues.  No-op for
 * dlun0 or when the task has no itl data attached.
 */
void
stmf_itl_task_start(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t	*itl = itask->itask_itl_datap;
	scsi_task_t	*task = itask->itask_task;
	stmf_i_lu_t	*ilu;

	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	itask->itask_start_timestamp = gethrtime();
	kstat_waitq_enter(KSTAT_IO_PTR(itl->itl_kstat_taskq));
	stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_enter);
	mutex_exit(ilu->ilu_kstat_io->ks_lock);

	/* lport queue update is done outside the ilu kstat lock */
	stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_enter);
}
7555 
/*
 * kstat bookkeeping for a task being handed to the LU: move it from
 * the wait queue to the run queue on the taskq and LU kstats.  No-op
 * for dlun0 or when the task has no itl data attached.
 */
void
stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t	*itl = itask->itask_itl_datap;
	scsi_task_t	*task = itask->itask_task;
	stmf_i_lu_t	*ilu;

	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	kstat_waitq_to_runq(KSTAT_IO_PTR(itl->itl_kstat_taskq));
	stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_to_runq);
	mutex_exit(ilu->ilu_kstat_io->ks_lock);

	/* lport queue update is done outside the ilu kstat lock */
	stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_to_runq);
}
7573 
/*
 * kstat bookkeeping for task completion: folds the task's accumulated
 * wait/read/write/elapsed times and byte counts into the itl and taskq
 * kstats, then takes the task off the run (or wait) queue.  The exit
 * path depends on whether the task ever made it onto the run queue
 * (ITASK_KSTAT_IN_RUNQ).  No-op for dlun0 or tasks without itl data.
 */
void
stmf_itl_task_done(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t		*itl = itask->itask_itl_datap;
	scsi_task_t		*task = itask->itask_task;
	kstat_io_t		*kip;
	hrtime_t		elapsed_time;
	stmf_kstat_itl_info_t	*itli;
	stmf_i_lu_t	*ilu;

	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	itli = (stmf_kstat_itl_info_t *)KSTAT_NAMED_PTR(itl->itl_kstat_info);
	kip = KSTAT_IO_PTR(itl->itl_kstat_taskq);

	itli->i_task_waitq_elapsed.value.ui64 += itask->itask_waitq_time;

	/* Total task lifetime, from stmf_itl_task_start to now. */
	itask->itask_done_timestamp = gethrtime();
	elapsed_time =
	    itask->itask_done_timestamp - itask->itask_start_timestamp;

	if (task->task_flags & TF_READ_DATA) {
		kip->reads++;
		kip->nread += itask->itask_read_xfer;
		itli->i_task_read_elapsed.value.ui64 += elapsed_time;
		itli->i_lu_read_elapsed.value.ui64 +=
		    itask->itask_lu_read_time;
		itli->i_lport_read_elapsed.value.ui64 +=
		    itask->itask_lport_read_time;
	}

	if (task->task_flags & TF_WRITE_DATA) {
		kip->writes++;
		kip->nwritten += itask->itask_write_xfer;
		itli->i_task_write_elapsed.value.ui64 += elapsed_time;
		itli->i_lu_write_elapsed.value.ui64 +=
		    itask->itask_lu_write_time;
		itli->i_lport_write_elapsed.value.ui64 +=
		    itask->itask_lport_write_time;
	}

	/*
	 * The ilu kstat lock is released before the lport-side queue
	 * update in both branches.
	 */
	if (itask->itask_flags & ITASK_KSTAT_IN_RUNQ) {
		kstat_runq_exit(kip);
		stmf_update_kstat_lu_q(task, kstat_runq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_runq_exit);
	} else {
		kstat_waitq_exit(kip);
		stmf_update_kstat_lu_q(task, kstat_waitq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_waitq_exit);
	}
}
7630 
7631 void
7632 stmf_lu_xfer_start(scsi_task_t *task)
7633 {
7634 	stmf_i_scsi_task_t *itask = task->task_stmf_private;
7635 	stmf_itl_data_t	*itl = itask->itask_itl_datap;
7636 	stmf_i_lu_t	*ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7637 	kstat_io_t		*kip;
7638 
7639 	if (itl == NULL || task->task_lu == dlun0)
7640 		return;
7641 
7642 	kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
7643 	mutex_enter(ilu->ilu_kstat_io->ks_lock);
7644 	kstat_runq_enter(kip);
7645 	mutex_exit(ilu->ilu_kstat_io->ks_lock);
7646 }
7647 
7648 void
7649 stmf_lu_xfer_done(scsi_task_t *task, boolean_t read, uint64_t xfer_bytes,
7650     hrtime_t elapsed_time)
7651 {
7652 	stmf_i_scsi_task_t	*itask = task->task_stmf_private;
7653 	stmf_itl_data_t		*itl = itask->itask_itl_datap;
7654 	stmf_i_lu_t	*ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7655 	kstat_io_t		*kip;
7656 
7657 	if (itl == NULL || task->task_lu == dlun0)
7658 		return;
7659 
7660 	if (read) {
7661 		atomic_add_64((uint64_t *)&itask->itask_lu_read_time,
7662 		    elapsed_time);
7663 	} else {
7664 		atomic_add_64((uint64_t *)&itask->itask_lu_write_time,
7665 		    elapsed_time);
7666 	}
7667 
7668 	kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
7669 	mutex_enter(ilu->ilu_kstat_io->ks_lock);
7670 	kstat_runq_exit(kip);
7671 	if (read) {
7672 		kip->reads++;
7673 		kip->nread += xfer_bytes;
7674 	} else {
7675 		kip->writes++;
7676 		kip->nwritten += xfer_bytes;
7677 	}
7678 	mutex_exit(ilu->ilu_kstat_io->ks_lock);
7679 }
7680 
/*
 * Marks the start of a port-side data transfer: fires the
 * scsi-xfer-start DTRACE probe and stamps the dbuf with the start time
 * used later by stmf_lport_xfer_done().  No-op without itl data.
 */
static void
stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t		*itl = itask->itask_itl_datap;

	if (itl == NULL)
		return;

	DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf);

	dbuf->db_xfer_start_timestamp = gethrtime();
}
7694 
/*
 * Marks the end of a port-side data transfer: accumulates elapsed time
 * and byte counts into the task (atomics, by direction), fires the
 * scsi-xfer-end DTRACE probe, and updates the itl lport-xfer kstat
 * under the port's kstat lock.  A failed transfer counts zero bytes.
 */
static void
stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t		*itl = itask->itask_itl_datap;
	scsi_task_t		*task;
	stmf_i_local_port_t	*ilp;
	kstat_io_t		*kip;
	hrtime_t		elapsed_time;
	uint64_t		xfer_size;

	if (itl == NULL)
		return;

	task = (scsi_task_t *)itask->itask_task;
	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	/* Only successful transfers contribute to the byte counts. */
	xfer_size = (dbuf->db_xfer_status == STMF_SUCCESS) ?
	    dbuf->db_data_size : 0;

	elapsed_time = gethrtime() - dbuf->db_xfer_start_timestamp;
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		atomic_add_64((uint64_t *)&itask->itask_lport_read_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_read_xfer,
		    xfer_size);
	} else {
		atomic_add_64((uint64_t *)&itask->itask_lport_write_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_write_xfer,
		    xfer_size);
	}

	DTRACE_PROBE3(scsi__xfer__end, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf, hrtime_t, elapsed_time);

	kip = KSTAT_IO_PTR(itl->itl_kstat_lport_xfer);
	mutex_enter(ilp->ilport_kstat_io->ks_lock);
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		kip->reads++;
		kip->nread += xfer_size;
	} else {
		kip->writes++;
		kip->nwritten += xfer_size;
	}
	mutex_exit(ilp->ilport_kstat_io->ks_lock);

	/* Reset so a reused dbuf starts with a clean timestamp. */
	dbuf->db_xfer_start_timestamp = 0;
}
7742 
/*
 * Starts the STMF service thread: creates a single-threaded taskq and
 * dispatches stmf_svc() onto it.  Idempotent -- does nothing if the
 * service has already been started.
 */
void
stmf_svc_init()
{
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
		return;
	/* Empty request list: tail pointer refers to the list head. */
	stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active;
	stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
	    TASKQ_DEFAULTPRI, 0);
	(void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
	    stmf_svc, 0, DDI_SLEEP);
}
7754 
7755 stmf_status_t
7756 stmf_svc_fini()
7757 {
7758 	uint32_t i;
7759 
7760 	mutex_enter(&stmf_state.stmf_lock);
7761 	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
7762 		stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
7763 		cv_signal(&stmf_state.stmf_cv);
7764 	}
7765 	mutex_exit(&stmf_state.stmf_lock);
7766 
7767 	/* Wait for 5 seconds */
7768 	for (i = 0; i < 500; i++) {
7769 		if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
7770 			delay(drv_usectohz(10000));
7771 		else
7772 			break;
7773 	}
7774 	if (i == 500)
7775 		return (STMF_BUSY);
7776 
7777 	ddi_taskq_destroy(stmf_state.stmf_svc_taskq);
7778 
7779 	return (STMF_SUCCESS);
7780 }
7781 
/*
 * Timing state carried across stmf_svc_timeout() calls by the service
 * thread.
 */
struct stmf_svc_clocks {
	clock_t drain_start, drain_next;	/* free-task drain round */
	clock_t timing_start, timing_next;	/* ilu timing-check round */
	clock_t worker_delay;		/* next stmf_worker_mgmt() run */
};
7787 
/*
 * The STMF service thread (dispatched from stmf_svc_init).  Loops under
 * the state lock, running periodic housekeeping (stmf_svc_timeout) when
 * idle and processing queued online/offline requests otherwise, until
 * STMF_SVC_TERMINATE is set.  The state lock is dropped while calling
 * into LU/lport control entry points.
 */
/* ARGSUSED */
void
stmf_svc(void *arg)
{
	stmf_svc_req_t *req;
	stmf_lu_t *lu;
	stmf_i_lu_t *ilu;
	stmf_local_port_t *lport;
	struct stmf_svc_clocks clks = { 0 };

	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE;

	while (!(stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE)) {
		if (stmf_state.stmf_svc_active == NULL) {
			/* No work queued: do housekeeping and wait. */
			stmf_svc_timeout(&clks);
			continue;
		}

		/*
		 * Pop the front request from the active list.  After this,
		 * the request will no longer be referenced by global state,
		 * so it should be safe to access it without holding the
		 * stmf state lock.
		 */
		req = stmf_state.stmf_svc_active;
		stmf_state.stmf_svc_active = req->svc_next;

		if (stmf_state.stmf_svc_active == NULL)
			stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active;

		switch (req->svc_cmd) {
		case STMF_CMD_LPORT_ONLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_OFFLINE:
			mutex_exit(&stmf_state.stmf_lock);
			lport = (stmf_local_port_t *)req->svc_obj;
			lport->lport_ctl(lport, req->svc_cmd, &req->svc_info);
			break;
		case STMF_CMD_LU_ONLINE:
			mutex_exit(&stmf_state.stmf_lock);
			lu = (stmf_lu_t *)req->svc_obj;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;
		case STMF_CMD_LU_OFFLINE:
			/* Remove all mappings of this LU */
			stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj);
			/* Kill all the pending I/Os for this LU */
			mutex_exit(&stmf_state.stmf_lock);
			stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL,
			    STMF_ABORTED);
			lu = (stmf_lu_t *)req->svc_obj;
			ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
			/* Defer the offline until all tasks have drained. */
			if (ilu->ilu_ntasks != ilu->ilu_ntasks_free)
				break;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;
		default:
			cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d",
			    req->svc_cmd);
		}

		mutex_enter(&stmf_state.stmf_lock);
	}

	stmf_state.stmf_svc_flags &= ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE);
	mutex_exit(&stmf_state.stmf_lock);
}
7856 
/*
 * Idle-time housekeeping for the service thread, called with the state
 * lock held (and returning with it held): runs the periodic ilu timing
 * checks and free-task drain rounds, kicks worker-pool management, and
 * delivers "initial LUN mapped" events to sessions that just received
 * their first LUN.  Ends by waiting ~20ms on the service condvar
 * (released early by stmf_svc_queue signalling new work).
 */
static void
stmf_svc_timeout(struct stmf_svc_clocks *clks)
{
	clock_t td;
	stmf_i_local_port_t *ilport, *next_ilport;
	stmf_i_scsi_session_t *iss;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	td = drv_usectohz(20000);

	/* Do timeouts */
	if (stmf_state.stmf_nlus &&
	    ((!clks->timing_next) || (ddi_get_lbolt() >= clks->timing_next))) {
		if (!stmf_state.stmf_svc_ilu_timing) {
			/* we are starting a new round */
			stmf_state.stmf_svc_ilu_timing =
			    stmf_state.stmf_ilulist;
			clks->timing_start = ddi_get_lbolt();
		}

		stmf_check_ilu_timing();
		if (!stmf_state.stmf_svc_ilu_timing) {
			/* we finished a complete round */
			clks->timing_next =
			    clks->timing_start + drv_usectohz(5*1000*1000);
		} else {
			/* we still have some ilu items to check */
			clks->timing_next =
			    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
		}

		/* New requests take priority over housekeeping. */
		if (stmf_state.stmf_svc_active)
			return;
	}

	/* Check if there are free tasks to clear */
	if (stmf_state.stmf_nlus &&
	    ((!clks->drain_next) || (ddi_get_lbolt() >= clks->drain_next))) {
		if (!stmf_state.stmf_svc_ilu_draining) {
			/* we are starting a new round */
			stmf_state.stmf_svc_ilu_draining =
			    stmf_state.stmf_ilulist;
			clks->drain_start = ddi_get_lbolt();
		}

		stmf_check_freetask();
		if (!stmf_state.stmf_svc_ilu_draining) {
			/* we finished a complete round */
			clks->drain_next =
			    clks->drain_start + drv_usectohz(10*1000*1000);
		} else {
			/* we still have some ilu items to check */
			clks->drain_next =
			    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
		}

		/* New requests take priority over housekeeping. */
		if (stmf_state.stmf_svc_active)
			return;
	}

	/* Check if we need to run worker_mgmt */
	if (ddi_get_lbolt() > clks->worker_delay) {
		stmf_worker_mgmt();
		clks->worker_delay = ddi_get_lbolt() +
		    stmf_worker_mgmt_delay;
	}

	/* Check if any active session got its 1st LUN */
	if (stmf_state.stmf_process_initial_luns) {
		int stmf_level = 0;
		int port_level;

		for (ilport = stmf_state.stmf_ilportlist; ilport;
		    ilport = next_ilport) {
			int ilport_lock_held;
			next_ilport = ilport->ilport_next;

			if ((ilport->ilport_flags &
			    ILPORT_SS_GOT_INITIAL_LUNS) == 0)
				continue;

			port_level = 0;
			rw_enter(&ilport->ilport_lock, RW_READER);
			ilport_lock_held = 1;

			for (iss = ilport->ilport_ss_list; iss;
			    iss = iss->iss_next) {
				if ((iss->iss_flags &
				    ISS_GOT_INITIAL_LUNS) == 0)
					continue;

				port_level++;
				stmf_level++;
				atomic_and_32(&iss->iss_flags,
				    ~ISS_GOT_INITIAL_LUNS);
				atomic_or_32(&iss->iss_flags,
				    ISS_EVENT_ACTIVE);
				/*
				 * Both locks are dropped while the event
				 * handler runs; ISS_EVENT_ACTIVE marks the
				 * session as being processed meanwhile.
				 */
				rw_exit(&ilport->ilport_lock);
				ilport_lock_held = 0;
				mutex_exit(&stmf_state.stmf_lock);
				stmf_generate_lport_event(ilport,
				    LPORT_EVENT_INITIAL_LUN_MAPPED,
				    iss->iss_ss, 0);
				atomic_and_32(&iss->iss_flags,
				    ~ISS_EVENT_ACTIVE);
				mutex_enter(&stmf_state.stmf_lock);
				/*
				 * scan all the ilports again as the
				 * ilport list might have changed.
				 */
				next_ilport = stmf_state.stmf_ilportlist;
				break;
			}

			if (port_level == 0)
				atomic_and_32(&ilport->ilport_flags,
				    ~ILPORT_SS_GOT_INITIAL_LUNS);
			/* drop the lock if we are holding it. */
			if (ilport_lock_held == 1)
				rw_exit(&ilport->ilport_lock);

			/* Max 4 session at a time */
			if (stmf_level >= 4)
				break;
		}

		if (stmf_level == 0)
			stmf_state.stmf_process_initial_luns = 0;
	}

	/* Clear ACTIVE while blocked so stmf_svc_queue knows to signal us. */
	stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
	(void) cv_reltimedwait(&stmf_state.stmf_cv,
	    &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
	stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
}
7993 
7994 void
7995 stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
7996 {
7997 	stmf_svc_req_t *req;
7998 	int s;
7999 
8000 	ASSERT(!mutex_owned(&stmf_state.stmf_lock));
8001 	s = sizeof (stmf_svc_req_t);
8002 	if (info->st_additional_info) {
8003 		s += strlen(info->st_additional_info) + 1;
8004 	}
8005 	req = kmem_zalloc(s, KM_SLEEP);
8006 
8007 	req->svc_cmd = cmd;
8008 	req->svc_obj = obj;
8009 	req->svc_info.st_rflags = info->st_rflags;
8010 	if (info->st_additional_info) {
8011 		req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
8012 		    sizeof (stmf_svc_req_t)));
8013 		(void) strcpy(req->svc_info.st_additional_info,
8014 		    info->st_additional_info);
8015 	}
8016 	req->svc_req_alloc_size = s;
8017 	req->svc_next = NULL;
8018 
8019 	mutex_enter(&stmf_state.stmf_lock);
8020 	*stmf_state.stmf_svc_tailp = req;
8021 	stmf_state.stmf_svc_tailp = &req->svc_next;
8022 	if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
8023 		cv_signal(&stmf_state.stmf_cv);
8024 	}
8025 	mutex_exit(&stmf_state.stmf_lock);
8026 }
8027 
/*
 * Remove and free every queued service request whose svc_obj matches
 * 'obj' — presumably called while that object is being torn down so
 * the svc thread never dereferences a stale pointer (TODO confirm
 * against callers).  Caller must hold stmf_state.stmf_lock.
 */
static void
stmf_svc_kill_obj_requests(void *obj)
{
	stmf_svc_req_t *prev_req = NULL;
	stmf_svc_req_t *next_req;
	stmf_svc_req_t *req;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	for (req = stmf_state.stmf_svc_active; req != NULL; req = next_req) {
		/* Save the successor: req may be freed below. */
		next_req = req->svc_next;

		if (req->svc_obj == obj) {
			/* Unlink req from the singly-linked active list. */
			if (prev_req != NULL)
				prev_req->svc_next = next_req;
			else
				stmf_state.stmf_svc_active = next_req;

			/*
			 * If the removed request was the last element,
			 * pull the queue tail pointer back to the new
			 * last link (or to the list head when the list
			 * is now empty) so future appends stay valid.
			 */
			if (next_req == NULL)
				stmf_state.stmf_svc_tailp = (prev_req != NULL) ?
				    &prev_req->svc_next :
				    &stmf_state.stmf_svc_active;

			kmem_free(req, req->svc_req_alloc_size);
		} else {
			prev_req = req;
		}
	}
}
8057 
8058 void
8059 stmf_trace(caddr_t ident, const char *fmt, ...)
8060 {
8061 	va_list args;
8062 	char tbuf[160];
8063 	int len;
8064 
8065 	if (!stmf_trace_on)
8066 		return;
8067 	len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "",
8068 	    ddi_get_lbolt());
8069 	va_start(args, fmt);
8070 	len += vsnprintf(tbuf + len, 158 - len, fmt, args);
8071 	va_end(args);
8072 
8073 	if (len > 158) {
8074 		len = 158;
8075 	}
8076 	tbuf[len++] = '\n';
8077 	tbuf[len] = 0;
8078 
8079 	mutex_enter(&trace_buf_lock);
8080 	bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1);
8081 	trace_buf_curndx += len;
8082 	if (trace_buf_curndx > (trace_buf_size - 320))
8083 		trace_buf_curndx = 0;
8084 	mutex_exit(&trace_buf_lock);
8085 }
8086 
8087 void
8088 stmf_trace_clear()
8089 {
8090 	if (!stmf_trace_on)
8091 		return;
8092 	mutex_enter(&trace_buf_lock);
8093 	trace_buf_curndx = 0;
8094 	if (trace_buf_size > 0)
8095 		stmf_trace_buf[0] = 0;
8096 	mutex_exit(&trace_buf_lock);
8097 }
8098 
8099 static void
8100 stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info)
8101 {
8102 	stmf_state_change_info_t	change_info;
8103 	void				*ctl_private;
8104 	uint32_t			ctl_cmd;
8105 	int				msg = 0;
8106 
8107 	stmf_trace("FROM STMF", "abort_task_offline called for %s: %s",
8108 	    offline_lu ? "LU" : "LPORT", info ? info : "no additional info");
8109 	change_info.st_additional_info = info;
8110 	if (offline_lu) {
8111 		change_info.st_rflags = STMF_RFLAG_RESET |
8112 		    STMF_RFLAG_LU_ABORT;
8113 		ctl_private = task->task_lu;
8114 		if (((stmf_i_lu_t *)
8115 		    task->task_lu->lu_stmf_private)->ilu_state ==
8116 		    STMF_STATE_ONLINE) {
8117 			msg = 1;
8118 		}
8119 		ctl_cmd = STMF_CMD_LU_OFFLINE;
8120 	} else {
8121 		change_info.st_rflags = STMF_RFLAG_RESET |
8122 		    STMF_RFLAG_LPORT_ABORT;
8123 		ctl_private = task->task_lport;
8124 		if (((stmf_i_local_port_t *)
8125 		    task->task_lport->lport_stmf_private)->ilport_state ==
8126 		    STMF_STATE_ONLINE) {
8127 			msg = 1;
8128 		}
8129 		ctl_cmd = STMF_CMD_LPORT_OFFLINE;
8130 	}
8131 
8132 	if (msg) {
8133 		stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
8134 		    offline_lu ? "LU" : "LPORT", info ? info :
8135 		    "<no additional info>");
8136 	}
8137 	(void) stmf_ctl(ctl_cmd, ctl_private, &change_info);
8138 }
8139 
/*
 * Convert a single ASCII hex digit ('0'-'9', 'A'-'F', 'a'-'f') to its
 * numeric value 0-15; any other character yields -1.
 */
static char
stmf_ctoi(char c)
{
	char v;

	if ((c >= '0') && (c <= '9'))
		v = c - '0';
	else if ((c >= 'A') && (c <= 'F'))
		v = c - 'A' + 10;
	else if ((c >= 'a') && (c <= 'f'))
		v = c - 'a' + 10;
	else
		v = -1;	/* not a hex digit */
	return (v);
}
8153 
8154 /* Convert from Hex value in ASCII format to the equivalent bytes */
8155 static boolean_t
8156 stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp)
8157 {
8158 	int		ii;
8159 
8160 	for (ii = 0; ii < dplen; ii++) {
8161 		char nibble1, nibble2;
8162 		char enc_char = *c++;
8163 		nibble1 = stmf_ctoi(enc_char);
8164 
8165 		enc_char = *c++;
8166 		nibble2 = stmf_ctoi(enc_char);
8167 		if (nibble1 == -1 || nibble2 == -1)
8168 			return (B_FALSE);
8169 
8170 		dp[ii] = (nibble1 << 4) | nibble2;
8171 	}
8172 	return (B_TRUE);
8173 }
8174 
8175 boolean_t
8176 stmf_scsilib_tptid_validate(scsi_transport_id_t *tptid, uint32_t total_sz,
8177 				uint16_t *tptid_sz)
8178 {
8179 	uint16_t tpd_len = SCSI_TPTID_SIZE;
8180 
8181 	if (tptid_sz)
8182 		*tptid_sz = 0;
8183 	if (total_sz < sizeof (scsi_transport_id_t))
8184 		return (B_FALSE);
8185 
8186 	switch (tptid->protocol_id) {
8187 
8188 	case PROTOCOL_FIBRE_CHANNEL:
8189 		/* FC Transport ID validation checks. SPC3 rev23, Table 284 */
8190 		if (total_sz < tpd_len || tptid->format_code != 0)
8191 			return (B_FALSE);
8192 		break;
8193 
8194 	case PROTOCOL_iSCSI:
8195 		{
8196 		iscsi_transport_id_t	*iscsiid;
8197 		uint16_t		adn_len, name_len;
8198 
8199 		/* Check for valid format code, SPC3 rev 23 Table 288 */
8200 		if ((total_sz < tpd_len) ||
8201 		    (tptid->format_code != 0 && tptid->format_code != 1))
8202 			return (B_FALSE);
8203 
8204 		iscsiid = (iscsi_transport_id_t *)tptid;
8205 		adn_len = READ_SCSI16(iscsiid->add_len, uint16_t);
8206 		tpd_len = sizeof (iscsi_transport_id_t) + adn_len - 1;
8207 
8208 		/*
8209 		 * iSCSI Transport ID validation checks.
8210 		 * As per SPC3 rev 23 Section 7.5.4.6 and Table 289 & Table 290
8211 		 */
8212 		if (adn_len < 20 || (adn_len % 4 != 0))
8213 			return (B_FALSE);
8214 
8215 		name_len = strnlen(iscsiid->iscsi_name, adn_len);
8216 		if (name_len == 0 || name_len >= adn_len)
8217 			return (B_FALSE);
8218 
8219 		/* If the format_code is 1 check for ISID seperator */
8220 		if ((tptid->format_code == 1) && (strstr(iscsiid->iscsi_name,
8221 		    SCSI_TPTID_ISCSI_ISID_SEPERATOR) == NULL))
8222 			return (B_FALSE);
8223 
8224 		}
8225 		break;
8226 
8227 	case PROTOCOL_SRP:
8228 		/* SRP Transport ID validation checks. SPC3 rev23, Table 287 */
8229 		if (total_sz < tpd_len || tptid->format_code != 0)
8230 			return (B_FALSE);
8231 		break;
8232 
8233 	case PROTOCOL_PARALLEL_SCSI:
8234 	case PROTOCOL_SSA:
8235 	case PROTOCOL_IEEE_1394:
8236 	case PROTOCOL_SAS:
8237 	case PROTOCOL_ADT:
8238 	case PROTOCOL_ATAPI:
8239 	default:
8240 		{
8241 		stmf_dflt_scsi_tptid_t *dflttpd;
8242 
8243 		tpd_len = sizeof (stmf_dflt_scsi_tptid_t);
8244 		if (total_sz < tpd_len)
8245 			return (B_FALSE);
8246 		dflttpd = (stmf_dflt_scsi_tptid_t *)tptid;
8247 		tpd_len = tpd_len + SCSI_READ16(&dflttpd->ident_len) - 1;
8248 		if (total_sz < tpd_len)
8249 			return (B_FALSE);
8250 		}
8251 		break;
8252 	}
8253 	if (tptid_sz)
8254 		*tptid_sz = tpd_len;
8255 	return (B_TRUE);
8256 }
8257 
8258 boolean_t
8259 stmf_scsilib_tptid_compare(scsi_transport_id_t *tpd1,
8260 				scsi_transport_id_t *tpd2)
8261 {
8262 	if ((tpd1->protocol_id != tpd2->protocol_id) ||
8263 	    (tpd1->format_code != tpd2->format_code))
8264 		return (B_FALSE);
8265 
8266 	switch (tpd1->protocol_id) {
8267 
8268 	case PROTOCOL_iSCSI:
8269 		{
8270 		iscsi_transport_id_t *iscsitpd1, *iscsitpd2;
8271 		uint16_t len;
8272 
8273 		iscsitpd1 = (iscsi_transport_id_t *)tpd1;
8274 		iscsitpd2 = (iscsi_transport_id_t *)tpd2;
8275 		len = SCSI_READ16(&iscsitpd1->add_len);
8276 		if ((memcmp(iscsitpd1->add_len, iscsitpd2->add_len, 2) != 0) ||
8277 		    (memcmp(iscsitpd1->iscsi_name, iscsitpd2->iscsi_name, len)
8278 		    != 0))
8279 			return (B_FALSE);
8280 		}
8281 		break;
8282 
8283 	case PROTOCOL_SRP:
8284 		{
8285 		scsi_srp_transport_id_t *srptpd1, *srptpd2;
8286 
8287 		srptpd1 = (scsi_srp_transport_id_t *)tpd1;
8288 		srptpd2 = (scsi_srp_transport_id_t *)tpd2;
8289 		if (memcmp(srptpd1->srp_name, srptpd2->srp_name,
8290 		    sizeof (srptpd1->srp_name)) != 0)
8291 			return (B_FALSE);
8292 		}
8293 		break;
8294 
8295 	case PROTOCOL_FIBRE_CHANNEL:
8296 		{
8297 		scsi_fc_transport_id_t *fctpd1, *fctpd2;
8298 
8299 		fctpd1 = (scsi_fc_transport_id_t *)tpd1;
8300 		fctpd2 = (scsi_fc_transport_id_t *)tpd2;
8301 		if (memcmp(fctpd1->port_name, fctpd2->port_name,
8302 		    sizeof (fctpd1->port_name)) != 0)
8303 			return (B_FALSE);
8304 		}
8305 		break;
8306 
8307 	case PROTOCOL_PARALLEL_SCSI:
8308 	case PROTOCOL_SSA:
8309 	case PROTOCOL_IEEE_1394:
8310 	case PROTOCOL_SAS:
8311 	case PROTOCOL_ADT:
8312 	case PROTOCOL_ATAPI:
8313 	default:
8314 		{
8315 		stmf_dflt_scsi_tptid_t *dflt1, *dflt2;
8316 		uint16_t len;
8317 
8318 		dflt1 = (stmf_dflt_scsi_tptid_t *)tpd1;
8319 		dflt2 = (stmf_dflt_scsi_tptid_t *)tpd2;
8320 		len = SCSI_READ16(&dflt1->ident_len);
8321 		if ((memcmp(dflt1->ident_len, dflt2->ident_len, 2) != 0) ||
8322 		    (memcmp(dflt1->ident, dflt2->ident, len) != 0))
8323 			return (B_FALSE);
8324 		}
8325 		break;
8326 	}
8327 	return (B_TRUE);
8328 }
8329 
/*
 * Changes devid_desc to corresponding TransportID format
 * Returns :- pointer to stmf_remote_port_t (NULL when the devid ident
 *            cannot be parsed for the protocol)
 * Note    :- Allocates contiguous memory for stmf_remote_port_t and
 *            TransportID.  This memory must be freed (via
 *            stmf_remote_port_free) when this remote_port is no longer
 *            used.
 */
stmf_remote_port_t *
stmf_scsilib_devid_to_remote_port(scsi_devid_desc_t *devid)
{
	struct scsi_fc_transport_id	*fc_tpd;
	struct iscsi_transport_id	*iscsi_tpd;
	struct scsi_srp_transport_id	*srp_tpd;
	struct stmf_dflt_scsi_tptid	*dflt_tpd;
	uint16_t ident_len,  sz = 0;
	stmf_remote_port_t *rpt = NULL;

	ident_len = devid->ident_length;
	ASSERT(ident_len);
	switch (devid->protocol_id) {
	case PROTOCOL_FIBRE_CHANNEL:
		sz = sizeof (scsi_fc_transport_id_t);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		fc_tpd = (scsi_fc_transport_id_t *)rpt->rport_tptid;
		/*
		 * convert from "wwn.xxxxxxxxxxxxxxxx" to 8-byte binary
		 * skip first 4 byte for "wwn."
		 */
		ASSERT(strncmp("wwn.", (char *)devid->ident, 4) == 0);
		if ((ident_len < SCSI_TPTID_FC_PORT_NAME_SIZE * 2 + 4) ||
		    !stmf_base16_str_to_binary((char *)devid->ident + 4,
		    SCSI_TPTID_FC_PORT_NAME_SIZE, fc_tpd->port_name))
			goto devid_to_remote_port_fail;
		break;

	case PROTOCOL_iSCSI:
		/* Variable-size ID: name copied verbatim after the header */
		sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (iscsi_transport_id_t) +
		    ident_len - 1);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		iscsi_tpd = (iscsi_transport_id_t *)rpt->rport_tptid;
		SCSI_WRITE16(iscsi_tpd->add_len, ident_len);
		(void) memcpy(iscsi_tpd->iscsi_name, devid->ident, ident_len);
		break;

	case PROTOCOL_SRP:
		sz = sizeof (scsi_srp_transport_id_t);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		srp_tpd = (scsi_srp_transport_id_t *)rpt->rport_tptid;
		/*
		 * convert from "eui.xxxxxxxxxxxxxxx" to 8-byte binary
		 * skip first 4 byte for "eui."
		 * Assume 8-byte initiator-extension part of srp_name is NOT
		 * stored in devid and hence will be set as zero
		 *
		 * NOTE(review): the length check below only requires
		 * (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 hex characters
		 * after "eui.", yet the conversion consumes
		 * SCSI_TPTID_SRP_PORT_NAME_LEN * 2 characters — for a
		 * minimum-length ident that reads past devid->ident.
		 * Confirm whether the conversion length should be
		 * SCSI_TPTID_SRP_PORT_NAME_LEN - 8 to match the comment.
		 */
		ASSERT(strncmp("eui.", (char *)devid->ident, 4) == 0);
		if ((ident_len < (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 + 4) ||
		    !stmf_base16_str_to_binary((char *)devid->ident+4,
		    SCSI_TPTID_SRP_PORT_NAME_LEN, srp_tpd->srp_name))
			goto devid_to_remote_port_fail;
		break;

	case PROTOCOL_PARALLEL_SCSI:
	case PROTOCOL_SSA:
	case PROTOCOL_IEEE_1394:
	case PROTOCOL_SAS:
	case PROTOCOL_ADT:
	case PROTOCOL_ATAPI:
	default :
		/* Generic format: raw ident copied after a 2-byte length */
		ident_len = devid->ident_length;
		sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (stmf_dflt_scsi_tptid_t) +
		    ident_len - 1);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		dflt_tpd = (stmf_dflt_scsi_tptid_t *)rpt->rport_tptid;
		SCSI_WRITE16(dflt_tpd->ident_len, ident_len);
		(void) memcpy(dflt_tpd->ident, devid->ident, ident_len);
		break;
	}
	return (rpt);

devid_to_remote_port_fail:
	stmf_remote_port_free(rpt);
	return (NULL);

}
8422 
8423 stmf_remote_port_t *
8424 stmf_remote_port_alloc(uint16_t tptid_sz) {
8425 	stmf_remote_port_t *rpt;
8426 	rpt = (stmf_remote_port_t *)kmem_zalloc(
8427 	    sizeof (stmf_remote_port_t) + tptid_sz, KM_SLEEP);
8428 	rpt->rport_tptid_sz = tptid_sz;
8429 	rpt->rport_tptid = (scsi_transport_id_t *)(rpt + 1);
8430 	return (rpt);
8431 }
8432 
8433 void
8434 stmf_remote_port_free(stmf_remote_port_t *rpt)
8435 {
8436 	/*
8437 	 * Note: stmf_scsilib_devid_to_remote_port() function allocates
8438 	 *	remote port structures for all transports in the same way, So
8439 	 *	it is safe to deallocate it in a protocol independent manner.
8440 	 *	If any of the allocation method changes, corresponding changes
8441 	 *	need to be made here too.
8442 	 */
8443 	kmem_free(rpt, sizeof (stmf_remote_port_t) + rpt->rport_tptid_sz);
8444 }
8445