xref: /illumos-gate/usr/src/uts/common/io/comstar/stmf/stmf.c (revision e4d060fb4c00d44cd578713eb9a921f594b733b8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/disp.h>
34 #include <sys/byteorder.h>
35 #include <sys/atomic.h>
36 #include <sys/ethernet.h>
37 #include <sys/sdt.h>
38 #include <sys/nvpair.h>
39 #include <sys/zone.h>
40 #include <sys/id_space.h>
41 
42 #include <stmf.h>
43 #include <lpif.h>
44 #include <portif.h>
45 #include <stmf_ioctl.h>
46 #include <stmf_impl.h>
47 #include <lun_map.h>
48 #include <stmf_state.h>
49 #include <pppt_ic_if.h>
50 #include <stmf_stats.h>
51 
52 static uint64_t stmf_session_counter = 0;
53 static uint16_t stmf_rtpid_counter = 0;
54 /* start messages at 1 */
55 static uint64_t stmf_proxy_msg_id = 1;
56 #define	MSG_ID_TM_BIT	0x8000000000000000
57 
58 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
59 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
60 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
61 	void **result);
62 static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
63 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
64 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
65 	cred_t *credp, int *rval);
66 static int stmf_get_stmf_state(stmf_state_desc_t *std);
67 static int stmf_set_stmf_state(stmf_state_desc_t *std);
68 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
69     char *info);
70 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state);
71 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state);
72 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua);
73 void stmf_svc_init();
74 stmf_status_t stmf_svc_fini();
75 void stmf_svc(void *arg);
76 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
77 void stmf_check_freetask();
78 void stmf_abort_target_reset(scsi_task_t *task);
79 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
80 							int target_reset);
81 void stmf_target_reset_poll(struct scsi_task *task);
82 void stmf_handle_lun_reset(scsi_task_t *task);
83 void stmf_handle_target_reset(scsi_task_t *task);
84 void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf);
85 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
86     uint32_t *err_ret);
87 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
88 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
89     uint32_t *err_ret);
90 void stmf_delete_ppd(stmf_pp_data_t *ppd);
91 void stmf_delete_all_ppds();
92 void stmf_trace_clear();
93 void stmf_worker_init();
94 stmf_status_t stmf_worker_fini();
95 void stmf_worker_mgmt();
96 void stmf_worker_task(void *arg);
97 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss);
98 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg,
99     uint32_t type);
100 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg);
101 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg);
102 static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg);
103 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg);
104 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s);
105 
106 /* pppt modhandle */
107 ddi_modhandle_t pppt_mod;
108 
109 /* pppt modload imported functions */
110 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc;
111 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc;
112 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc;
113 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc;
114 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc;
115 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc;
116 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc;
117 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc;
118 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc;
119 stmf_ic_tx_msg_func_t ic_tx_msg;
120 stmf_ic_msg_free_func_t ic_msg_free;
121 
122 static void stmf_itl_task_start(stmf_i_scsi_task_t *itask);
123 static void stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask);
124 static void stmf_itl_task_done(stmf_i_scsi_task_t *itask);
125 
126 static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask,
127     stmf_data_buf_t *dbuf);
128 static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask,
129     stmf_data_buf_t *dbuf);
130 
131 static void stmf_update_kstat_lu_q(scsi_task_t *, void());
132 static void stmf_update_kstat_lport_q(scsi_task_t *, void());
133 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
134 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);
135 
136 static int stmf_irport_compare(const void *void_irport1,
137     const void *void_irport2);
138 static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid);
139 static void stmf_irport_destroy(stmf_i_remote_port_t *irport);
140 static stmf_i_remote_port_t *stmf_irport_register(
141     scsi_devid_desc_t *rport_devid);
142 static stmf_i_remote_port_t *stmf_irport_lookup_locked(
143     scsi_devid_desc_t *rport_devid);
144 static void stmf_irport_deregister(stmf_i_remote_port_t *irport);
145 
146 static void stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks);
147 static void stmf_delete_itl_kstat_by_lport(char *);
148 static void stmf_delete_itl_kstat_by_guid(char *);
149 static int stmf_itl_kstat_compare(const void*, const void*);
150 static stmf_i_itl_kstat_t *stmf_itl_kstat_lookup(char *kstat_nm);
151 static stmf_i_itl_kstat_t *stmf_itl_kstat_create(stmf_itl_data_t *itl,
152     char *nm, scsi_devid_desc_t *lport, scsi_devid_desc_t *lun);
153 
154 extern struct mod_ops mod_driverops;
155 
156 /* =====[ Tunables ]===== */
157 /* Internal tracing */
158 volatile int	stmf_trace_on = 1;
159 volatile int	stmf_trace_buf_size = (1 * 1024 * 1024);
160 /*
161  * The reason default task timeout is 75 is because we want the
162  * host to timeout 1st and mostly host timeout is 60 seconds.
163  */
164 volatile int	stmf_default_task_timeout = 75;
165 /*
166  * Setting this to one means, you are responsible for config load and keeping
167  * things in sync with persistent database.
168  */
169 volatile int	stmf_allow_modunload = 0;
170 
171 volatile int stmf_max_nworkers = 256;
172 volatile int stmf_min_nworkers = 4;
173 volatile int stmf_worker_scale_down_delay = 20;
174 
175 /* === [ Debugging and fault injection ] === */
176 #ifdef	DEBUG
177 volatile int stmf_drop_task_counter = 0;
178 volatile int stmf_drop_buf_counter = 0;
179 
180 #endif
181 
182 stmf_state_t		stmf_state;
183 static stmf_lu_t	*dlun0;
184 
/*
 * Nibble lookup tables: for a 4-bit index value, stmf_first_zero[v] is the
 * bit position of the least-significant 0 bit in v, and stmf_first_one[v]
 * is the position of the least-significant 1 bit.  0xff means "no such
 * bit" (v == 0xf has no zero bit; v == 0 has no one bit).  Presumably used
 * to scan allocation bitmaps a nibble at a time — confirm against callers.
 */
static uint8_t stmf_first_zero[] =
	{ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
static uint8_t stmf_first_one[] =
	{ 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };
189 
190 static kmutex_t	trace_buf_lock;
191 static int	trace_buf_size;
192 static int	trace_buf_curndx;
193 caddr_t	stmf_trace_buf;
194 
195 static enum {
196 	STMF_WORKERS_DISABLED = 0,
197 	STMF_WORKERS_ENABLING,
198 	STMF_WORKERS_ENABLED
199 } stmf_workers_state = STMF_WORKERS_DISABLED;
200 static int stmf_i_max_nworkers;
201 static int stmf_i_min_nworkers;
202 static int stmf_nworkers_cur;		/* # of workers currently running */
203 static int stmf_nworkers_needed;	/* # of workers need to be running */
204 static int stmf_worker_sel_counter = 0;
205 static uint32_t stmf_cur_ntasks = 0;
206 static clock_t stmf_wm_last = 0;
207 /*
208  * This is equal to stmf_nworkers_cur while we are increasing # workers and
209  * stmf_nworkers_needed while we are decreasing the worker count.
210  */
211 static int stmf_nworkers_accepting_cmds;
212 static stmf_worker_t *stmf_workers = NULL;
213 static clock_t stmf_worker_mgmt_delay = 2;
214 static clock_t stmf_worker_scale_down_timer = 0;
215 static int stmf_worker_scale_down_qd = 0;
216 
/*
 * Character device entry points (cb_ops(9S)).  The stmf pseudo-device is
 * ioctl-driven: only open, close and ioctl are implemented; all other
 * entry points are nodev.
 */
static struct cb_ops stmf_cb_ops = {
	stmf_open,			/* open */
	stmf_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	stmf_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
237 
/* Driver operations (dev_ops(9S)) for the stmf pseudo-device. */
static struct dev_ops stmf_ops = {
	DEVO_REV,
	0,			/* refcnt */
	stmf_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	stmf_attach,
	stmf_detach,
	nodev,			/* reset */
	&stmf_cb_ops,
	NULL,			/* bus_ops */
	NULL			/* power */
};
251 
252 #define	STMF_NAME		"COMSTAR STMF"
253 #define	STMF_MODULE_NAME	"stmf"
254 
/* Loadable module linkage (modldrv(9S)/modlinkage(9S)). */
static struct modldrv modldrv = {
	&mod_driverops,
	STMF_NAME,
	&stmf_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
266 
/*
 * Module load entry point: install the module, then initialize the
 * global stmf state — trace buffer, locks, instance-ID spaces, remote
 * port and ITL kstat AVL trees, the view/LUN-map subsystem, the service
 * thread and the internal LU (dlun0).
 *
 * NOTE(review): mod_install() is called *before* any of the state below
 * is initialized, so a racing open()/ioctl() could observe partially
 * initialized state.  Moving mod_install() to the end (with teardown on
 * failure) would close that window — confirm against later illumos fixes.
 */
int
_init(void)
{
	int ret;

	ret = mod_install(&modlinkage);
	if (ret)
		return (ret);
	/* Internal trace ring; size is the stmf_trace_buf_size tunable. */
	stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
	trace_buf_size = stmf_trace_buf_size;
	trace_buf_curndx = 0;
	mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
	bzero(&stmf_state, sizeof (stmf_state_t));
	/* STMF service is off by default */
	stmf_state.stmf_service_running = 0;
	mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
	/* Seed the session counter so IDs differ across reboots. */
	stmf_session_counter = (uint64_t)ddi_get_lbolt();
	avl_create(&stmf_state.stmf_irportlist,
	    stmf_irport_compare, sizeof (stmf_i_remote_port_t),
	    offsetof(stmf_i_remote_port_t, irport_ln));
	stmf_state.stmf_ilport_inst_space =
	    id_space_create("lport-instances", 0, MAX_ILPORT);
	stmf_state.stmf_irport_inst_space =
	    id_space_create("rport-instances", 0, MAX_IRPORT);
	avl_create(&stmf_state.stmf_itl_kstat_list,
	    stmf_itl_kstat_compare, sizeof (stmf_i_itl_kstat_t),
	    offsetof(stmf_i_itl_kstat_t, iitl_kstat_ln));
	stmf_view_init();
	stmf_svc_init();
	stmf_dlun_init();
	return (ret);
}
300 
301 int
302 _fini(void)
303 {
304 	int ret;
305 	stmf_i_remote_port_t	*irport;
306 	stmf_i_itl_kstat_t	*ks_itl;
307 	void			*avl_dest_cookie = NULL;
308 
309 	if (stmf_state.stmf_service_running)
310 		return (EBUSY);
311 	if ((!stmf_allow_modunload) &&
312 	    (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
313 		return (EBUSY);
314 	}
315 	if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
316 		return (EBUSY);
317 	}
318 	if (stmf_dlun_fini() != STMF_SUCCESS)
319 		return (EBUSY);
320 	if (stmf_worker_fini() != STMF_SUCCESS) {
321 		stmf_dlun_init();
322 		return (EBUSY);
323 	}
324 	if (stmf_svc_fini() != STMF_SUCCESS) {
325 		stmf_dlun_init();
326 		stmf_worker_init();
327 		return (EBUSY);
328 	}
329 
330 	ret = mod_remove(&modlinkage);
331 	if (ret) {
332 		stmf_svc_init();
333 		stmf_dlun_init();
334 		stmf_worker_init();
335 		return (ret);
336 	}
337 
338 	stmf_view_clear_config();
339 
340 	while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist,
341 	    &avl_dest_cookie)) != NULL)
342 		stmf_irport_destroy(irport);
343 	avl_destroy(&stmf_state.stmf_irportlist);
344 	id_space_destroy(stmf_state.stmf_ilport_inst_space);
345 	id_space_destroy(stmf_state.stmf_irport_inst_space);
346 
347 	avl_dest_cookie = NULL;
348 	while ((ks_itl = avl_destroy_nodes(&stmf_state.stmf_itl_kstat_list,
349 	    &avl_dest_cookie)) != NULL) {
350 		stmf_teardown_itl_kstats(ks_itl);
351 		kmem_free(ks_itl, sizeof (ks_itl));
352 	}
353 	avl_destroy(&stmf_state.stmf_itl_kstat_list);
354 
355 	kmem_free(stmf_trace_buf, stmf_trace_buf_size);
356 	mutex_destroy(&trace_buf_lock);
357 	mutex_destroy(&stmf_state.stmf_lock);
358 	cv_destroy(&stmf_state.stmf_cv);
359 	return (ret);
360 }
361 
/* Module information entry point: report via the standard modlinkage. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
367 
368 /* ARGSUSED */
369 static int
370 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
371 {
372 	switch (cmd) {
373 	case DDI_INFO_DEVT2DEVINFO:
374 		*result = stmf_state.stmf_dip;
375 		break;
376 	case DDI_INFO_DEVT2INSTANCE:
377 		*result =
378 		    (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
379 		break;
380 	default:
381 		return (DDI_FAILURE);
382 	}
383 
384 	return (DDI_SUCCESS);
385 }
386 
387 static int
388 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
389 {
390 	switch (cmd) {
391 	case DDI_ATTACH:
392 		stmf_state.stmf_dip = dip;
393 
394 		if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
395 		    DDI_NT_STMF, 0) != DDI_SUCCESS) {
396 			break;
397 		}
398 		ddi_report_dev(dip);
399 		return (DDI_SUCCESS);
400 	}
401 
402 	return (DDI_FAILURE);
403 }
404 
405 static int
406 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
407 {
408 	switch (cmd) {
409 	case DDI_DETACH:
410 		ddi_remove_minor_node(dip, 0);
411 		return (DDI_SUCCESS);
412 	}
413 
414 	return (DDI_FAILURE);
415 }
416 
417 /* ARGSUSED */
418 static int
419 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
420 {
421 	mutex_enter(&stmf_state.stmf_lock);
422 	if (stmf_state.stmf_exclusive_open) {
423 		mutex_exit(&stmf_state.stmf_lock);
424 		return (EBUSY);
425 	}
426 	if (flag & FEXCL) {
427 		if (stmf_state.stmf_opened) {
428 			mutex_exit(&stmf_state.stmf_lock);
429 			return (EBUSY);
430 		}
431 		stmf_state.stmf_exclusive_open = 1;
432 	}
433 	stmf_state.stmf_opened = 1;
434 	mutex_exit(&stmf_state.stmf_lock);
435 	return (0);
436 }
437 
/* ARGSUSED */
/*
 * close(9E): clear the open state.  If the exclusive opener goes away
 * before configuration initialization completed, discard the partial
 * configuration (provider private data and view entries) so the next
 * opener starts from a clean slate.
 */
static int
stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_opened = 0;
	if (stmf_state.stmf_exclusive_open &&
	    (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
		stmf_state.stmf_config_state = STMF_CONFIG_NONE;
		stmf_delete_all_ppds();
		stmf_view_clear_config();
		stmf_view_init();
	}
	stmf_state.stmf_exclusive_open = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}
455 
456 int
457 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
458 						void **ibuf, void **obuf)
459 {
460 	int ret;
461 
462 	*ibuf = NULL;
463 	*obuf = NULL;
464 	*iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);
465 
466 	ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
467 	if (ret)
468 		return (EFAULT);
469 	if ((*iocd)->stmf_version != STMF_VERSION_1) {
470 		ret = EINVAL;
471 		goto copyin_iocdata_done;
472 	}
473 	if ((*iocd)->stmf_ibuf_size) {
474 		*ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
475 		ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
476 		    *ibuf, (*iocd)->stmf_ibuf_size, mode);
477 	}
478 	if ((*iocd)->stmf_obuf_size)
479 		*obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);
480 
481 	if (ret == 0)
482 		return (0);
483 	ret = EFAULT;
484 copyin_iocdata_done:;
485 	if (*obuf) {
486 		kmem_free(*obuf, (*iocd)->stmf_obuf_size);
487 		*obuf = NULL;
488 	}
489 	if (*ibuf) {
490 		kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
491 		*ibuf = NULL;
492 	}
493 	kmem_free(*iocd, sizeof (stmf_iocdata_t));
494 	return (ret);
495 }
496 
497 int
498 stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
499 {
500 	int ret;
501 
502 	if (iocd->stmf_obuf_size) {
503 		ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
504 		    iocd->stmf_obuf_size, mode);
505 		if (ret)
506 			return (EFAULT);
507 	}
508 	ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
509 	if (ret)
510 		return (EFAULT);
511 	return (0);
512 }
513 
514 /* ARGSUSED */
515 static int
516 stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
517 	cred_t *credp, int *rval)
518 {
519 	stmf_iocdata_t *iocd;
520 	void *ibuf = NULL, *obuf = NULL;
521 	slist_lu_t *luid_list;
522 	slist_target_port_t *lportid_list;
523 	stmf_i_lu_t *ilu;
524 	stmf_i_local_port_t *ilport;
525 	stmf_i_scsi_session_t *iss;
526 	slist_scsi_session_t *iss_list;
527 	sioc_lu_props_t *lup;
528 	sioc_target_port_props_t *lportp;
529 	stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
530 	uint64_t *ppi_token = NULL;
531 	uint8_t *p_id, *id;
532 	stmf_state_desc_t *std;
533 	stmf_status_t ctl_ret;
534 	stmf_state_change_info_t ssi;
535 	int ret = 0;
536 	uint32_t n;
537 	int i;
538 	stmf_group_op_data_t *grp_entry;
539 	stmf_group_name_t *grpname;
540 	stmf_view_op_entry_t *ve;
541 	stmf_id_type_t idtype;
542 	stmf_id_data_t *id_entry;
543 	stmf_id_list_t	*id_list;
544 	stmf_view_entry_t *view_entry;
545 	uint32_t	veid;
546 
547 	if ((cmd & 0xff000000) != STMF_IOCTL) {
548 		return (ENOTTY);
549 	}
550 
551 	if (drv_priv(credp) != 0) {
552 		return (EPERM);
553 	}
554 
555 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
556 	if (ret)
557 		return (ret);
558 	iocd->stmf_error = 0;
559 
560 	switch (cmd) {
561 	case STMF_IOCTL_LU_LIST:
562 		/* retrieves both registered/unregistered */
563 		mutex_enter(&stmf_state.stmf_lock);
564 		id_list = &stmf_state.stmf_luid_list;
565 		n = min(id_list->id_count,
566 		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
567 		iocd->stmf_obuf_max_nentries = id_list->id_count;
568 		luid_list = (slist_lu_t *)obuf;
569 		id_entry = id_list->idl_head;
570 		for (i = 0; i < n; i++) {
571 			bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
572 			id_entry = id_entry->id_next;
573 		}
574 
575 		n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
576 		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
577 			id = (uint8_t *)ilu->ilu_lu->lu_id;
578 			if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
579 				iocd->stmf_obuf_max_nentries++;
580 				if (i < n) {
581 					bcopy(id + 4, luid_list[i].lu_guid,
582 					    sizeof (slist_lu_t));
583 					i++;
584 				}
585 			}
586 		}
587 		iocd->stmf_obuf_nentries = i;
588 		mutex_exit(&stmf_state.stmf_lock);
589 		break;
590 
591 	case STMF_IOCTL_REG_LU_LIST:
592 		mutex_enter(&stmf_state.stmf_lock);
593 		iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
594 		n = min(stmf_state.stmf_nlus,
595 		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
596 		iocd->stmf_obuf_nentries = n;
597 		ilu = stmf_state.stmf_ilulist;
598 		luid_list = (slist_lu_t *)obuf;
599 		for (i = 0; i < n; i++) {
600 			uint8_t *id;
601 			id = (uint8_t *)ilu->ilu_lu->lu_id;
602 			bcopy(id + 4, luid_list[i].lu_guid, 16);
603 			ilu = ilu->ilu_next;
604 		}
605 		mutex_exit(&stmf_state.stmf_lock);
606 		break;
607 
608 	case STMF_IOCTL_VE_LU_LIST:
609 		mutex_enter(&stmf_state.stmf_lock);
610 		id_list = &stmf_state.stmf_luid_list;
611 		n = min(id_list->id_count,
612 		    (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
613 		iocd->stmf_obuf_max_nentries = id_list->id_count;
614 		iocd->stmf_obuf_nentries = n;
615 		luid_list = (slist_lu_t *)obuf;
616 		id_entry = id_list->idl_head;
617 		for (i = 0; i < n; i++) {
618 			bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
619 			id_entry = id_entry->id_next;
620 		}
621 		mutex_exit(&stmf_state.stmf_lock);
622 		break;
623 
624 	case STMF_IOCTL_TARGET_PORT_LIST:
625 		mutex_enter(&stmf_state.stmf_lock);
626 		iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
627 		n = min(stmf_state.stmf_nlports,
628 		    (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
629 		iocd->stmf_obuf_nentries = n;
630 		ilport = stmf_state.stmf_ilportlist;
631 		lportid_list = (slist_target_port_t *)obuf;
632 		for (i = 0; i < n; i++) {
633 			uint8_t *id;
634 			id = (uint8_t *)ilport->ilport_lport->lport_id;
635 			bcopy(id, lportid_list[i].target, id[3] + 4);
636 			ilport = ilport->ilport_next;
637 		}
638 		mutex_exit(&stmf_state.stmf_lock);
639 		break;
640 
641 	case STMF_IOCTL_SESSION_LIST:
642 		p_id = (uint8_t *)ibuf;
643 		if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
644 		    (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
645 			ret = EINVAL;
646 			break;
647 		}
648 		mutex_enter(&stmf_state.stmf_lock);
649 		for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
650 		    ilport->ilport_next) {
651 			uint8_t *id;
652 			id = (uint8_t *)ilport->ilport_lport->lport_id;
653 			if ((p_id[3] == id[3]) &&
654 			    (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
655 				break;
656 			}
657 		}
658 		if (ilport == NULL) {
659 			mutex_exit(&stmf_state.stmf_lock);
660 			ret = ENOENT;
661 			break;
662 		}
663 		iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
664 		n = min(ilport->ilport_nsessions,
665 		    (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
666 		iocd->stmf_obuf_nentries = n;
667 		iss = ilport->ilport_ss_list;
668 		iss_list = (slist_scsi_session_t *)obuf;
669 		for (i = 0; i < n; i++) {
670 			uint8_t *id;
671 			id = (uint8_t *)iss->iss_ss->ss_rport_id;
672 			bcopy(id, iss_list[i].initiator, id[3] + 4);
673 			iss_list[i].creation_time = (uint32_t)
674 			    iss->iss_creation_time;
675 			if (iss->iss_ss->ss_rport_alias) {
676 				(void) strncpy(iss_list[i].alias,
677 				    iss->iss_ss->ss_rport_alias, 255);
678 				iss_list[i].alias[255] = 0;
679 			} else {
680 				iss_list[i].alias[0] = 0;
681 			}
682 			iss = iss->iss_next;
683 		}
684 		mutex_exit(&stmf_state.stmf_lock);
685 		break;
686 
687 	case STMF_IOCTL_GET_LU_PROPERTIES:
688 		p_id = (uint8_t *)ibuf;
689 		if ((iocd->stmf_ibuf_size < 16) ||
690 		    (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
691 		    (p_id[0] == 0)) {
692 			ret = EINVAL;
693 			break;
694 		}
695 		mutex_enter(&stmf_state.stmf_lock);
696 		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
697 			if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
698 				break;
699 		}
700 		if (ilu == NULL) {
701 			mutex_exit(&stmf_state.stmf_lock);
702 			ret = ENOENT;
703 			break;
704 		}
705 		lup = (sioc_lu_props_t *)obuf;
706 		bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
707 		lup->lu_state = ilu->ilu_state & 0x0f;
708 		lup->lu_present = 1; /* XXX */
709 		(void) strncpy(lup->lu_provider_name,
710 		    ilu->ilu_lu->lu_lp->lp_name, 255);
711 		lup->lu_provider_name[254] = 0;
712 		if (ilu->ilu_lu->lu_alias) {
713 			(void) strncpy(lup->lu_alias,
714 			    ilu->ilu_lu->lu_alias, 255);
715 			lup->lu_alias[255] = 0;
716 		} else {
717 			lup->lu_alias[0] = 0;
718 		}
719 		mutex_exit(&stmf_state.stmf_lock);
720 		break;
721 
722 	case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
723 		p_id = (uint8_t *)ibuf;
724 		if ((p_id == NULL) ||
725 		    (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
726 		    (iocd->stmf_obuf_size <
727 		    sizeof (sioc_target_port_props_t))) {
728 			ret = EINVAL;
729 			break;
730 		}
731 		mutex_enter(&stmf_state.stmf_lock);
732 		for (ilport = stmf_state.stmf_ilportlist; ilport;
733 		    ilport = ilport->ilport_next) {
734 			uint8_t *id;
735 			id = (uint8_t *)ilport->ilport_lport->lport_id;
736 			if ((p_id[3] == id[3]) &&
737 			    (bcmp(p_id+4, id+4, id[3]) == 0))
738 				break;
739 		}
740 		if (ilport == NULL) {
741 			mutex_exit(&stmf_state.stmf_lock);
742 			ret = ENOENT;
743 			break;
744 		}
745 		lportp = (sioc_target_port_props_t *)obuf;
746 		bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
747 		    ilport->ilport_lport->lport_id->ident_length + 4);
748 		lportp->tgt_state = ilport->ilport_state & 0x0f;
749 		lportp->tgt_present = 1; /* XXX */
750 		(void) strncpy(lportp->tgt_provider_name,
751 		    ilport->ilport_lport->lport_pp->pp_name, 255);
752 		lportp->tgt_provider_name[254] = 0;
753 		if (ilport->ilport_lport->lport_alias) {
754 			(void) strncpy(lportp->tgt_alias,
755 			    ilport->ilport_lport->lport_alias, 255);
756 			lportp->tgt_alias[255] = 0;
757 		} else {
758 			lportp->tgt_alias[0] = 0;
759 		}
760 		mutex_exit(&stmf_state.stmf_lock);
761 		break;
762 
763 	case STMF_IOCTL_SET_STMF_STATE:
764 		if ((ibuf == NULL) ||
765 		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
766 			ret = EINVAL;
767 			break;
768 		}
769 		ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
770 		break;
771 
772 	case STMF_IOCTL_GET_STMF_STATE:
773 		if ((obuf == NULL) ||
774 		    (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
775 			ret = EINVAL;
776 			break;
777 		}
778 		ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
779 		break;
780 
781 	case STMF_IOCTL_SET_ALUA_STATE:
782 		if ((ibuf == NULL) ||
783 		    (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) {
784 			ret = EINVAL;
785 			break;
786 		}
787 		ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf);
788 		break;
789 
790 	case STMF_IOCTL_GET_ALUA_STATE:
791 		if ((obuf == NULL) ||
792 		    (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) {
793 			ret = EINVAL;
794 			break;
795 		}
796 		stmf_get_alua_state((stmf_alua_state_desc_t *)obuf);
797 		break;
798 
799 	case STMF_IOCTL_SET_LU_STATE:
800 		ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
801 		ssi.st_additional_info = NULL;
802 		std = (stmf_state_desc_t *)ibuf;
803 		if ((ibuf == NULL) ||
804 		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
805 			ret = EINVAL;
806 			break;
807 		}
808 		p_id = std->ident;
809 		mutex_enter(&stmf_state.stmf_lock);
810 		if (stmf_state.stmf_inventory_locked) {
811 			mutex_exit(&stmf_state.stmf_lock);
812 			ret = EBUSY;
813 			break;
814 		}
815 		for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
816 			if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
817 				break;
818 		}
819 		if (ilu == NULL) {
820 			mutex_exit(&stmf_state.stmf_lock);
821 			ret = ENOENT;
822 			break;
823 		}
824 		stmf_state.stmf_inventory_locked = 1;
825 		mutex_exit(&stmf_state.stmf_lock);
826 		cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
827 		    STMF_CMD_LU_OFFLINE;
828 		ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
829 		if (ctl_ret == STMF_ALREADY)
830 			ret = 0;
831 		else if (ctl_ret == STMF_BUSY)
832 			ret = EBUSY;
833 		else if (ctl_ret != STMF_SUCCESS)
834 			ret = EIO;
835 		mutex_enter(&stmf_state.stmf_lock);
836 		stmf_state.stmf_inventory_locked = 0;
837 		mutex_exit(&stmf_state.stmf_lock);
838 		break;
839 
840 	case STMF_IOCTL_SET_TARGET_PORT_STATE:
841 		ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
842 		ssi.st_additional_info = NULL;
843 		std = (stmf_state_desc_t *)ibuf;
844 		if ((ibuf == NULL) ||
845 		    (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
846 			ret = EINVAL;
847 			break;
848 		}
849 		p_id = std->ident;
850 		mutex_enter(&stmf_state.stmf_lock);
851 		if (stmf_state.stmf_inventory_locked) {
852 			mutex_exit(&stmf_state.stmf_lock);
853 			ret = EBUSY;
854 			break;
855 		}
856 		for (ilport = stmf_state.stmf_ilportlist; ilport;
857 		    ilport = ilport->ilport_next) {
858 			uint8_t *id;
859 			id = (uint8_t *)ilport->ilport_lport->lport_id;
860 			if ((id[3] == p_id[3]) &&
861 			    (bcmp(id+4, p_id+4, id[3]) == 0)) {
862 				break;
863 			}
864 		}
865 		if (ilport == NULL) {
866 			mutex_exit(&stmf_state.stmf_lock);
867 			ret = ENOENT;
868 			break;
869 		}
870 		stmf_state.stmf_inventory_locked = 1;
871 		mutex_exit(&stmf_state.stmf_lock);
872 		cmd = (std->state == STMF_STATE_ONLINE) ?
873 		    STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
874 		ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
875 		if (ctl_ret == STMF_ALREADY)
876 			ret = 0;
877 		else if (ctl_ret == STMF_BUSY)
878 			ret = EBUSY;
879 		else if (ctl_ret != STMF_SUCCESS)
880 			ret = EIO;
881 		mutex_enter(&stmf_state.stmf_lock);
882 		stmf_state.stmf_inventory_locked = 0;
883 		mutex_exit(&stmf_state.stmf_lock);
884 		break;
885 
886 	case STMF_IOCTL_ADD_HG_ENTRY:
887 		idtype = STMF_ID_TYPE_HOST;
888 		/* FALLTHROUGH */
889 	case STMF_IOCTL_ADD_TG_ENTRY:
890 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
891 			ret = EACCES;
892 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
893 			break;
894 		}
895 		if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
896 			idtype = STMF_ID_TYPE_TARGET;
897 		}
898 		grp_entry = (stmf_group_op_data_t *)ibuf;
899 		if ((ibuf == NULL) ||
900 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
901 			ret = EINVAL;
902 			break;
903 		}
904 		if (grp_entry->group.name[0] == '*') {
905 			ret = EINVAL;
906 			break; /* not allowed */
907 		}
908 		mutex_enter(&stmf_state.stmf_lock);
909 		ret = stmf_add_group_member(grp_entry->group.name,
910 		    grp_entry->group.name_size,
911 		    grp_entry->ident + 4,
912 		    grp_entry->ident[3],
913 		    idtype,
914 		    &iocd->stmf_error);
915 		mutex_exit(&stmf_state.stmf_lock);
916 		break;
917 	case STMF_IOCTL_REMOVE_HG_ENTRY:
918 		idtype = STMF_ID_TYPE_HOST;
919 		/* FALLTHROUGH */
920 	case STMF_IOCTL_REMOVE_TG_ENTRY:
921 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
922 			ret = EACCES;
923 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
924 			break;
925 		}
926 		if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
927 			idtype = STMF_ID_TYPE_TARGET;
928 		}
929 		grp_entry = (stmf_group_op_data_t *)ibuf;
930 		if ((ibuf == NULL) ||
931 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
932 			ret = EINVAL;
933 			break;
934 		}
935 		if (grp_entry->group.name[0] == '*') {
936 			ret = EINVAL;
937 			break; /* not allowed */
938 		}
939 		mutex_enter(&stmf_state.stmf_lock);
940 		ret = stmf_remove_group_member(grp_entry->group.name,
941 		    grp_entry->group.name_size,
942 		    grp_entry->ident + 4,
943 		    grp_entry->ident[3],
944 		    idtype,
945 		    &iocd->stmf_error);
946 		mutex_exit(&stmf_state.stmf_lock);
947 		break;
948 	case STMF_IOCTL_CREATE_HOST_GROUP:
949 		idtype = STMF_ID_TYPE_HOST_GROUP;
950 		/* FALLTHROUGH */
951 	case STMF_IOCTL_CREATE_TARGET_GROUP:
952 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
953 			ret = EACCES;
954 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
955 			break;
956 		}
957 		grpname = (stmf_group_name_t *)ibuf;
958 
959 		if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
960 			idtype = STMF_ID_TYPE_TARGET_GROUP;
961 		if ((ibuf == NULL) ||
962 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
963 			ret = EINVAL;
964 			break;
965 		}
966 		if (grpname->name[0] == '*') {
967 			ret = EINVAL;
968 			break; /* not allowed */
969 		}
970 		mutex_enter(&stmf_state.stmf_lock);
971 		ret = stmf_add_group(grpname->name,
972 		    grpname->name_size, idtype, &iocd->stmf_error);
973 		mutex_exit(&stmf_state.stmf_lock);
974 		break;
975 	case STMF_IOCTL_REMOVE_HOST_GROUP:
976 		idtype = STMF_ID_TYPE_HOST_GROUP;
977 		/* FALLTHROUGH */
978 	case STMF_IOCTL_REMOVE_TARGET_GROUP:
979 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
980 			ret = EACCES;
981 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
982 			break;
983 		}
984 		grpname = (stmf_group_name_t *)ibuf;
985 		if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
986 			idtype = STMF_ID_TYPE_TARGET_GROUP;
987 		if ((ibuf == NULL) ||
988 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
989 			ret = EINVAL;
990 			break;
991 		}
992 		if (grpname->name[0] == '*') {
993 			ret = EINVAL;
994 			break; /* not allowed */
995 		}
996 		mutex_enter(&stmf_state.stmf_lock);
997 		ret = stmf_remove_group(grpname->name,
998 		    grpname->name_size, idtype, &iocd->stmf_error);
999 		mutex_exit(&stmf_state.stmf_lock);
1000 		break;
1001 	case STMF_IOCTL_VALIDATE_VIEW:
1002 	case STMF_IOCTL_ADD_VIEW_ENTRY:
1003 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1004 			ret = EACCES;
1005 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1006 			break;
1007 		}
1008 		ve = (stmf_view_op_entry_t *)ibuf;
1009 		if ((ibuf == NULL) ||
1010 		    (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1011 			ret = EINVAL;
1012 			break;
1013 		}
1014 		if (!ve->ve_lu_number_valid)
1015 			ve->ve_lu_nbr[2] = 0xFF;
1016 		if (ve->ve_all_hosts) {
1017 			ve->ve_host_group.name[0] = '*';
1018 			ve->ve_host_group.name_size = 1;
1019 		}
1020 		if (ve->ve_all_targets) {
1021 			ve->ve_target_group.name[0] = '*';
1022 			ve->ve_target_group.name_size = 1;
1023 		}
1024 		if (ve->ve_ndx_valid)
1025 			veid = ve->ve_ndx;
1026 		else
1027 			veid = 0xffffffff;
1028 		mutex_enter(&stmf_state.stmf_lock);
1029 		if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
1030 			ret = stmf_add_ve(ve->ve_host_group.name,
1031 			    ve->ve_host_group.name_size,
1032 			    ve->ve_target_group.name,
1033 			    ve->ve_target_group.name_size,
1034 			    ve->ve_guid,
1035 			    &veid,
1036 			    ve->ve_lu_nbr,
1037 			    &iocd->stmf_error);
1038 		} else {  /* STMF_IOCTL_VALIDATE_VIEW */
1039 			ret = stmf_validate_lun_ve(ve->ve_host_group.name,
1040 			    ve->ve_host_group.name_size,
1041 			    ve->ve_target_group.name,
1042 			    ve->ve_target_group.name_size,
1043 			    ve->ve_lu_nbr,
1044 			    &iocd->stmf_error);
1045 		}
1046 		mutex_exit(&stmf_state.stmf_lock);
1047 		if (ret == 0 &&
1048 		    (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
1049 		    iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
1050 			stmf_view_op_entry_t *ve_ret =
1051 			    (stmf_view_op_entry_t *)obuf;
1052 			iocd->stmf_obuf_nentries = 1;
1053 			iocd->stmf_obuf_max_nentries = 1;
1054 			if (!ve->ve_ndx_valid) {
1055 				ve_ret->ve_ndx = veid;
1056 				ve_ret->ve_ndx_valid = 1;
1057 			}
1058 			if (!ve->ve_lu_number_valid) {
1059 				ve_ret->ve_lu_number_valid = 1;
1060 				bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
1061 			}
1062 		}
1063 		break;
1064 	case STMF_IOCTL_REMOVE_VIEW_ENTRY:
1065 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1066 			ret = EACCES;
1067 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1068 			break;
1069 		}
1070 		ve = (stmf_view_op_entry_t *)ibuf;
1071 		if ((ibuf == NULL) ||
1072 		    (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1073 			ret = EINVAL;
1074 			break;
1075 		}
1076 		if (!ve->ve_ndx_valid) {
1077 			ret = EINVAL;
1078 			break;
1079 		}
1080 		mutex_enter(&stmf_state.stmf_lock);
1081 		ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
1082 		    &iocd->stmf_error);
1083 		mutex_exit(&stmf_state.stmf_lock);
1084 		break;
1085 	case STMF_IOCTL_GET_HG_LIST:
1086 		id_list = &stmf_state.stmf_hg_list;
1087 		/* FALLTHROUGH */
1088 	case STMF_IOCTL_GET_TG_LIST:
1089 		if (cmd == STMF_IOCTL_GET_TG_LIST)
1090 			id_list = &stmf_state.stmf_tg_list;
1091 		mutex_enter(&stmf_state.stmf_lock);
1092 		iocd->stmf_obuf_max_nentries = id_list->id_count;
1093 		n = min(id_list->id_count,
1094 		    (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
1095 		iocd->stmf_obuf_nentries = n;
1096 		id_entry = id_list->idl_head;
1097 		grpname = (stmf_group_name_t *)obuf;
1098 		for (i = 0; i < n; i++) {
1099 			if (id_entry->id_data[0] == '*') {
1100 				if (iocd->stmf_obuf_nentries > 0) {
1101 					iocd->stmf_obuf_nentries--;
1102 				}
1103 				id_entry = id_entry->id_next;
1104 				continue;
1105 			}
1106 			grpname->name_size = id_entry->id_data_size;
1107 			bcopy(id_entry->id_data, grpname->name,
1108 			    id_entry->id_data_size);
1109 			grpname++;
1110 			id_entry = id_entry->id_next;
1111 		}
1112 		mutex_exit(&stmf_state.stmf_lock);
1113 		break;
1114 	case STMF_IOCTL_GET_HG_ENTRIES:
1115 		id_list = &stmf_state.stmf_hg_list;
1116 		/* FALLTHROUGH */
1117 	case STMF_IOCTL_GET_TG_ENTRIES:
1118 		grpname = (stmf_group_name_t *)ibuf;
1119 		if ((ibuf == NULL) ||
1120 		    (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1121 			ret = EINVAL;
1122 			break;
1123 		}
1124 		if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
1125 			id_list = &stmf_state.stmf_tg_list;
1126 		}
1127 		mutex_enter(&stmf_state.stmf_lock);
1128 		id_entry = stmf_lookup_id(id_list, grpname->name_size,
1129 		    grpname->name);
1130 		if (!id_entry)
1131 			ret = ENODEV;
1132 		else {
1133 			stmf_ge_ident_t *grp_entry;
1134 			id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
1135 			iocd->stmf_obuf_max_nentries = id_list->id_count;
1136 			n = min(id_list->id_count,
1137 			    iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
1138 			iocd->stmf_obuf_nentries = n;
1139 			id_entry = id_list->idl_head;
1140 			grp_entry = (stmf_ge_ident_t *)obuf;
1141 			for (i = 0; i < n; i++) {
1142 				bcopy(id_entry->id_data, grp_entry->ident,
1143 				    id_entry->id_data_size);
1144 				grp_entry->ident_size = id_entry->id_data_size;
1145 				id_entry = id_entry->id_next;
1146 				grp_entry++;
1147 			}
1148 		}
1149 		mutex_exit(&stmf_state.stmf_lock);
1150 		break;
1151 
1152 	case STMF_IOCTL_GET_VE_LIST:
1153 		n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1154 		mutex_enter(&stmf_state.stmf_lock);
1155 		ve = (stmf_view_op_entry_t *)obuf;
1156 		for (id_entry = stmf_state.stmf_luid_list.idl_head;
1157 		    id_entry; id_entry = id_entry->id_next) {
1158 			for (view_entry = (stmf_view_entry_t *)
1159 			    id_entry->id_impl_specific; view_entry;
1160 			    view_entry = view_entry->ve_next) {
1161 				iocd->stmf_obuf_max_nentries++;
1162 				if (iocd->stmf_obuf_nentries >= n)
1163 					continue;
1164 				ve->ve_ndx_valid = 1;
1165 				ve->ve_ndx = view_entry->ve_id;
1166 				ve->ve_lu_number_valid = 1;
1167 				bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1168 				bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1169 				    view_entry->ve_luid->id_data_size);
1170 				if (view_entry->ve_hg->id_data[0] == '*') {
1171 					ve->ve_all_hosts = 1;
1172 				} else {
1173 					bcopy(view_entry->ve_hg->id_data,
1174 					    ve->ve_host_group.name,
1175 					    view_entry->ve_hg->id_data_size);
1176 					ve->ve_host_group.name_size =
1177 					    view_entry->ve_hg->id_data_size;
1178 				}
1179 
1180 				if (view_entry->ve_tg->id_data[0] == '*') {
1181 					ve->ve_all_targets = 1;
1182 				} else {
1183 					bcopy(view_entry->ve_tg->id_data,
1184 					    ve->ve_target_group.name,
1185 					    view_entry->ve_tg->id_data_size);
1186 					ve->ve_target_group.name_size =
1187 					    view_entry->ve_tg->id_data_size;
1188 				}
1189 				ve++;
1190 				iocd->stmf_obuf_nentries++;
1191 			}
1192 		}
1193 		mutex_exit(&stmf_state.stmf_lock);
1194 		break;
1195 
1196 	case STMF_IOCTL_LU_VE_LIST:
1197 		p_id = (uint8_t *)ibuf;
1198 		if ((iocd->stmf_ibuf_size != 16) ||
1199 		    (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
1200 			ret = EINVAL;
1201 			break;
1202 		}
1203 
1204 		n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1205 		mutex_enter(&stmf_state.stmf_lock);
1206 		ve = (stmf_view_op_entry_t *)obuf;
1207 		for (id_entry = stmf_state.stmf_luid_list.idl_head;
1208 		    id_entry; id_entry = id_entry->id_next) {
1209 			if (bcmp(id_entry->id_data, p_id, 16) != 0)
1210 				continue;
1211 			for (view_entry = (stmf_view_entry_t *)
1212 			    id_entry->id_impl_specific; view_entry;
1213 			    view_entry = view_entry->ve_next) {
1214 				iocd->stmf_obuf_max_nentries++;
1215 				if (iocd->stmf_obuf_nentries >= n)
1216 					continue;
1217 				ve->ve_ndx_valid = 1;
1218 				ve->ve_ndx = view_entry->ve_id;
1219 				ve->ve_lu_number_valid = 1;
1220 				bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1221 				bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1222 				    view_entry->ve_luid->id_data_size);
1223 				if (view_entry->ve_hg->id_data[0] == '*') {
1224 					ve->ve_all_hosts = 1;
1225 				} else {
1226 					bcopy(view_entry->ve_hg->id_data,
1227 					    ve->ve_host_group.name,
1228 					    view_entry->ve_hg->id_data_size);
1229 					ve->ve_host_group.name_size =
1230 					    view_entry->ve_hg->id_data_size;
1231 				}
1232 
1233 				if (view_entry->ve_tg->id_data[0] == '*') {
1234 					ve->ve_all_targets = 1;
1235 				} else {
1236 					bcopy(view_entry->ve_tg->id_data,
1237 					    ve->ve_target_group.name,
1238 					    view_entry->ve_tg->id_data_size);
1239 					ve->ve_target_group.name_size =
1240 					    view_entry->ve_tg->id_data_size;
1241 				}
1242 				ve++;
1243 				iocd->stmf_obuf_nentries++;
1244 			}
1245 			break;
1246 		}
1247 		mutex_exit(&stmf_state.stmf_lock);
1248 		break;
1249 
1250 	case STMF_IOCTL_LOAD_PP_DATA:
1251 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1252 			ret = EACCES;
1253 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1254 			break;
1255 		}
1256 		ppi = (stmf_ppioctl_data_t *)ibuf;
1257 		if ((ppi == NULL) ||
1258 		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1259 			ret = EINVAL;
1260 			break;
1261 		}
1262 		/* returned token */
1263 		ppi_token = (uint64_t *)obuf;
1264 		if ((ppi_token == NULL) ||
1265 		    (iocd->stmf_obuf_size < sizeof (uint64_t))) {
1266 			ret = EINVAL;
1267 			break;
1268 		}
1269 		ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
1270 		break;
1271 
1272 	case STMF_IOCTL_GET_PP_DATA:
1273 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1274 			ret = EACCES;
1275 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1276 			break;
1277 		}
1278 		ppi = (stmf_ppioctl_data_t *)ibuf;
1279 		if (ppi == NULL ||
1280 		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1281 			ret = EINVAL;
1282 			break;
1283 		}
1284 		ppi_out = (stmf_ppioctl_data_t *)obuf;
1285 		if ((ppi_out == NULL) ||
1286 		    (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
1287 			ret = EINVAL;
1288 			break;
1289 		}
1290 		ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
1291 		break;
1292 
1293 	case STMF_IOCTL_CLEAR_PP_DATA:
1294 		if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1295 			ret = EACCES;
1296 			iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1297 			break;
1298 		}
1299 		ppi = (stmf_ppioctl_data_t *)ibuf;
1300 		if ((ppi == NULL) ||
1301 		    (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1302 			ret = EINVAL;
1303 			break;
1304 		}
1305 		ret = stmf_delete_ppd_ioctl(ppi);
1306 		break;
1307 
1308 	case STMF_IOCTL_CLEAR_TRACE:
1309 		stmf_trace_clear();
1310 		break;
1311 
1312 	case STMF_IOCTL_ADD_TRACE:
1313 		if (iocd->stmf_ibuf_size && ibuf) {
1314 			((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0;
1315 			stmf_trace("\nstradm", "%s\n", ibuf);
1316 		}
1317 		break;
1318 
1319 	case STMF_IOCTL_GET_TRACE_POSITION:
1320 		if (obuf && (iocd->stmf_obuf_size > 3)) {
1321 			mutex_enter(&trace_buf_lock);
1322 			*((int *)obuf) = trace_buf_curndx;
1323 			mutex_exit(&trace_buf_lock);
1324 		} else {
1325 			ret = EINVAL;
1326 		}
1327 		break;
1328 
1329 	case STMF_IOCTL_GET_TRACE:
1330 		if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
1331 			ret = EINVAL;
1332 			break;
1333 		}
1334 		i = *((int *)ibuf);
1335 		if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
1336 		    trace_buf_size)) {
1337 			ret = EINVAL;
1338 			break;
1339 		}
1340 		mutex_enter(&trace_buf_lock);
1341 		bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
1342 		mutex_exit(&trace_buf_lock);
1343 		break;
1344 
1345 	default:
1346 		ret = ENOTTY;
1347 	}
1348 
1349 	if (ret == 0) {
1350 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1351 	} else if (iocd->stmf_error) {
1352 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1353 	}
1354 	if (obuf) {
1355 		kmem_free(obuf, iocd->stmf_obuf_size);
1356 		obuf = NULL;
1357 	}
1358 	if (ibuf) {
1359 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1360 		ibuf = NULL;
1361 	}
1362 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1363 	return (ret);
1364 }
1365 
1366 static int
1367 stmf_get_service_state()
1368 {
1369 	stmf_i_local_port_t *ilport;
1370 	stmf_i_lu_t *ilu;
1371 	int online = 0;
1372 	int offline = 0;
1373 	int onlining = 0;
1374 	int offlining = 0;
1375 
1376 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
1377 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1378 	    ilport = ilport->ilport_next) {
1379 		if (ilport->ilport_state == STMF_STATE_OFFLINE)
1380 			offline++;
1381 		else if (ilport->ilport_state == STMF_STATE_ONLINE)
1382 			online++;
1383 		else if (ilport->ilport_state == STMF_STATE_ONLINING)
1384 			onlining++;
1385 		else if (ilport->ilport_state == STMF_STATE_OFFLINING)
1386 			offlining++;
1387 	}
1388 
1389 	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1390 	    ilu = ilu->ilu_next) {
1391 		if (ilu->ilu_state == STMF_STATE_OFFLINE)
1392 			offline++;
1393 		else if (ilu->ilu_state == STMF_STATE_ONLINE)
1394 			online++;
1395 		else if (ilu->ilu_state == STMF_STATE_ONLINING)
1396 			onlining++;
1397 		else if (ilu->ilu_state == STMF_STATE_OFFLINING)
1398 			offlining++;
1399 	}
1400 
1401 	if (stmf_state.stmf_service_running) {
1402 		if (onlining)
1403 			return (STMF_STATE_ONLINING);
1404 		else
1405 			return (STMF_STATE_ONLINE);
1406 	}
1407 
1408 	if (offlining) {
1409 		return (STMF_STATE_OFFLINING);
1410 	}
1411 
1412 	return (STMF_STATE_OFFLINE);
1413 }
1414 
/*
 * Transition the STMF service between ONLINE and OFFLINE as requested
 * by the user-level descriptor *std, also driving the config-state
 * machine (NONE -> INIT -> INIT_DONE).
 *
 * Returns 0 on success; EACCES if the caller does not hold the
 * exclusive open (or tries to leave ONLINE with config in INIT);
 * EBUSY if the inventory is locked or a transition is in flight;
 * EINVAL for inconsistent state/config combinations.
 */
static int
stmf_set_stmf_state(stmf_state_desc_t *std)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_state_change_info_t ssi;
	int svc_state;

	/* All port/LU state changes below are attributed to the user. */
	ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
	ssi.st_additional_info = NULL;

	mutex_enter(&stmf_state.stmf_lock);
	/* Only the exclusive opener may change service state. */
	if (!stmf_state.stmf_exclusive_open) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	/* Only the two terminal states are valid targets. */
	if ((std->state != STMF_STATE_ONLINE) &&
	    (std->state != STMF_STATE_OFFLINE)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EINVAL);
	}

	/* Refuse to start a new transition while one is in progress. */
	svc_state = stmf_get_service_state();
	if ((svc_state == STMF_STATE_OFFLINING) ||
	    (svc_state == STMF_STATE_ONLINING)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if (svc_state == STMF_STATE_OFFLINE) {
		/*
		 * Re-entering config init wipes all persisted provider
		 * data and the view configuration.
		 */
		if (std->config_state == STMF_CONFIG_INIT) {
			if (std->state != STMF_STATE_OFFLINE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT;
			stmf_delete_all_ppds();
			stmf_view_clear_config();
			stmf_view_init();
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
		    (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
			if (std->config_state != STMF_CONFIG_INIT_DONE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
		}
		/* Already offline and staying offline: nothing to do. */
		if (std->state == STMF_STATE_OFFLINE) {
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		/* Cannot go online until configuration init completes. */
		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * Going online: lock the inventory so ports/LUs cannot be
		 * added or removed, then drop the state lock — stmf_ctl()
		 * must be called without it held.
		 */
		stmf_state.stmf_inventory_locked = 1;
		stmf_state.stmf_service_running = 1;
		mutex_exit(&stmf_state.stmf_lock);

		/* Bring back only what was online before the last offline. */
		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			if (ilport->ilport_prev_state != STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
			    ilport->ilport_lport, &ssi);
		}

		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (ilu->ilu_prev_state != STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
		}
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		return (0);
	}

	/* svc_state is STMF_STATE_ONLINE here */
	if ((std->state != STMF_STATE_OFFLINE) ||
	    (std->config_state == STMF_CONFIG_INIT)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	/* Going offline: same inventory-lock / drop-lock protocol. */
	stmf_state.stmf_inventory_locked = 1;
	stmf_state.stmf_service_running = 0;

	mutex_exit(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
		    ilport->ilport_lport, &ssi);
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
	}
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_inventory_locked = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}
1533 
1534 static int
1535 stmf_get_stmf_state(stmf_state_desc_t *std)
1536 {
1537 	mutex_enter(&stmf_state.stmf_lock);
1538 	std->state = stmf_get_service_state();
1539 	std->config_state = stmf_state.stmf_config_state;
1540 	mutex_exit(&stmf_state.stmf_lock);
1541 
1542 	return (0);
1543 }
1544 
1545 /*
1546  * handles registration message from pppt for a logical unit
1547  */
1548 stmf_status_t
1549 stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
1550 {
1551 	stmf_i_lu_provider_t	*ilp;
1552 	stmf_lu_provider_t	*lp;
1553 	mutex_enter(&stmf_state.stmf_lock);
1554 	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1555 		if (strcmp(msg->icrl_lu_provider_name,
1556 		    ilp->ilp_lp->lp_name) == 0) {
1557 			lp = ilp->ilp_lp;
1558 			mutex_exit(&stmf_state.stmf_lock);
1559 			lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
1560 			    msg->icrl_cb_arg_len, type);
1561 			return (STMF_SUCCESS);
1562 		}
1563 	}
1564 	mutex_exit(&stmf_state.stmf_lock);
1565 	return (STMF_SUCCESS);
1566 }
1567 
1568 /*
1569  * handles de-registration message from pppt for a logical unit
1570  */
1571 stmf_status_t
1572 stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
1573 {
1574 	stmf_i_lu_provider_t	*ilp;
1575 	stmf_lu_provider_t	*lp;
1576 	mutex_enter(&stmf_state.stmf_lock);
1577 	for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1578 		if (strcmp(msg->icrl_lu_provider_name,
1579 		    ilp->ilp_lp->lp_name) == 0) {
1580 			lp = ilp->ilp_lp;
1581 			mutex_exit(&stmf_state.stmf_lock);
1582 			lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
1583 			    STMF_MSG_LU_DEREGISTER);
1584 			return (STMF_SUCCESS);
1585 		}
1586 	}
1587 	mutex_exit(&stmf_state.stmf_lock);
1588 	return (STMF_SUCCESS);
1589 }
1590 
1591 /*
1592  * helper function to find a task that matches a task_msgid
1593  */
1594 scsi_task_t *
1595 find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
1596 {
1597 	stmf_i_lu_t *ilu;
1598 	stmf_i_scsi_task_t *itask;
1599 
1600 	mutex_enter(&stmf_state.stmf_lock);
1601 	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
1602 		if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
1603 			break;
1604 		}
1605 	}
1606 
1607 	if (ilu == NULL) {
1608 		mutex_exit(&stmf_state.stmf_lock);
1609 		return (NULL);
1610 	}
1611 
1612 	mutex_enter(&ilu->ilu_task_lock);
1613 	for (itask = ilu->ilu_tasks; itask != NULL;
1614 	    itask = itask->itask_lu_next) {
1615 		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
1616 		    ITASK_BEING_ABORTED)) {
1617 			continue;
1618 		}
1619 		if (itask->itask_proxy_msg_id == task_msgid) {
1620 			break;
1621 		}
1622 	}
1623 	mutex_exit(&ilu->ilu_task_lock);
1624 	mutex_exit(&stmf_state.stmf_lock);
1625 
1626 	if (itask != NULL) {
1627 		return (itask->itask_task);
1628 	} else {
1629 		/* task not found. Likely already aborted. */
1630 		return (NULL);
1631 	}
1632 }
1633 
1634 /*
1635  * message received from pppt/ic
1636  */
1637 stmf_status_t
1638 stmf_msg_rx(stmf_ic_msg_t *msg)
1639 {
1640 	mutex_enter(&stmf_state.stmf_lock);
1641 	if (stmf_state.stmf_alua_state != 1) {
1642 		mutex_exit(&stmf_state.stmf_lock);
1643 		cmn_err(CE_WARN, "stmf alua state is disabled");
1644 		ic_msg_free(msg);
1645 		return (STMF_FAILURE);
1646 	}
1647 	mutex_exit(&stmf_state.stmf_lock);
1648 
1649 	switch (msg->icm_msg_type) {
1650 		case STMF_ICM_REGISTER_LUN:
1651 			(void) stmf_ic_lu_reg(
1652 			    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1653 			    STMF_MSG_LU_REGISTER);
1654 			break;
1655 		case STMF_ICM_LUN_ACTIVE:
1656 			(void) stmf_ic_lu_reg(
1657 			    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1658 			    STMF_MSG_LU_ACTIVE);
1659 			break;
1660 		case STMF_ICM_DEREGISTER_LUN:
1661 			(void) stmf_ic_lu_dereg(
1662 			    (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
1663 			break;
1664 		case STMF_ICM_SCSI_DATA:
1665 			(void) stmf_ic_rx_scsi_data(
1666 			    (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
1667 			break;
1668 		case STMF_ICM_SCSI_STATUS:
1669 			(void) stmf_ic_rx_scsi_status(
1670 			    (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
1671 			break;
1672 		case STMF_ICM_STATUS:
1673 			(void) stmf_ic_rx_status(
1674 			    (stmf_ic_status_msg_t *)msg->icm_msg);
1675 			break;
1676 		default:
1677 			cmn_err(CE_WARN, "unknown message received %d",
1678 			    msg->icm_msg_type);
1679 			ic_msg_free(msg);
1680 			return (STMF_FAILURE);
1681 	}
1682 	ic_msg_free(msg);
1683 	return (STMF_SUCCESS);
1684 }
1685 
1686 stmf_status_t
1687 stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
1688 {
1689 	stmf_i_local_port_t *ilport;
1690 
1691 	if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
1692 		/* for now, ignore other message status */
1693 		return (STMF_SUCCESS);
1694 	}
1695 
1696 	if (msg->ics_status != STMF_SUCCESS) {
1697 		return (STMF_SUCCESS);
1698 	}
1699 
1700 	mutex_enter(&stmf_state.stmf_lock);
1701 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1702 	    ilport = ilport->ilport_next) {
1703 		if (msg->ics_msgid == ilport->ilport_reg_msgid) {
1704 			ilport->ilport_proxy_registered = 1;
1705 			break;
1706 		}
1707 	}
1708 	mutex_exit(&stmf_state.stmf_lock);
1709 	return (STMF_SUCCESS);
1710 }
1711 
1712 /*
1713  * handles scsi status message from pppt
1714  */
1715 stmf_status_t
1716 stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
1717 {
1718 	scsi_task_t *task;
1719 
1720 	/* is this a task management command */
1721 	if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
1722 		return (STMF_SUCCESS);
1723 	}
1724 
1725 	task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);
1726 
1727 	if (task == NULL) {
1728 		return (STMF_SUCCESS);
1729 	}
1730 
1731 	task->task_scsi_status = msg->icss_status;
1732 	task->task_sense_data = msg->icss_sense;
1733 	task->task_sense_length = msg->icss_sense_len;
1734 	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
1735 
1736 	return (STMF_SUCCESS);
1737 }
1738 
1739 /*
1740  * handles scsi data message from pppt
1741  */
1742 stmf_status_t
1743 stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
1744 {
1745 	stmf_i_scsi_task_t *itask;
1746 	scsi_task_t *task;
1747 	stmf_xfer_data_t *xd = NULL;
1748 	stmf_data_buf_t *dbuf;
1749 	uint32_t sz, minsz, xd_sz, asz;
1750 
1751 	/* is this a task management command */
1752 	if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
1753 		return (STMF_SUCCESS);
1754 	}
1755 
1756 	task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
1757 	if (task == NULL) {
1758 		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
1759 		static uint64_t data_msg_id;
1760 		stmf_status_t ic_ret = STMF_FAILURE;
1761 		mutex_enter(&stmf_state.stmf_lock);
1762 		data_msg_id = stmf_proxy_msg_id++;
1763 		mutex_exit(&stmf_state.stmf_lock);
1764 		/*
1765 		 * send xfer done status to pppt
1766 		 * for now, set the session id to 0 as we cannot
1767 		 * ascertain it since we cannot find the task
1768 		 */
1769 		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
1770 		    msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
1771 		if (ic_xfer_done_msg) {
1772 			ic_ret = ic_tx_msg(ic_xfer_done_msg);
1773 			if (ic_ret != STMF_IC_MSG_SUCCESS) {
1774 				cmn_err(CE_WARN, "unable to xmit proxy msg");
1775 			}
1776 		}
1777 		return (STMF_FAILURE);
1778 	}
1779 
1780 	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
1781 	dbuf = itask->itask_proxy_dbuf;
1782 
1783 	task->task_cmd_xfer_length = msg->icsd_data_len;
1784 
1785 	if (task->task_additional_flags &
1786 	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
1787 		task->task_expected_xfer_length =
1788 		    task->task_cmd_xfer_length;
1789 	}
1790 
1791 	sz = min(task->task_expected_xfer_length,
1792 	    task->task_cmd_xfer_length);
1793 
1794 	xd_sz = msg->icsd_data_len;
1795 	asz = xd_sz + sizeof (*xd) - 4;
1796 	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
1797 
1798 	if (xd == NULL) {
1799 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1800 		    STMF_ALLOC_FAILURE, NULL);
1801 		return (STMF_FAILURE);
1802 	}
1803 
1804 	xd->alloc_size = asz;
1805 	xd->size_left = xd_sz;
1806 	bcopy(msg->icsd_data, xd->buf, xd_sz);
1807 
1808 	sz = min(sz, xd->size_left);
1809 	xd->size_left = sz;
1810 	minsz = min(512, sz);
1811 
1812 	if (dbuf == NULL)
1813 		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
1814 	if (dbuf == NULL) {
1815 		kmem_free(xd, xd->alloc_size);
1816 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1817 		    STMF_ALLOC_FAILURE, NULL);
1818 		return (STMF_FAILURE);
1819 	}
1820 	dbuf->db_lu_private = xd;
1821 	stmf_xd_to_dbuf(dbuf);
1822 
1823 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
1824 	(void) stmf_xfer_data(task, dbuf, 0);
1825 	return (STMF_SUCCESS);
1826 }
1827 
1828 stmf_status_t
1829 stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
1830 {
1831 	stmf_i_scsi_task_t *itask =
1832 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
1833 	stmf_i_local_port_t *ilport =
1834 	    (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
1835 	stmf_ic_msg_t *ic_cmd_msg;
1836 	stmf_ic_msg_status_t ic_ret;
1837 	stmf_status_t ret = STMF_FAILURE;
1838 
1839 	if (stmf_state.stmf_alua_state != 1) {
1840 		cmn_err(CE_WARN, "stmf alua state is disabled");
1841 		return (STMF_FAILURE);
1842 	}
1843 
1844 	if (ilport->ilport_proxy_registered == 0) {
1845 		return (STMF_FAILURE);
1846 	}
1847 
1848 	mutex_enter(&stmf_state.stmf_lock);
1849 	itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
1850 	mutex_exit(&stmf_state.stmf_lock);
1851 	itask->itask_proxy_dbuf = dbuf;
1852 
1853 	/*
1854 	 * stmf will now take over the task handling for this task
1855 	 * but it still needs to be treated differently from other
1856 	 * default handled tasks, hence the ITASK_PROXY_TASK.
1857 	 * If this is a task management function, we're really just
1858 	 * duping the command to the peer. Set the TM bit so that
1859 	 * we can recognize this on return since we won't be completing
1860 	 * the proxied task in that case.
1861 	 */
1862 	if (task->task_mgmt_function) {
1863 		itask->itask_proxy_msg_id |= MSG_ID_TM_BIT;
1864 	} else {
1865 		itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
1866 	}
1867 	if (dbuf) {
1868 		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
1869 		    task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
1870 		    itask->itask_proxy_msg_id);
1871 	} else {
1872 		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
1873 		    task, 0, NULL, itask->itask_proxy_msg_id);
1874 	}
1875 	if (ic_cmd_msg) {
1876 		ic_ret = ic_tx_msg(ic_cmd_msg);
1877 		if (ic_ret == STMF_IC_MSG_SUCCESS) {
1878 			ret = STMF_SUCCESS;
1879 		}
1880 	}
1881 	return (ret);
1882 }
1883 
1884 
1885 stmf_status_t
1886 pppt_modload()
1887 {
1888 	int error;
1889 
1890 	if (pppt_mod == NULL && ((pppt_mod =
1891 	    ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
1892 		cmn_err(CE_WARN, "Unable to load pppt");
1893 		return (STMF_FAILURE);
1894 	}
1895 
1896 	if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
1897 	    (stmf_ic_reg_port_msg_alloc_func_t)
1898 	    ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
1899 	    &error)) == NULL)) {
1900 		cmn_err(CE_WARN,
1901 		    "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
1902 		return (STMF_FAILURE);
1903 	}
1904 
1905 
1906 	if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
1907 	    (stmf_ic_dereg_port_msg_alloc_func_t)
1908 	    ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
1909 	    &error)) == NULL)) {
1910 		cmn_err(CE_WARN,
1911 		    "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
1912 		return (STMF_FAILURE);
1913 	}
1914 
1915 	if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
1916 	    (stmf_ic_reg_lun_msg_alloc_func_t)
1917 	    ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
1918 	    &error)) == NULL)) {
1919 		cmn_err(CE_WARN,
1920 		    "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
1921 		return (STMF_FAILURE);
1922 	}
1923 
1924 	if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
1925 	    (stmf_ic_lun_active_msg_alloc_func_t)
1926 	    ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
1927 	    &error)) == NULL)) {
1928 		cmn_err(CE_WARN,
1929 		    "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
1930 		return (STMF_FAILURE);
1931 	}
1932 
1933 	if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
1934 	    (stmf_ic_dereg_lun_msg_alloc_func_t)
1935 	    ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
1936 	    &error)) == NULL)) {
1937 		cmn_err(CE_WARN,
1938 		    "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
1939 		return (STMF_FAILURE);
1940 	}
1941 
1942 	if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
1943 	    (stmf_ic_scsi_cmd_msg_alloc_func_t)
1944 	    ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
1945 	    &error)) == NULL)) {
1946 		cmn_err(CE_WARN,
1947 		    "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
1948 		return (STMF_FAILURE);
1949 	}
1950 
1951 	if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
1952 	    ((ic_scsi_data_xfer_done_msg_alloc =
1953 	    (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
1954 	    ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
1955 	    &error)) == NULL)) {
1956 		cmn_err(CE_WARN,
1957 		    "Unable to find symbol -"
1958 		    "stmf_ic_scsi_data_xfer_done_msg_alloc");
1959 		return (STMF_FAILURE);
1960 	}
1961 
1962 	if (ic_session_reg_msg_alloc == NULL &&
1963 	    ((ic_session_reg_msg_alloc =
1964 	    (stmf_ic_session_create_msg_alloc_func_t)
1965 	    ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
1966 	    &error)) == NULL)) {
1967 		cmn_err(CE_WARN,
1968 		    "Unable to find symbol -"
1969 		    "stmf_ic_session_create_msg_alloc");
1970 		return (STMF_FAILURE);
1971 	}
1972 
1973 	if (ic_session_dereg_msg_alloc == NULL &&
1974 	    ((ic_session_dereg_msg_alloc =
1975 	    (stmf_ic_session_destroy_msg_alloc_func_t)
1976 	    ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
1977 	    &error)) == NULL)) {
1978 		cmn_err(CE_WARN,
1979 		    "Unable to find symbol -"
1980 		    "stmf_ic_session_destroy_msg_alloc");
1981 		return (STMF_FAILURE);
1982 	}
1983 
1984 	if (ic_tx_msg == NULL && ((ic_tx_msg =
1985 	    (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
1986 	    &error)) == NULL)) {
1987 		cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
1988 		return (STMF_FAILURE);
1989 	}
1990 
1991 	if (ic_msg_free == NULL && ((ic_msg_free =
1992 	    (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
1993 	    &error)) == NULL)) {
1994 		cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
1995 		return (STMF_FAILURE);
1996 	}
1997 	return (STMF_SUCCESS);
1998 }
1999 
2000 static void
2001 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state)
2002 {
2003 	mutex_enter(&stmf_state.stmf_lock);
2004 	alua_state->alua_node = stmf_state.stmf_alua_node;
2005 	alua_state->alua_state = stmf_state.stmf_alua_state;
2006 	mutex_exit(&stmf_state.stmf_lock);
2007 }
2008 
2009 
/*
 * Enable or disable ALUA (proxy) mode for this node.
 * When enabling, the pppt proxy module is loaded (symbols resolved via
 * pppt_modload()) and every existing non-standby, alua-capable local port
 * and every ACTIVE logical unit is registered with the peer node through
 * interconnect messages.
 * Returns 0 on success, EINVAL on bad arguments, EIO if pppt fails to load.
 */
static int
stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_lu_t *lu;
	stmf_ic_msg_status_t ic_ret;
	stmf_ic_msg_t *ic_reg_lun, *ic_reg_port;
	stmf_local_port_t *lport;
	int ret = 0;

	/* Only state 0/1 and node id 0/1 are valid. */
	if (alua_state->alua_state > 1 || alua_state->alua_node > 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	if (alua_state->alua_state == 1) {
		if (pppt_modload() == STMF_FAILURE) {
			ret = EIO;
			goto err;
		}
		if (alua_state->alua_node != 0) {
			/* reset existing rtpids to new base */
			stmf_rtpid_counter = 255;
		}
		stmf_state.stmf_alua_node = alua_state->alua_node;
		stmf_state.stmf_alua_state = 1;
		/* register existing local ports with ppp */
		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			/* skip standby ports and non-alua participants */
			if (ilport->ilport_standby == 1 ||
			    ilport->ilport_alua == 0) {
				continue;
			}
			if (alua_state->alua_node != 0) {
				/* node 1 ports get rtpids above the new base */
				ilport->ilport_rtpid =
				    atomic_add_16_nv(&stmf_rtpid_counter, 1);
			}
			lport = ilport->ilport_lport;
			ic_reg_port = ic_reg_port_msg_alloc(
			    lport->lport_id, ilport->ilport_rtpid,
			    0, NULL, stmf_proxy_msg_id);
			if (ic_reg_port) {
				ic_ret = ic_tx_msg(ic_reg_port);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					/*
					 * Remember the message id so the
					 * peer's reply can be matched; only
					 * consume an id on successful send.
					 */
					ilport->ilport_reg_msgid =
					    stmf_proxy_msg_id++;
				} else {
					cmn_err(CE_WARN,
					    "error on port registration "
					    "port - %s",
					    ilport->ilport_kstat_tgt_name);
				}
			}
		}
		/* register existing logical units */
		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (ilu->ilu_access != STMF_LU_ACTIVE) {
				continue;
			}
			/* register with proxy module */
			lu = ilu->ilu_lu;
			/* only LPIF_REV_2 providers advertise alua support */
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_reg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		}
	} else {
		/* disabling: just clear the flag, nothing is deregistered */
		stmf_state.stmf_alua_state = 0;
	}

err:
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}
2100 
2101 
/*
 * Framework private section of every stmf_alloc() allocation; placed at
 * the tail of the allocation (see stmf_alloc()).
 */
typedef struct {
	void	*bp;	/* back pointer from internal struct to main struct */
	int	alloc_size;	/* total allocation size, used by stmf_free() */
} __istmf_t;

/*
 * Header view of every structure handed out by stmf_alloc().  The shared
 * (public) struct is laid out first, followed by the caller private area
 * (cp) and then the framework private area (fp).
 */
typedef struct {
	__istmf_t	*fp;	/* Framework private */
	void		*cp;	/* Caller private */
	void		*ss;	/* struct specific */
} __stmf_t;
2112 
/*
 * Per-struct-id size table used by stmf_alloc().  Entry order must match
 * the stmf_struct_id_t values; entry 0 is an unused placeholder since
 * struct id 0 is rejected by stmf_alloc().
 */
static struct {
	int shared;	/* size of the public (shared) structure */
	int fw_private;	/* size of the matching framework private struct */
} stmf_sizes[] = { { 0, 0 },
	{ GET_STRUCT_SIZE(stmf_lu_provider_t),
		GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
	{ GET_STRUCT_SIZE(stmf_port_provider_t),
		GET_STRUCT_SIZE(stmf_i_port_provider_t) },
	{ GET_STRUCT_SIZE(stmf_local_port_t),
		GET_STRUCT_SIZE(stmf_i_local_port_t) },
	{ GET_STRUCT_SIZE(stmf_lu_t),
		GET_STRUCT_SIZE(stmf_i_lu_t) },
	{ GET_STRUCT_SIZE(stmf_scsi_session_t),
		GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
	{ GET_STRUCT_SIZE(scsi_task_t),
		GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
	{ GET_STRUCT_SIZE(stmf_data_buf_t),
		GET_STRUCT_SIZE(__istmf_t) },
	{ GET_STRUCT_SIZE(stmf_dbuf_store_t),
		GET_STRUCT_SIZE(__istmf_t) }

};
2135 
2136 void *
2137 stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
2138 {
2139 	int stmf_size;
2140 	int kmem_flag;
2141 	__stmf_t *sh;
2142 
2143 	if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
2144 		return (NULL);
2145 
2146 	if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
2147 		kmem_flag = KM_NOSLEEP;
2148 	} else {
2149 		kmem_flag = KM_SLEEP;
2150 	}
2151 
2152 	additional_size = (additional_size + 7) & (~7);
2153 	stmf_size = stmf_sizes[struct_id].shared +
2154 	    stmf_sizes[struct_id].fw_private + additional_size;
2155 
2156 	sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);
2157 
2158 	if (sh == NULL)
2159 		return (NULL);
2160 
2161 	/*
2162 	 * In principle, the implementation inside stmf_alloc should not
2163 	 * be changed anyway. But the original order of framework private
2164 	 * data and caller private data does not support sglist in the caller
2165 	 * private data.
2166 	 * To work around this, the memory segments of framework private
2167 	 * data and caller private data are re-ordered here.
2168 	 * A better solution is to provide a specific interface to allocate
2169 	 * the sglist, then we will not need this workaround any more.
2170 	 * But before the new interface is available, the memory segment
2171 	 * ordering should be kept as is.
2172 	 */
2173 	sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
2174 	sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
2175 	    stmf_sizes[struct_id].shared + additional_size);
2176 
2177 	sh->fp->bp = sh;
2178 	/* Just store the total size instead of storing additional size */
2179 	sh->fp->alloc_size = stmf_size;
2180 
2181 	return (sh);
2182 }
2183 
2184 void
2185 stmf_free(void *ptr)
2186 {
2187 	__stmf_t *sh = (__stmf_t *)ptr;
2188 
2189 	/*
2190 	 * So far we dont need any struct specific processing. If such
2191 	 * a need ever arises, then store the struct id in the framework
2192 	 * private section and get it here as sh->fp->struct_id.
2193 	 */
2194 	kmem_free(ptr, sh->fp->alloc_size);
2195 }
2196 
2197 /*
2198  * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
2199  * framework and returns a pointer to framework private data for the lu.
2200  * Returns NULL if the lu was not found.
2201  */
2202 stmf_i_lu_t *
2203 stmf_lookup_lu(stmf_lu_t *lu)
2204 {
2205 	stmf_i_lu_t *ilu;
2206 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2207 
2208 	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2209 		if (ilu->ilu_lu == lu)
2210 			return (ilu);
2211 	}
2212 	return (NULL);
2213 }
2214 
2215 /*
2216  * Given a pointer to stmf_local_port_t, verifies if this lport is registered
2217  * with the framework and returns a pointer to framework private data for
2218  * the lport.
2219  * Returns NULL if the lport was not found.
2220  */
2221 stmf_i_local_port_t *
2222 stmf_lookup_lport(stmf_local_port_t *lport)
2223 {
2224 	stmf_i_local_port_t *ilport;
2225 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2226 
2227 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2228 	    ilport = ilport->ilport_next) {
2229 		if (ilport->ilport_lport == lport)
2230 			return (ilport);
2231 	}
2232 	return (NULL);
2233 }
2234 
2235 stmf_status_t
2236 stmf_register_lu_provider(stmf_lu_provider_t *lp)
2237 {
2238 	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2239 	stmf_pp_data_t *ppd;
2240 	uint32_t cb_flags;
2241 
2242 	if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2)
2243 		return (STMF_FAILURE);
2244 
2245 	mutex_enter(&stmf_state.stmf_lock);
2246 	ilp->ilp_next = stmf_state.stmf_ilplist;
2247 	stmf_state.stmf_ilplist = ilp;
2248 	stmf_state.stmf_nlps++;
2249 
2250 	/* See if we need to do a callback */
2251 	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2252 		if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
2253 			break;
2254 		}
2255 	}
2256 	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
2257 		goto rlp_bail_out;
2258 	}
2259 	ilp->ilp_ppd = ppd;
2260 	ppd->ppd_provider = ilp;
2261 	if (lp->lp_cb == NULL)
2262 		goto rlp_bail_out;
2263 	ilp->ilp_cb_in_progress = 1;
2264 	cb_flags = STMF_PCB_PREG_COMPLETE;
2265 	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2266 		cb_flags |= STMF_PCB_STMF_ONLINING;
2267 	mutex_exit(&stmf_state.stmf_lock);
2268 	lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2269 	mutex_enter(&stmf_state.stmf_lock);
2270 	ilp->ilp_cb_in_progress = 0;
2271 
2272 rlp_bail_out:
2273 	mutex_exit(&stmf_state.stmf_lock);
2274 
2275 	return (STMF_SUCCESS);
2276 }
2277 
2278 stmf_status_t
2279 stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
2280 {
2281 	stmf_i_lu_provider_t	**ppilp;
2282 	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2283 
2284 	mutex_enter(&stmf_state.stmf_lock);
2285 	if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) {
2286 		mutex_exit(&stmf_state.stmf_lock);
2287 		return (STMF_BUSY);
2288 	}
2289 	for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
2290 	    ppilp = &((*ppilp)->ilp_next)) {
2291 		if (*ppilp == ilp) {
2292 			*ppilp = ilp->ilp_next;
2293 			stmf_state.stmf_nlps--;
2294 			if (ilp->ilp_ppd) {
2295 				ilp->ilp_ppd->ppd_provider = NULL;
2296 				ilp->ilp_ppd = NULL;
2297 			}
2298 			mutex_exit(&stmf_state.stmf_lock);
2299 			return (STMF_SUCCESS);
2300 		}
2301 	}
2302 	mutex_exit(&stmf_state.stmf_lock);
2303 	return (STMF_NOT_FOUND);
2304 }
2305 
2306 stmf_status_t
2307 stmf_register_port_provider(stmf_port_provider_t *pp)
2308 {
2309 	stmf_i_port_provider_t *ipp =
2310 	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
2311 	stmf_pp_data_t *ppd;
2312 	uint32_t cb_flags;
2313 
2314 	if (pp->pp_portif_rev != PORTIF_REV_1)
2315 		return (STMF_FAILURE);
2316 
2317 	mutex_enter(&stmf_state.stmf_lock);
2318 	ipp->ipp_next = stmf_state.stmf_ipplist;
2319 	stmf_state.stmf_ipplist = ipp;
2320 	stmf_state.stmf_npps++;
2321 	/* See if we need to do a callback */
2322 	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2323 		if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
2324 			break;
2325 		}
2326 	}
2327 	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
2328 		goto rpp_bail_out;
2329 	}
2330 	ipp->ipp_ppd = ppd;
2331 	ppd->ppd_provider = ipp;
2332 	if (pp->pp_cb == NULL)
2333 		goto rpp_bail_out;
2334 	ipp->ipp_cb_in_progress = 1;
2335 	cb_flags = STMF_PCB_PREG_COMPLETE;
2336 	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2337 		cb_flags |= STMF_PCB_STMF_ONLINING;
2338 	mutex_exit(&stmf_state.stmf_lock);
2339 	pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2340 	mutex_enter(&stmf_state.stmf_lock);
2341 	ipp->ipp_cb_in_progress = 0;
2342 
2343 rpp_bail_out:
2344 	mutex_exit(&stmf_state.stmf_lock);
2345 
2346 	return (STMF_SUCCESS);
2347 }
2348 
2349 stmf_status_t
2350 stmf_deregister_port_provider(stmf_port_provider_t *pp)
2351 {
2352 	stmf_i_port_provider_t *ipp =
2353 	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
2354 	stmf_i_port_provider_t **ppipp;
2355 
2356 	mutex_enter(&stmf_state.stmf_lock);
2357 	if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
2358 		mutex_exit(&stmf_state.stmf_lock);
2359 		return (STMF_BUSY);
2360 	}
2361 	for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
2362 	    ppipp = &((*ppipp)->ipp_next)) {
2363 		if (*ppipp == ipp) {
2364 			*ppipp = ipp->ipp_next;
2365 			stmf_state.stmf_npps--;
2366 			if (ipp->ipp_ppd) {
2367 				ipp->ipp_ppd->ppd_provider = NULL;
2368 				ipp->ipp_ppd = NULL;
2369 			}
2370 			mutex_exit(&stmf_state.stmf_lock);
2371 			return (STMF_SUCCESS);
2372 		}
2373 	}
2374 	mutex_exit(&stmf_state.stmf_lock);
2375 	return (STMF_NOT_FOUND);
2376 }
2377 
/*
 * ioctl backend: store packed nvlist provider data for a named lu or port
 * provider.  Creates the ppd record on first use, performs optional
 * write-token validation (detects a set racing with the caller's get),
 * replaces the stored nvlist, bumps the token, and notifies the provider
 * through its data callback if one is registered.
 * Returns 0 on success or an errno; *err_ret carries STMF ioctl error
 * detail (e.g. STMF_IOCERR_PPD_UPDATED on token mismatch).
 */
int
stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret)
{
	stmf_i_port_provider_t		*ipp;
	stmf_i_lu_provider_t		*ilp;
	stmf_pp_data_t			*ppd;
	nvlist_t			*nv;
	int				s;
	int				ret;

	*err_ret = 0;

	/* Exactly one of the lu/port provider flags must be set. */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	/* Find an existing ppd record of the requested class and name. */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd == NULL) {
		/* New provider */
		s = strlen(ppi->ppi_name);
		if (s > 254) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * Size the record for the name's length; the "- 7"
		 * presumably accounts for the embedded name buffer already
		 * counted in sizeof (stmf_pp_data_t) plus the NUL --
		 * TODO confirm against the stmf_pp_data_t definition.
		 */
		s += sizeof (stmf_pp_data_t) - 7;

		ppd = kmem_zalloc(s, KM_NOSLEEP);
		if (ppd == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			return (ENOMEM);
		}
		ppd->ppd_alloc_size = s;
		(void) strcpy(ppd->ppd_name, ppi->ppi_name);

		/* See if this provider already exists */
		if (ppi->ppi_lu_provider) {
			ppd->ppd_lu_provider = 1;
			for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
			    ilp = ilp->ilp_next) {
				if (strcmp(ppi->ppi_name,
				    ilp->ilp_lp->lp_name) == 0) {
					ppd->ppd_provider = ilp;
					ilp->ilp_ppd = ppd;
					break;
				}
			}
		} else {
			ppd->ppd_port_provider = 1;
			for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
			    ipp = ipp->ipp_next) {
				if (strcmp(ppi->ppi_name,
				    ipp->ipp_pp->pp_name) == 0) {
					ppd->ppd_provider = ipp;
					ipp->ipp_ppd = ppd;
					break;
				}
			}
		}

		/* Link this ppd in */
		ppd->ppd_next = stmf_state.stmf_ppdlist;
		stmf_state.stmf_ppdlist = ppd;
	}

	/*
	 * User is requesting that the token be checked.
	 * If there was another set after the user's get
	 * it's an error
	 */
	if (ppi->ppi_token_valid) {
		if (ppi->ppi_token != ppd->ppd_token) {
			*err_ret = STMF_IOCERR_PPD_UPDATED;
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
	}

	/* Unpack the caller's XDR-encoded nvlist (still under the lock). */
	if ((ret = nvlist_unpack((char *)ppi->ppi_data,
	    (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
		mutex_exit(&stmf_state.stmf_lock);
		return (ret);
	}

	/* Free any existing lists and add this one to the ppd */
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);
	ppd->ppd_nv = nv;

	/* set the token for writes */
	ppd->ppd_token++;
	/* return token to caller */
	if (ppi_token) {
		*ppi_token = ppd->ppd_token;
	}

	/* If there is a provider registered, do the notifications */
	if (ppd->ppd_provider) {
		uint32_t cb_flags = 0;

		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
			cb_flags |= STMF_PCB_STMF_ONLINING;
		if (ppi->ppi_lu_provider) {
			ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
			if (ilp->ilp_lp->lp_cb == NULL)
				goto bail_out;
			ilp->ilp_cb_in_progress = 1;
			/* drop the lock across the provider callback */
			mutex_exit(&stmf_state.stmf_lock);
			ilp->ilp_lp->lp_cb(ilp->ilp_lp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ilp->ilp_cb_in_progress = 0;
		} else {
			ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
			if (ipp->ipp_pp->pp_cb == NULL)
				goto bail_out;
			ipp->ipp_cb_in_progress = 1;
			/* drop the lock across the provider callback */
			mutex_exit(&stmf_state.stmf_lock);
			ipp->ipp_pp->pp_cb(ipp->ipp_pp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ipp->ipp_cb_in_progress = 0;
		}
	}

bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}
2520 
2521 void
2522 stmf_delete_ppd(stmf_pp_data_t *ppd)
2523 {
2524 	stmf_pp_data_t **pppd;
2525 
2526 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2527 	if (ppd->ppd_provider) {
2528 		if (ppd->ppd_lu_provider) {
2529 			((stmf_i_lu_provider_t *)
2530 			    ppd->ppd_provider)->ilp_ppd = NULL;
2531 		} else {
2532 			((stmf_i_port_provider_t *)
2533 			    ppd->ppd_provider)->ipp_ppd = NULL;
2534 		}
2535 		ppd->ppd_provider = NULL;
2536 	}
2537 
2538 	for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
2539 	    pppd = &((*pppd)->ppd_next)) {
2540 		if (*pppd == ppd)
2541 			break;
2542 	}
2543 
2544 	if (*pppd == NULL)
2545 		return;
2546 
2547 	*pppd = ppd->ppd_next;
2548 	if (ppd->ppd_nv)
2549 		nvlist_free(ppd->ppd_nv);
2550 
2551 	kmem_free(ppd, ppd->ppd_alloc_size);
2552 }
2553 
2554 int
2555 stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
2556 {
2557 	stmf_pp_data_t *ppd;
2558 	int ret = ENOENT;
2559 
2560 	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2561 		return (EINVAL);
2562 	}
2563 
2564 	mutex_enter(&stmf_state.stmf_lock);
2565 
2566 	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2567 		if (ppi->ppi_lu_provider) {
2568 			if (!ppd->ppd_lu_provider)
2569 				continue;
2570 		} else if (ppi->ppi_port_provider) {
2571 			if (!ppd->ppd_port_provider)
2572 				continue;
2573 		}
2574 		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2575 			break;
2576 	}
2577 
2578 	if (ppd) {
2579 		ret = 0;
2580 		stmf_delete_ppd(ppd);
2581 	}
2582 	mutex_exit(&stmf_state.stmf_lock);
2583 
2584 	return (ret);
2585 }
2586 
2587 int
2588 stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
2589     uint32_t *err_ret)
2590 {
2591 	stmf_pp_data_t *ppd;
2592 	size_t req_size;
2593 	int ret = ENOENT;
2594 	char *bufp = (char *)ppi_out->ppi_data;
2595 
2596 	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2597 		return (EINVAL);
2598 	}
2599 
2600 	mutex_enter(&stmf_state.stmf_lock);
2601 
2602 	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2603 		if (ppi->ppi_lu_provider) {
2604 			if (!ppd->ppd_lu_provider)
2605 				continue;
2606 		} else if (ppi->ppi_port_provider) {
2607 			if (!ppd->ppd_port_provider)
2608 				continue;
2609 		}
2610 		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2611 			break;
2612 	}
2613 
2614 	if (ppd && ppd->ppd_nv) {
2615 		ppi_out->ppi_token = ppd->ppd_token;
2616 		if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
2617 		    NV_ENCODE_XDR)) != 0) {
2618 			goto done;
2619 		}
2620 		ppi_out->ppi_data_size = req_size;
2621 		if (req_size > ppi->ppi_data_size) {
2622 			*err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
2623 			ret = EINVAL;
2624 			goto done;
2625 		}
2626 
2627 		if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
2628 		    NV_ENCODE_XDR, 0)) != 0) {
2629 			goto done;
2630 		}
2631 		ret = 0;
2632 	}
2633 
2634 done:
2635 	mutex_exit(&stmf_state.stmf_lock);
2636 
2637 	return (ret);
2638 }
2639 
2640 void
2641 stmf_delete_all_ppds()
2642 {
2643 	stmf_pp_data_t *ppd, *nppd;
2644 
2645 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
2646 	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
2647 		nppd = ppd->ppd_next;
2648 		stmf_delete_ppd(ppd);
2649 	}
2650 }
2651 
2652 /*
2653  * 16 is the max string length of a protocol_ident, increase
2654  * the size if needed.
2655  */
2656 #define	STMF_KSTAT_LU_SZ	(STMF_GUID_INPUT + 1 + 256)
2657 #define	STMF_KSTAT_TGT_SZ	(256 * 2 + 16)
2658 
2659 /*
2660  * This array matches the Protocol Identifier in stmf_ioctl.h
2661  */
2662 #define	MAX_PROTO_STR_LEN	32
2663 
/*
 * Indexed by lport_id->protocol_id; exactly PROTOCOL_ANY entries, so
 * valid indices are 0 .. PROTOCOL_ANY - 1.
 */
char *protocol_ident[PROTOCOL_ANY] = {
	"Fibre Channel",
	"Parallel SCSI",
	"SSA",
	"IEEE_1394",
	"SRP",
	"iSCSI",
	"SAS",
	"ADT",
	"ATAPI",
	/* placeholders for ids with no assigned name */
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
};
2676 
2677 /*
2678  * Update the lun wait/run queue count
2679  */
2680 static void
2681 stmf_update_kstat_lu_q(scsi_task_t *task, void func())
2682 {
2683 	stmf_i_lu_t		*ilu;
2684 	kstat_io_t		*kip;
2685 
2686 	if (task->task_lu == dlun0)
2687 		return;
2688 	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2689 	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2690 		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2691 		if (kip != NULL) {
2692 			func(kip);
2693 		}
2694 	}
2695 }
2696 
2697 /*
2698  * Update the target(lport) wait/run queue count
2699  */
2700 static void
2701 stmf_update_kstat_lport_q(scsi_task_t *task, void func())
2702 {
2703 	stmf_i_local_port_t	*ilp;
2704 	kstat_io_t		*kip;
2705 
2706 	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2707 	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2708 		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2709 		if (kip != NULL) {
2710 			mutex_enter(ilp->ilport_kstat_io->ks_lock);
2711 			func(kip);
2712 			mutex_exit(ilp->ilport_kstat_io->ks_lock);
2713 		}
2714 	}
2715 }
2716 
2717 static void
2718 stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2719 {
2720 	stmf_i_local_port_t	*ilp;
2721 	kstat_io_t		*kip;
2722 
2723 	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2724 	if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2725 		kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2726 		if (kip != NULL) {
2727 			mutex_enter(ilp->ilport_kstat_io->ks_lock);
2728 			STMF_UPDATE_KSTAT_IO(kip, dbuf);
2729 			mutex_exit(ilp->ilport_kstat_io->ks_lock);
2730 		}
2731 	}
2732 }
2733 
2734 static void
2735 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2736 {
2737 	stmf_i_lu_t		*ilu;
2738 	kstat_io_t		*kip;
2739 
2740 	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2741 	if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2742 		kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2743 		if (kip != NULL) {
2744 			mutex_enter(ilu->ilu_kstat_io->ks_lock);
2745 			STMF_UPDATE_KSTAT_IO(kip, dbuf);
2746 			mutex_exit(ilu->ilu_kstat_io->ks_lock);
2747 		}
2748 	}
2749 }
2750 
/*
 * Create the "misc" (guid/alias) and "io" kstats for a logical unit.
 * Failures are logged and tolerated; the lu then simply runs without
 * the corresponding statistics.  Also initializes ilu_kstat_lock, which
 * serializes updates to the io kstat.
 */
static void
stmf_create_kstat_lu(stmf_i_lu_t *ilu)
{
	char				ks_nm[KSTAT_STRLEN];
	stmf_kstat_lu_info_t		*ks_lu;

	/* create kstat lun info */
	ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
	    KM_NOSLEEP);
	if (ks_lu == NULL) {
		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
		return;
	}

	/* kstat name is unique per lu: derived from the ilu's address */
	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		kmem_free(ks_lu, STMF_KSTAT_LU_SZ);
		cmn_err(CE_WARN, "STMF: kstat_create lu failed");
		return;
	}

	/* virtual kstat: we supply the data area ourselves */
	ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ;
	ilu->ilu_kstat_info->ks_data = ks_lu;

	kstat_named_init(&ks_lu->i_lun_guid, "lun-guid",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_lu->i_lun_alias, "lun-alias",
	    KSTAT_DATA_STRING);

	/* convert guid to hex string */
	int		i;
	uint8_t		*p = ilu->ilu_lu->lu_id->ident;
	bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid));
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]);
	}
	kstat_named_setstr(&ks_lu->i_lun_guid,
	    (const char *)ilu->ilu_ascii_hex_guid);
	kstat_named_setstr(&ks_lu->i_lun_alias,
	    (const char *)ilu->ilu_lu->lu_alias);
	kstat_install(ilu->ilu_kstat_info);

	/* create kstat lun io */
	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		cmn_err(CE_WARN, "STMF: kstat_create lu_io failed");
		return;
	}
	mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0);
	ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock;
	kstat_install(ilu->ilu_kstat_io);
}
2809 
2810 static void
2811 stmf_create_kstat_lport(stmf_i_local_port_t *ilport)
2812 {
2813 	char				ks_nm[KSTAT_STRLEN];
2814 	stmf_kstat_tgt_info_t		*ks_tgt;
2815 	int				id, len;
2816 
2817 	/* create kstat lport info */
2818 	ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ,
2819 	    KM_NOSLEEP);
2820 	if (ks_tgt == NULL) {
2821 		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2822 		return;
2823 	}
2824 
2825 	bzero(ks_nm, sizeof (ks_nm));
2826 	(void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport);
2827 	if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME,
2828 	    0, ks_nm, "misc", KSTAT_TYPE_NAMED,
2829 	    sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
2830 	    KSTAT_FLAG_VIRTUAL)) == NULL) {
2831 		kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
2832 		cmn_err(CE_WARN, "STMF: kstat_create target failed");
2833 		return;
2834 	}
2835 
2836 	ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
2837 	ilport->ilport_kstat_info->ks_data = ks_tgt;
2838 
2839 	kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
2840 	    KSTAT_DATA_STRING);
2841 	kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
2842 	    KSTAT_DATA_STRING);
2843 	kstat_named_init(&ks_tgt->i_protocol, "protocol",
2844 	    KSTAT_DATA_STRING);
2845 
2846 	/* ident might not be null terminated */
2847 	len = ilport->ilport_lport->lport_id->ident_length;
2848 	bcopy(ilport->ilport_lport->lport_id->ident,
2849 	    ilport->ilport_kstat_tgt_name, len);
2850 	ilport->ilport_kstat_tgt_name[len + 1] = NULL;
2851 	kstat_named_setstr(&ks_tgt->i_tgt_name,
2852 	    (const char *)ilport->ilport_kstat_tgt_name);
2853 	kstat_named_setstr(&ks_tgt->i_tgt_alias,
2854 	    (const char *)ilport->ilport_lport->lport_alias);
2855 	/* protocol */
2856 	if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) {
2857 		cmn_err(CE_WARN, "STMF: protocol_id out of bound");
2858 		id = PROTOCOL_ANY;
2859 	}
2860 	kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
2861 	kstat_install(ilport->ilport_kstat_info);
2862 
2863 	/* create kstat lport io */
2864 	bzero(ks_nm, sizeof (ks_nm));
2865 	(void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
2866 	if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2867 	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2868 		cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
2869 		return;
2870 	}
2871 	mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
2872 	ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
2873 	kstat_install(ilport->ilport_kstat_io);
2874 }
2875 
2876 /*
2877  * set the asymmetric access state for a logical unit
2878  * caller is responsible for establishing SCSI unit attention on
2879  * state change
2880  */
stmf_status_t
stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state)
{
	stmf_i_lu_t *ilu;
	uint8_t *p1, *p2;

	/* Only STANDBY and ACTIVE are legal target states. */
	if ((access_state != STMF_LU_STANDBY) &&
	    (access_state != STMF_LU_ACTIVE)) {
		return (STMF_INVALID_ARG);
	}

	p1 = &lu->lu_id->ident[0];
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* Search the registered lu list by 16-byte GUID. */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			break;
		}
	}

	if (!ilu) {
		/* Not registered yet; use the lu's own private struct. */
		ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	} else {
		/*
		 * We're changing access state on an existing logical unit
		 * Send the proxy registration message for this logical unit
		 * if we're in alua mode.
		 * If the requested state is STMF_LU_ACTIVE, we want to register
		 * this logical unit.
		 * If the requested state is STMF_LU_STANDBY, we're going to
		 * abort all tasks for this logical unit.
		 */
		if (stmf_state.stmf_alua_state == 1 &&
		    access_state == STMF_LU_ACTIVE) {
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_reg_lun;
			/* only LPIF_REV_2 providers advertise alua support */
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_lun_active_msg_alloc(p1,
				    lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						/* consume an id on success */
						stmf_proxy_msg_id++;
					}
				}
			}
		} else if (stmf_state.stmf_alua_state == 1 &&
		    access_state == STMF_LU_STANDBY) {
			/* abort all tasks for this lu */
			stmf_task_lu_killall(lu, NULL, STMF_ABORTED);
		}
	}

	ilu->ilu_access = access_state;

	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
2951 
2952 
/*
 * Register a logical unit with the framework.  The lu must carry a
 * 16-byte NAA identifier of type 6 (ident[0] high nibble == 0x60).
 * Adds the lu to the global inventory, creates its kstats, registers it
 * with the proxy (pppt) if ALUA is enabled and the lu is active, and
 * brings it online when the framework service is running.
 * Returns STMF_SUCCESS, STMF_INVALID_ARG for a bad identifier,
 * STMF_BUSY while the inventory is locked, or STMF_ALREADY if an lu
 * with the same GUID is registered.
 */
stmf_status_t
stmf_register_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	uint8_t *p1, *p2;
	stmf_state_change_info_t ssci;
	stmf_id_data_t *luid;

	/* validate the identifier: NAA type, 16 bytes, NAA 6 */
	if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
	    (lu->lu_id->ident_length != 16) ||
	    ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
		return (STMF_INVALID_ARG);
	}
	p1 = &lu->lu_id->ident[0];
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* reject duplicate GUIDs */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_ALREADY);
		}
	}

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	/* connect any pre-existing id record (e.g. view entries) to us */
	luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
	    lu->lu_id->ident_length, lu->lu_id->ident);
	if (luid) {
		luid->id_pt_to_object = (void *)ilu;
		ilu->ilu_luid = luid;
	}
	ilu->ilu_alias = NULL;

	/* insert at the head of the doubly linked inventory list */
	ilu->ilu_next = stmf_state.stmf_ilulist;
	ilu->ilu_prev = NULL;
	if (ilu->ilu_next)
		ilu->ilu_next->ilu_prev = ilu;
	stmf_state.stmf_ilulist = ilu;
	stmf_state.stmf_nlus++;
	if (lu->lu_lp) {
		/* bump the owning provider's lu count */
		((stmf_i_lu_provider_t *)
		    (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
	}
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
	STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
	stmf_create_kstat_lu(ilu);
	/*
	 * register with proxy module if available and logical unit
	 * is in active state
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilu->ilu_access == STMF_LU_ACTIVE) {
		stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
		stmf_ic_msg_t *ic_reg_lun;
		/* only LPIF_REV_2 providers advertise alua support */
		if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
		    lu->lu_lp->lp_alua_support) {
			ilu->ilu_alua = 1;
			/* allocate the register message */
			ic_reg_lun = ic_reg_lun_msg_alloc(p1,
			    lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len,
			    (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id);
			/* send the message */
			if (ic_reg_lun) {
				ic_ret = ic_tx_msg(ic_reg_lun);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					/* consume a message id on success */
					stmf_proxy_msg_id++;
				}
			}
		}
	}
	mutex_exit(&stmf_state.stmf_lock);

	/* XXX we should probably check if this lu can be brought online */
	ilu->ilu_prev_state = STMF_STATE_ONLINE;
	if (stmf_state.stmf_service_running) {
		ssci.st_rflags = 0;
		ssci.st_additional_info = NULL;
		(void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci);
	}

	/* XXX: Generate event */
	return (STMF_SUCCESS);
}
3040 
/*
 * Deregister a logical unit.  The LU must be offline with all of its
 * tasks free or STMF_BUSY is returned (as it also is while the
 * inventory is locked).  Frees the LU's cached task pool, notifies the
 * ALUA proxy when applicable, unlinks the LU from the global list and
 * tears down its kstats.
 */
stmf_status_t
stmf_deregister_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	ilu = stmf_lookup_lu(lu);
	if (ilu == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_INVALID_ARG);
	}
	if (ilu->ilu_state == STMF_STATE_OFFLINE) {
		ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
		/* wait for any stalled deregister (task drain) to finish */
		while (ilu->ilu_flags & ILU_STALL_DEREGISTER) {
			cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock);
		}
		if (ilu->ilu_ntasks) {
			stmf_i_scsi_task_t *itask, *nitask;

			/* return every cached task to the LU provider */
			nitask = ilu->ilu_tasks;
			do {
				itask = nitask;
				nitask = itask->itask_lu_next;
				lu->lu_task_free(itask->itask_task);
				stmf_free(itask->itask_task);
			} while (nitask != NULL);

			ilu->ilu_tasks = ilu->ilu_free_tasks = NULL;
			ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0;
		}
		/* de-register with proxy if available */
		if (ilu->ilu_access == STMF_LU_ACTIVE &&
		    stmf_state.stmf_alua_state == 1) {
			/* de-register with proxy module */
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_dereg_lun;
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the de-register message */
				ic_dereg_lun = ic_dereg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name, 0,
				    NULL, stmf_proxy_msg_id);
				/* send the message */
				if (ic_dereg_lun) {
					ic_ret = ic_tx_msg(ic_dereg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		}

		/* unlink from the global LU list */
		if (ilu->ilu_next)
			ilu->ilu_next->ilu_prev = ilu->ilu_prev;
		if (ilu->ilu_prev)
			ilu->ilu_prev->ilu_next = ilu->ilu_next;
		else
			stmf_state.stmf_ilulist = ilu->ilu_next;
		stmf_state.stmf_nlus--;

		/* don't leave the svc thread's cursors pointing at us */
		if (ilu == stmf_state.stmf_svc_ilu_draining) {
			stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		}
		if (ilu == stmf_state.stmf_svc_ilu_timing) {
			stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		}
		if (lu->lu_lp) {
			((stmf_i_lu_provider_t *)
			    (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
		}
		if (ilu->ilu_luid) {
			((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
			    NULL;
			ilu->ilu_luid = NULL;
		}
		STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	if (ilu->ilu_kstat_info) {
		kmem_free(ilu->ilu_kstat_info->ks_data,
		    ilu->ilu_kstat_info->ks_data_size);
		kstat_delete(ilu->ilu_kstat_info);
	}
	if (ilu->ilu_kstat_io) {
		kstat_delete(ilu->ilu_kstat_io);
		mutex_destroy(&ilu->ilu_kstat_lock);
	}
	/* drop any shared ITL kstats that reference this LU's guid */
	stmf_delete_itl_kstat_by_guid(ilu->ilu_ascii_hex_guid);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3139 
3140 void
3141 stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
3142 {
3143 	stmf_i_local_port_t *ilport =
3144 	    (stmf_i_local_port_t *)lport->lport_stmf_private;
3145 	ilport->ilport_rtpid = rtpid;
3146 	ilport->ilport_standby = 1;
3147 }
3148 
3149 void
3150 stmf_set_port_alua(stmf_local_port_t *lport)
3151 {
3152 	stmf_i_local_port_t *ilport =
3153 	    (stmf_i_local_port_t *)lport->lport_stmf_private;
3154 	ilport->ilport_alua = 1;
3155 }
3156 
3157 stmf_status_t
3158 stmf_register_local_port(stmf_local_port_t *lport)
3159 {
3160 	stmf_i_local_port_t *ilport;
3161 	stmf_state_change_info_t ssci;
3162 	int start_workers = 0;
3163 
3164 	mutex_enter(&stmf_state.stmf_lock);
3165 	if (stmf_state.stmf_inventory_locked) {
3166 		mutex_exit(&stmf_state.stmf_lock);
3167 		return (STMF_BUSY);
3168 	}
3169 	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3170 	rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);
3171 
3172 	ilport->ilport_instance =
3173 	    id_alloc_nosleep(stmf_state.stmf_ilport_inst_space);
3174 	if (ilport->ilport_instance == -1) {
3175 		mutex_exit(&stmf_state.stmf_lock);
3176 		return (STMF_FAILURE);
3177 	}
3178 	ilport->ilport_next = stmf_state.stmf_ilportlist;
3179 	ilport->ilport_prev = NULL;
3180 	if (ilport->ilport_next)
3181 		ilport->ilport_next->ilport_prev = ilport;
3182 	stmf_state.stmf_ilportlist = ilport;
3183 	stmf_state.stmf_nlports++;
3184 	if (lport->lport_pp) {
3185 		((stmf_i_port_provider_t *)
3186 		    (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3187 	}
3188 	ilport->ilport_tg =
3189 	    stmf_lookup_group_for_target(lport->lport_id->ident,
3190 	    lport->lport_id->ident_length);
3191 
3192 	/*
3193 	 * rtpid will/must be set if this is a standby port
3194 	 * only register ports that are not standby (proxy) ports
3195 	 * and ports that are alua participants (ilport_alua == 1)
3196 	 */
3197 	if (ilport->ilport_standby == 0) {
3198 		ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1);
3199 	}
3200 
3201 	if (stmf_state.stmf_alua_state == 1 &&
3202 	    ilport->ilport_standby == 0 &&
3203 	    ilport->ilport_alua == 1) {
3204 		stmf_ic_msg_t *ic_reg_port;
3205 		stmf_ic_msg_status_t ic_ret;
3206 		stmf_local_port_t *lport;
3207 		lport = ilport->ilport_lport;
3208 		ic_reg_port = ic_reg_port_msg_alloc(
3209 		    lport->lport_id, ilport->ilport_rtpid,
3210 		    0, NULL, stmf_proxy_msg_id);
3211 		if (ic_reg_port) {
3212 			ic_ret = ic_tx_msg(ic_reg_port);
3213 			if (ic_ret == STMF_IC_MSG_SUCCESS) {
3214 				ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3215 			} else {
3216 				cmn_err(CE_WARN, "error on port registration "
3217 				"port - %s", ilport->ilport_kstat_tgt_name);
3218 			}
3219 		}
3220 	}
3221 	STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
3222 	stmf_create_kstat_lport(ilport);
3223 	if (stmf_workers_state == STMF_WORKERS_DISABLED) {
3224 		stmf_workers_state = STMF_WORKERS_ENABLING;
3225 		start_workers = 1;
3226 	}
3227 	mutex_exit(&stmf_state.stmf_lock);
3228 
3229 	if (start_workers)
3230 		stmf_worker_init();
3231 
3232 	/* XXX we should probably check if this lport can be brought online */
3233 	ilport->ilport_prev_state = STMF_STATE_ONLINE;
3234 	if (stmf_state.stmf_service_running) {
3235 		ssci.st_rflags = 0;
3236 		ssci.st_additional_info = NULL;
3237 		(void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
3238 	}
3239 
3240 	/* XXX: Generate event */
3241 	return (STMF_SUCCESS);
3242 }
3243 
/*
 * Deregister a local (target) port.  Returns STMF_BUSY while the
 * inventory is locked or while the port still has sessions.  For
 * non-standby ALUA ports the proxy is notified, then the port is
 * unlinked from the global list and its kstats are destroyed.
 *
 * NOTE(review): the proxy dereg message is sent before the
 * ilport_nsessions check below, so a STMF_BUSY return may leave the
 * port already deregistered with the proxy — confirm this is intended.
 */
stmf_status_t
stmf_deregister_local_port(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;

	/*
	 * deregister ports that are not standby (proxy)
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		stmf_ic_msg_t *ic_dereg_port;
		stmf_ic_msg_status_t ic_ret;
		ic_dereg_port = ic_dereg_port_msg_alloc(
		    lport->lport_id, 0, NULL, stmf_proxy_msg_id);
		if (ic_dereg_port) {
			ic_ret = ic_tx_msg(ic_dereg_port);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* advance msg id only on success */
				stmf_proxy_msg_id++;
			}
		}
	}

	if (ilport->ilport_nsessions == 0) {
		/* unlink from the global port list */
		if (ilport->ilport_next)
			ilport->ilport_next->ilport_prev = ilport->ilport_prev;
		if (ilport->ilport_prev)
			ilport->ilport_prev->ilport_next = ilport->ilport_next;
		else
			stmf_state.stmf_ilportlist = ilport->ilport_next;
		id_free(stmf_state.stmf_ilport_inst_space,
		    ilport->ilport_instance);
		rw_destroy(&ilport->ilport_lock);
		stmf_state.stmf_nlports--;
		if (lport->lport_pp) {
			((stmf_i_port_provider_t *)
			    (lport->lport_pp->pp_stmf_private))->ipp_npps--;
		}
		ilport->ilport_tg = NULL;
		STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	if (ilport->ilport_kstat_info) {
		kmem_free(ilport->ilport_kstat_info->ks_data,
		    ilport->ilport_kstat_info->ks_data_size);
		kstat_delete(ilport->ilport_kstat_info);
	}
	if (ilport->ilport_kstat_io) {
		kstat_delete(ilport->ilport_kstat_io);
		mutex_destroy(&ilport->ilport_kstat_lock);
	}
	/* drop shared ITL kstats that reference this target port */
	stmf_delete_itl_kstat_by_lport(ilport->ilport_kstat_tgt_name);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3309 
3310 /*
3311  * Rport id/instance mappings remain valid until STMF is unloaded
3312  */
3313 static int
3314 stmf_irport_compare(const void *void_irport1, const void *void_irport2)
3315 {
3316 	const	stmf_i_remote_port_t	*irport1 = void_irport1;
3317 	const	stmf_i_remote_port_t	*irport2 = void_irport2;
3318 	int			result;
3319 
3320 	/* Sort by code set then ident */
3321 	if (irport1->irport_id->code_set <
3322 	    irport2->irport_id->code_set) {
3323 		return (-1);
3324 	} else if (irport1->irport_id->code_set >
3325 	    irport2->irport_id->code_set) {
3326 		return (1);
3327 	}
3328 
3329 	/* Next by ident length */
3330 	if (irport1->irport_id->ident_length <
3331 	    irport2->irport_id->ident_length) {
3332 		return (-1);
3333 	} else if (irport1->irport_id->ident_length >
3334 	    irport2->irport_id->ident_length) {
3335 		return (1);
3336 	}
3337 
3338 	/* Code set and ident length both match, now compare idents */
3339 	result = memcmp(irport1->irport_id->ident,
3340 	    irport2->irport_id->ident,
3341 	    irport1->irport_id->ident_length);
3342 
3343 	if (result < 0) {
3344 		return (-1);
3345 	} else if (result > 0) {
3346 		return (1);
3347 	}
3348 
3349 	return (0);
3350 }
3351 
3352 static stmf_i_remote_port_t *
3353 stmf_irport_create(scsi_devid_desc_t *rport_devid)
3354 {
3355 	int			alloc_len;
3356 	stmf_i_remote_port_t	*irport;
3357 
3358 	/*
3359 	 * Lookup will bump the refcnt if there's an existing rport
3360 	 * context for this identifier.
3361 	 */
3362 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3363 
3364 	alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3365 	    rport_devid->ident_length - 1;
3366 	irport = kmem_zalloc(alloc_len, KM_NOSLEEP);
3367 	if (irport == NULL) {
3368 		return (NULL);
3369 	}
3370 
3371 	irport->irport_instance =
3372 	    id_alloc_nosleep(stmf_state.stmf_irport_inst_space);
3373 	if (irport->irport_instance == -1) {
3374 		kmem_free(irport, alloc_len);
3375 		return (NULL);
3376 	}
3377 
3378 	irport->irport_id =
3379 	    (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */
3380 	bcopy(rport_devid, irport->irport_id,
3381 	    sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1);
3382 	irport->irport_refcnt = 1;
3383 	mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL);
3384 
3385 	return (irport);
3386 }
3387 
3388 static void
3389 stmf_irport_destroy(stmf_i_remote_port_t *irport)
3390 {
3391 	id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance);
3392 	mutex_destroy(&irport->irport_mutex);
3393 	kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3394 	    irport->irport_id->ident_length - 1);
3395 }
3396 
3397 static stmf_i_remote_port_t *
3398 stmf_irport_register(scsi_devid_desc_t *rport_devid)
3399 {
3400 	stmf_i_remote_port_t	*irport;
3401 
3402 	mutex_enter(&stmf_state.stmf_lock);
3403 
3404 	/*
3405 	 * Lookup will bump the refcnt if there's an existing rport
3406 	 * context for this identifier.
3407 	 */
3408 	if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) {
3409 		mutex_exit(&stmf_state.stmf_lock);
3410 		return (irport);
3411 	}
3412 
3413 	irport = stmf_irport_create(rport_devid);
3414 	if (irport == NULL) {
3415 		mutex_exit(&stmf_state.stmf_lock);
3416 		return (NULL);
3417 	}
3418 
3419 	avl_add(&stmf_state.stmf_irportlist, irport);
3420 	mutex_exit(&stmf_state.stmf_lock);
3421 
3422 	return (irport);
3423 }
3424 
3425 static stmf_i_remote_port_t *
3426 stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid)
3427 {
3428 	stmf_i_remote_port_t	*irport;
3429 	stmf_i_remote_port_t	tmp_irport;
3430 
3431 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3432 	tmp_irport.irport_id = rport_devid;
3433 	irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL);
3434 	if (irport != NULL) {
3435 		mutex_enter(&irport->irport_mutex);
3436 		irport->irport_refcnt++;
3437 		mutex_exit(&irport->irport_mutex);
3438 	}
3439 
3440 	return (irport);
3441 }
3442 
/*
 * Drop one reference on a remote port context.  Unreferenced contexts
 * are deliberately left on the AVL tree (rport id/instance mappings
 * remain valid until STMF is unloaded).
 */
static void
stmf_irport_deregister(stmf_i_remote_port_t *irport)
{
	/*
	 * If we were actually going to remove unreferenced remote ports
	 * we would want to acquire stmf_state.stmf_lock before getting
	 * the irport mutex.
	 *
	 * Instead we're just going to leave it there even if unreferenced.
	 */
	mutex_enter(&irport->irport_mutex);
	irport->irport_refcnt--;
	mutex_exit(&irport->irport_mutex);
}
3457 
3458 /*
3459  * Port provider has to make sure that register/deregister session and
3460  * port are serialized calls.
3461  */
/*
 * Register a SCSI session on a local port.  The port must be online
 * (an offlining port merely fails the call; any other state is traced
 * as a port provider bug).  Registers/references the remote port
 * context, builds the session's LUN map and links the session onto the
 * port's session list.  Returns STMF_SUCCESS or STMF_FAILURE.
 */
stmf_status_t
stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_scsi_session_t *iss;
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	uint8_t		lun[8];	/* NOTE(review): zeroed but unused below */

	/*
	 * Port state has to be online to register a scsi session. It is
	 * possible that we started an offline operation and a new SCSI
	 * session started at the same time (in that case also we are going
	 * to fail the registeration). But any other state is simply
	 * a bad port provider implementation.
	 */
	if (ilport->ilport_state != STMF_STATE_ONLINE) {
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			stmf_trace(lport->lport_alias, "Port is trying to "
			    "register a session while the state is neither "
			    "online nor offlining");
		}
		return (STMF_FAILURE);
	}
	bzero(lun, 8);
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	/* create or reference the remote port context for this initiator */
	if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) {
		stmf_trace(lport->lport_alias, "Could not register "
		    "remote port during session registration");
		return (STMF_FAILURE);
	}

	iss->iss_flags |= ISS_BEING_CREATED;

	/* sessions use the ilport_lock. No separate lock is required */
	iss->iss_lockp = &ilport->ilport_lock;
	(void) stmf_session_create_lun_map(ilport, iss);

	/* link at the head of the port's session list */
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	ilport->ilport_nsessions++;
	iss->iss_next = ilport->ilport_ss_list;
	ilport->ilport_ss_list = iss;
	rw_exit(&ilport->ilport_lock);

	iss->iss_creation_time = ddi_get_time();
	/* session ids are unique for the lifetime of the module */
	ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
	iss->iss_flags &= ~ISS_BEING_CREATED;
	/* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
	iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
	DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);
	return (STMF_SUCCESS);
}
3514 
/*
 * Deregister a SCSI session from a local port.  Waits (busy loop with
 * delay(1)) until no session event is being delivered, notifies the
 * ALUA proxy for non-standby alua ports, unlinks the session from the
 * port's list (panics if the session is not on the list), drops the
 * remote port reference and destroys the session's LUN map.
 */
void
stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	stmf_i_scsi_session_t *iss, **ppss;
	int found = 0;
	stmf_ic_msg_t *ic_session_dereg;
	/* NOTE(review): declared stmf_status_t but holds an ic msg status */
	stmf_status_t ic_ret = STMF_FAILURE;

	DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);

	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	if (ss->ss_rport_alias) {
		ss->ss_rport_alias = NULL;
	}

try_dereg_ss_again:
	mutex_enter(&stmf_state.stmf_lock);
	atomic_and_32(&iss->iss_flags,
	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	if (iss->iss_flags & ISS_EVENT_ACTIVE) {
		/* an event is being delivered to this session; retry */
		mutex_exit(&stmf_state.stmf_lock);
		delay(1);
		goto try_dereg_ss_again;
	}

	/* dereg proxy session if not standby port */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		ic_session_dereg = ic_session_dereg_msg_alloc(
		    ss, stmf_proxy_msg_id);
		if (ic_session_dereg) {
			ic_ret = ic_tx_msg(ic_session_dereg);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* advance msg id only on success */
				stmf_proxy_msg_id++;
			}
		}
	}

	mutex_exit(&stmf_state.stmf_lock);

	/* unlink the session from the port's singly-linked list */
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
	    ppss = &((*ppss)->iss_next)) {
		if (iss == (*ppss)) {
			*ppss = (*ppss)->iss_next;
			found = 1;
			break;
		}
	}
	if (!found) {
		cmn_err(CE_PANIC, "Deregister session called for non existent"
		    " session");
	}
	ilport->ilport_nsessions--;
	rw_exit(&ilport->ilport_lock);

	stmf_irport_deregister(iss->iss_irport);
	(void) stmf_session_destroy_lun_map(ilport, iss);
}
3578 
3579 stmf_i_scsi_session_t *
3580 stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
3581 {
3582 	stmf_i_local_port_t *ilport;
3583 	stmf_i_scsi_session_t *iss;
3584 
3585 	mutex_enter(&stmf_state.stmf_lock);
3586 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
3587 	    ilport = ilport->ilport_next) {
3588 		rw_enter(&ilport->ilport_lock, RW_WRITER);
3589 		for (iss = ilport->ilport_ss_list; iss != NULL;
3590 		    iss = iss->iss_next) {
3591 			if (iss->iss_ss->ss_session_id == session_id) {
3592 				if (!stay_locked)
3593 					rw_exit(&ilport->ilport_lock);
3594 				mutex_exit(&stmf_state.stmf_lock);
3595 				return (iss);
3596 			}
3597 		}
3598 		rw_exit(&ilport->ilport_lock);
3599 	}
3600 	mutex_exit(&stmf_state.stmf_lock);
3601 	return (NULL);
3602 }
3603 
3604 #define	MAX_ALIAS		128
3605 
3606 static int
3607 stmf_itl_kstat_compare(const void *itl_kstat_1, const void *itl_kstat_2)
3608 {
3609 	const	stmf_i_itl_kstat_t	*kstat_nm1 = itl_kstat_1;
3610 	const	stmf_i_itl_kstat_t	*kstat_nm2 = itl_kstat_2;
3611 	int	ret;
3612 
3613 	ret = strcmp(kstat_nm1->iitl_kstat_nm, kstat_nm2->iitl_kstat_nm);
3614 	if (ret < 0) {
3615 		return (-1);
3616 	} else if (ret > 0) {
3617 		return (1);
3618 	}
3619 	return (0);
3620 }
3621 
3622 static stmf_i_itl_kstat_t *
3623 stmf_itl_kstat_lookup(char *kstat_nm)
3624 {
3625 	stmf_i_itl_kstat_t	tmp;
3626 	stmf_i_itl_kstat_t	*itl_kstat;
3627 
3628 	(void) strcpy(tmp.iitl_kstat_nm, kstat_nm);
3629 	mutex_enter(&stmf_state.stmf_lock);
3630 	itl_kstat = avl_find(&stmf_state.stmf_itl_kstat_list, &tmp, NULL);
3631 	mutex_exit(&stmf_state.stmf_lock);
3632 	return (itl_kstat);
3633 }
3634 
3635 static void
3636 stmf_delete_itl_kstat_by_lport(char *tgt)
3637 {
3638 	stmf_i_itl_kstat_t	*ks_itl, *next;
3639 
3640 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3641 	ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3642 	for (; ks_itl != NULL; ks_itl = next) {
3643 		next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3644 		if (strcmp(ks_itl->iitl_kstat_lport, tgt) == 0) {
3645 			stmf_teardown_itl_kstats(ks_itl);
3646 			avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3647 			kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3648 		}
3649 	}
3650 }
3651 
3652 static void
3653 stmf_delete_itl_kstat_by_guid(char *guid)
3654 {
3655 	stmf_i_itl_kstat_t	*ks_itl, *next;
3656 
3657 	ASSERT(mutex_owned(&stmf_state.stmf_lock));
3658 	ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3659 	for (; ks_itl != NULL; ks_itl = next) {
3660 		next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3661 		if (strcmp(ks_itl->iitl_kstat_guid, guid) == 0) {
3662 			stmf_teardown_itl_kstats(ks_itl);
3663 			avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3664 			kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3665 		}
3666 	}
3667 }
3668 
3669 static stmf_i_itl_kstat_t *
3670 stmf_itl_kstat_create(stmf_itl_data_t *itl, char *nm,
3671     scsi_devid_desc_t *lport, scsi_devid_desc_t *lun)
3672 {
3673 	stmf_i_itl_kstat_t	*ks_itl;
3674 	int			i, len;
3675 
3676 	if ((ks_itl = stmf_itl_kstat_lookup(nm)) != NULL)
3677 		return (ks_itl);
3678 
3679 	len = sizeof (stmf_i_itl_kstat_t);
3680 	ks_itl = kmem_zalloc(len, KM_NOSLEEP);
3681 	if (ks_itl == NULL)
3682 		return (NULL);
3683 
3684 	(void) strcpy(ks_itl->iitl_kstat_nm, nm);
3685 	bcopy(lport->ident, ks_itl->iitl_kstat_lport, lport->ident_length);
3686 	ks_itl->iitl_kstat_lport[lport->ident_length] = '\0';
3687 	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
3688 		(void) sprintf(&ks_itl->iitl_kstat_guid[i * 2], "%02x",
3689 		    lun->ident[i]);
3690 	}
3691 	ks_itl->iitl_kstat_strbuf = itl->itl_kstat_strbuf;
3692 	ks_itl->iitl_kstat_strbuflen = itl->itl_kstat_strbuflen;
3693 	ks_itl->iitl_kstat_info = itl->itl_kstat_info;
3694 	ks_itl->iitl_kstat_taskq = itl->itl_kstat_taskq;
3695 	ks_itl->iitl_kstat_lu_xfer = itl->itl_kstat_lu_xfer;
3696 	ks_itl->iitl_kstat_lport_xfer = itl->itl_kstat_lport_xfer;
3697 	mutex_enter(&stmf_state.stmf_lock);
3698 	avl_add(&stmf_state.stmf_itl_kstat_list, ks_itl);
3699 	mutex_exit(&stmf_state.stmf_lock);
3700 
3701 	return (ks_itl);
3702 }
3703 
/*
 * Build (or share) the full set of kstats for one I-T-L nexus and
 * record them on the itl.  The kstat name is derived from the remote
 * port instance, local port instance and LUN number, so an identical
 * nexus created later reuses the previously built kstats through the
 * stmf_itl_kstat_list AVL.  Returns STMF_SUCCESS, or
 * STMF_ALLOC_FAILURE after tearing down any partially built state.
 */
stmf_status_t
stmf_setup_itl_kstats(stmf_itl_data_t *itl)
{
	char				ks_itl_id[32];
	char				ks_nm[KSTAT_STRLEN];
	char				ks_itl_nm[KSTAT_STRLEN];
	stmf_kstat_itl_info_t		*ks_itl;
	stmf_scsi_session_t		*ss;
	stmf_i_scsi_session_t		*iss;
	stmf_i_local_port_t		*ilport;
	char				*strbuf;
	int				id, len, i;
	char				*rport_alias;
	char				*lport_alias;
	char				*lu_alias;
	stmf_i_itl_kstat_t		*tmp_kstat;

	/*
	 * Allocate enough memory in the ITL to hold the relevant
	 * identifiers.
	 * rport and lport identifiers come from the stmf_scsi_session_t.
	 * ident might not be null terminated.
	 */
	ss = itl->itl_session->iss_ss;
	iss = ss->ss_stmf_private;
	ilport = ss->ss_lport->lport_stmf_private;
	(void) snprintf(ks_itl_id, 32, "%d.%d.%d",
	    iss->iss_irport->irport_instance, ilport->ilport_instance,
	    itl->itl_lun);

	(void) snprintf(ks_itl_nm, KSTAT_STRLEN, "itl_%s", ks_itl_id);
	/*
	 * let's verify this itl_kstat already exist
	 */
	if ((tmp_kstat = stmf_itl_kstat_lookup(ks_itl_nm)) != NULL) {
		/* share the previously created kstats with this itl */
		itl->itl_kstat_strbuf = tmp_kstat->iitl_kstat_strbuf;
		itl->itl_kstat_strbuflen = tmp_kstat->iitl_kstat_strbuflen;
		itl->itl_kstat_info = tmp_kstat->iitl_kstat_info;
		itl->itl_kstat_taskq = tmp_kstat->iitl_kstat_taskq;
		itl->itl_kstat_lu_xfer = tmp_kstat->iitl_kstat_lu_xfer;
		itl->itl_kstat_lport_xfer = tmp_kstat->iitl_kstat_lport_xfer;
		return (STMF_SUCCESS);
	}

	/* New itl_kstat */
	rport_alias = (ss->ss_rport_alias == NULL) ?
	    "" : ss->ss_rport_alias;
	lport_alias = (ss->ss_lport->lport_alias == NULL) ?
	    "" : ss->ss_lport->lport_alias;
	lu_alias = (itl->itl_ilu->ilu_lu->lu_alias == NULL) ?
	    "" : itl->itl_ilu->ilu_lu->lu_alias;

	/* one NUL-terminated slot per identifier/alias, packed in order */
	itl->itl_kstat_strbuflen = (ss->ss_rport_id->ident_length + 1) +
	    (strnlen(rport_alias, MAX_ALIAS) + 1) +
	    (ss->ss_lport->lport_id->ident_length + 1) +
	    (strnlen(lport_alias, MAX_ALIAS) + 1) +
	    (STMF_GUID_INPUT + 1) +
	    (strnlen(lu_alias, MAX_ALIAS) + 1) +
	    MAX_PROTO_STR_LEN;
	itl->itl_kstat_strbuf = kmem_zalloc(itl->itl_kstat_strbuflen,
	    KM_NOSLEEP);
	if (itl->itl_kstat_strbuf == NULL) {
		return (STMF_ALLOC_FAILURE);
	}

	ks_itl = (stmf_kstat_itl_info_t *)kmem_zalloc(sizeof (*ks_itl),
	    KM_NOSLEEP);
	if (ks_itl == NULL) {
		kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
		return (STMF_ALLOC_FAILURE);
	}

	if ((itl->itl_kstat_info = kstat_create(STMF_MODULE_NAME,
	    0, ks_itl_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_itl_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		goto itl_kstat_cleanup;
	}

	/* the string payload lives in itl_kstat_strbuf; account for it */
	itl->itl_kstat_info->ks_data_size += itl->itl_kstat_strbuflen;
	itl->itl_kstat_info->ks_data = ks_itl;

	kstat_named_init(&ks_itl->i_rport_name, "rport-name",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_rport_alias, "rport-alias",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lport_name, "lport-name",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lport_alias, "lport-alias",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_protocol, "protocol",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lu_guid, "lu-guid",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lu_alias, "lu-alias",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lu_number, "lu-number",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_task_waitq_elapsed, "task-waitq-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_task_read_elapsed, "task-read-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_task_write_elapsed, "task-write-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lu_read_elapsed, "lu-read-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lu_write_elapsed, "lu-write-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lport_read_elapsed, "lport-read-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lport_write_elapsed, "lport-write-elapsed",
	    KSTAT_DATA_UINT64);

	strbuf = itl->itl_kstat_strbuf;

	/* Rport (ident bytes are copied raw, then NUL terminated) */
	len = ss->ss_rport_id->ident_length;
	bcopy(ss->ss_rport_id->ident, strbuf, len);
	strbuf += len;
	*strbuf = '\0';
	kstat_named_setstr(&ks_itl->i_rport_name, strbuf - len);
	strbuf++;

	len = strnlen(rport_alias, MAX_ALIAS);
	(void) strncpy(strbuf, rport_alias, len + 1);
	kstat_named_setstr(&ks_itl->i_rport_alias, strbuf);
	strbuf += len + 1;

	/* Lport */
	len = ss->ss_lport->lport_id->ident_length;
	bcopy(ss->ss_lport->lport_id->ident, strbuf, len);
	strbuf += len;
	*strbuf = '\0';
	kstat_named_setstr(&ks_itl->i_lport_name, strbuf - len);
	strbuf++;

	len = strnlen(lport_alias, MAX_ALIAS);
	(void) strncpy(strbuf, lport_alias, len + 1);
	kstat_named_setstr(&ks_itl->i_lport_alias, strbuf);
	strbuf += len + 1;

	/* clamp unknown protocol ids to PROTOCOL_ANY for the name table */
	id = (ss->ss_lport->lport_id->protocol_id > PROTOCOL_ANY) ?
	    PROTOCOL_ANY : ss->ss_lport->lport_id->protocol_id;
	kstat_named_setstr(&ks_itl->i_protocol, protocol_ident[id]);

	/* LU (guid rendered as hex) */
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&strbuf[i * 2], "%02x",
		    itl->itl_ilu->ilu_lu->lu_id->ident[i]);
	}
	kstat_named_setstr(&ks_itl->i_lu_guid, strbuf);
	strbuf += STMF_GUID_INPUT + 1;

	len = strnlen(lu_alias, MAX_ALIAS);
	(void) strncpy(strbuf, lu_alias, len + 1);
	kstat_named_setstr(&ks_itl->i_lu_alias, strbuf);
	strbuf += len + 1;

	ks_itl->i_lu_number.value.ui64 = itl->itl_lun;

	/* Now create the I/O kstats */
	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_tasks_%s",  ks_itl_id);
	if ((itl->itl_kstat_taskq = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		goto itl_kstat_cleanup;
	}

	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lu_%s",  ks_itl_id);
	if ((itl->itl_kstat_lu_xfer = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		goto itl_kstat_cleanup;
	}

	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lport_%s",  ks_itl_id);
	if ((itl->itl_kstat_lport_xfer = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		goto itl_kstat_cleanup;
	}

	/* Install all the kstats */
	kstat_install(itl->itl_kstat_info);
	kstat_install(itl->itl_kstat_taskq);
	kstat_install(itl->itl_kstat_lu_xfer);
	kstat_install(itl->itl_kstat_lport_xfer);

	/* Add new itl_kstat to stmf_itl_kstat_list */
	if (stmf_itl_kstat_create(itl, ks_itl_nm, ss->ss_lport->lport_id,
	    itl->itl_ilu->ilu_lu->lu_id) != NULL)
		return (STMF_SUCCESS);

itl_kstat_cleanup:
	/* delete whatever kstats were created before the failure */
	if (itl->itl_kstat_taskq)
		kstat_delete(itl->itl_kstat_taskq);
	if (itl->itl_kstat_lu_xfer)
		kstat_delete(itl->itl_kstat_lu_xfer);
	if (itl->itl_kstat_lport_xfer)
		kstat_delete(itl->itl_kstat_lport_xfer);
	if (itl->itl_kstat_info)
		kstat_delete(itl->itl_kstat_info);
	kmem_free(ks_itl, sizeof (*ks_itl));
	kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
	cmn_err(CE_WARN, "STMF: kstat_create itl failed");
	return (STMF_ALLOC_FAILURE);
}
3908 
/*
 * Free all kstats recorded on a shared ITL kstat entry: the three I/O
 * kstats, the named-info kstat (whose ks_data was allocated by
 * stmf_setup_itl_kstats — KSTAT_FLAG_VIRTUAL) and the backing string
 * buffer.  Callers hold stmf_state.stmf_lock.
 */
static void
stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks)
{
	kstat_delete(ks->iitl_kstat_lport_xfer);
	kstat_delete(ks->iitl_kstat_lu_xfer);
	kstat_delete(ks->iitl_kstat_taskq);
	kmem_free(ks->iitl_kstat_info->ks_data, sizeof (stmf_kstat_itl_info_t));
	kstat_delete(ks->iitl_kstat_info);
	kmem_free(ks->iitl_kstat_strbuf, ks->iitl_kstat_strbuflen);
}
3919 
3920 void
3921 stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
3922 {
3923 	stmf_itl_data_t **itlpp;
3924 	stmf_i_lu_t *ilu;
3925 
3926 	ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);
3927 
3928 	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
3929 	mutex_enter(&ilu->ilu_task_lock);
3930 	for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
3931 	    itlpp = &(*itlpp)->itl_next) {
3932 		if ((*itlpp) == itl)
3933 			break;
3934 	}
3935 	ASSERT((*itlpp) != NULL);
3936 	*itlpp = itl->itl_next;
3937 	mutex_exit(&ilu->ilu_task_lock);
3938 	lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
3939 	    (uint32_t)itl->itl_hdlrm_reason);
3940 
3941 	kmem_free(itl, sizeof (*itl));
3942 }
3943 
3944 stmf_status_t
3945 stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
3946     stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
3947 {
3948 	stmf_itl_data_t *itl;
3949 	stmf_i_scsi_session_t *iss;
3950 	stmf_lun_map_ent_t *lun_map_ent;
3951 	stmf_i_lu_t *ilu;
3952 	uint16_t n;
3953 
3954 	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
3955 	if (ss == NULL) {
3956 		iss = stmf_session_id_to_issptr(session_id, 1);
3957 		if (iss == NULL)
3958 			return (STMF_NOT_FOUND);
3959 	} else {
3960 		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
3961 	}
3962 
3963 	rw_enter(iss->iss_lockp, RW_WRITER);
3964 	n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
3965 	lun_map_ent = (stmf_lun_map_ent_t *)
3966 	    stmf_get_ent_from_map(iss->iss_sm, n);
3967 	if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
3968 		rw_exit(iss->iss_lockp);
3969 		return (STMF_NOT_FOUND);
3970 	}
3971 	if (lun_map_ent->ent_itl_datap != NULL) {
3972 		rw_exit(iss->iss_lockp);
3973 		return (STMF_ALREADY);
3974 	}
3975 
3976 	itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
3977 	if (itl == NULL) {
3978 		rw_exit(iss->iss_lockp);
3979 		return (STMF_ALLOC_FAILURE);
3980 	}
3981 
3982 	itl->itl_ilu = ilu;
3983 	itl->itl_session = iss;
3984 	itl->itl_counter = 1;
3985 	itl->itl_lun = n;
3986 	itl->itl_handle = itl_handle;
3987 
3988 	if (stmf_setup_itl_kstats(itl) != STMF_SUCCESS) {
3989 		kmem_free(itl, sizeof (*itl));
3990 		rw_exit(iss->iss_lockp);
3991 		return (STMF_ALLOC_FAILURE);
3992 	}
3993 
3994 	mutex_enter(&ilu->ilu_task_lock);
3995 	itl->itl_next = ilu->ilu_itl_list;
3996 	ilu->ilu_itl_list = itl;
3997 	mutex_exit(&ilu->ilu_task_lock);
3998 	lun_map_ent->ent_itl_datap = itl;
3999 	rw_exit(iss->iss_lockp);
4000 
4001 	return (STMF_SUCCESS);
4002 }
4003 
/*
 * Begin deregistration of an ITL: atomically mark it as being terminated
 * (the first caller wins; later callers return immediately), record the
 * removal reason, drop the reference owned by the registration, and free
 * the handle if no other references remain.
 */
void
stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
{
	uint8_t old, new;

	/* Set STMF_ITL_BEING_TERMINATED exactly once via CAS loop. */
	do {
		old = new = itl->itl_flags;
		if (old & STMF_ITL_BEING_TERMINATED)
			return;
		new |= STMF_ITL_BEING_TERMINATED;
	} while (atomic_cas_8(&itl->itl_flags, old, new) != old);
	itl->itl_hdlrm_reason = hdlrm_reason;

	ASSERT(itl->itl_counter);

	/* Drop the registration's reference; others still hold the itl. */
	if (atomic_add_32_nv(&itl->itl_counter, -1))
		return;

	/*
	 * NOTE(review): the short busy-wait and re-check below appear to
	 * guard against a racing reference holder (e.g. stmf_task_free())
	 * that is about to release the handle itself -- confirm the
	 * intended synchronization before relying on it.
	 */
	drv_usecwait(10);
	if (itl->itl_counter)
		return;

	stmf_release_itl_handle(lu, itl);
}
4028 
/*
 * Deregister every ITL handle that references this LU, across all local
 * ports and sessions.  The number of expected handles is snapshotted
 * from ilu_ref_cnt before taking stmf_lock; if the count changes before
 * the lock is held, the allocation is redone from scratch.
 */
stmf_status_t
stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *ent;
	uint32_t nmaps, nu;
	stmf_itl_data_t **itl_list;
	int i;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

dereg_itl_start:;
	/* Unlocked snapshot; validated below once stmf_lock is held. */
	nmaps = ilu->ilu_ref_cnt;
	if (nmaps == 0)
		return (STMF_NOT_FOUND);
	itl_list = (stmf_itl_data_t **)kmem_zalloc(
	    nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP);
	mutex_enter(&stmf_state.stmf_lock);
	if (nmaps != ilu->ilu_ref_cnt) {
		/* Something changed, start all over */
		mutex_exit(&stmf_state.stmf_lock);
		kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
		goto dereg_itl_start;
	}
	/*
	 * Collect (and detach) every itl belonging to this LU from every
	 * session's lun map.  Detaching under the port lock prevents new
	 * tasks from picking up the itl while we tear it down.
	 */
	nu = 0;
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			lm = iss->iss_sm;
			if (!lm)
				continue;
			for (i = 0; i < lm->lm_nentries; i++) {
				if (lm->lm_plus[i] == NULL)
					continue;
				ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
				if ((ent->ent_lu == lu) &&
				    (ent->ent_itl_datap)) {
					itl_list[nu++] = ent->ent_itl_datap;
					ent->ent_itl_datap = NULL;
					if (nu == nmaps) {
						rw_exit(&ilport->ilport_lock);
						goto dai_scan_done;
					}
				}
			} /* lun table for a session */
		} /* sessions */
		rw_exit(&ilport->ilport_lock);
	} /* ports */

dai_scan_done:
	mutex_exit(&stmf_state.stmf_lock);

	/* Now deregister the collected handles without any locks held. */
	for (i = 0; i < nu; i++) {
		stmf_do_itl_dereg(lu, itl_list[i],
		    STMF_ITL_REASON_DEREG_REQUEST);
	}
	kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));

	return (STMF_SUCCESS);
}
4094 
/*
 * Deregister a single ITL handle.  The session is identified either by
 * ss or by session_id, and the itl either by lun number or by the
 * opaque itl_handle itself (when lun is NULL).
 */
stmf_status_t
stmf_deregister_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
	stmf_i_scsi_session_t *iss;
	stmf_itl_data_t *itl;
	stmf_lun_map_ent_t *ent;
	stmf_lun_map_t *lm;
	int i;
	uint16_t n;

	if (ss == NULL) {
		if (session_id == STMF_SESSION_ID_NONE)
			return (STMF_INVALID_ARG);
		/*
		 * stay_locked == 1: on success iss->iss_lockp is already
		 * held, which is why the else branch below is the only one
		 * that calls rw_enter().
		 */
		iss = stmf_session_id_to_issptr(session_id, 1);
		if (iss == NULL)
			return (STMF_NOT_FOUND);
	} else {
		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
		rw_enter(iss->iss_lockp, RW_WRITER);
	}
	lm = iss->iss_sm;
	if (lm == NULL) {
		rw_exit(iss->iss_lockp);
		return (STMF_NOT_FOUND);
	}

	if (lun) {
		/* Direct lookup by 14-bit LUN number. */
		n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
		ent = (stmf_lun_map_ent_t *)
		    stmf_get_ent_from_map(iss->iss_sm, n);
	} else {
		/* No lun given: search the map for the matching handle. */
		if (itl_handle == NULL) {
			rw_exit(iss->iss_lockp);
			return (STMF_INVALID_ARG);
		}
		ent = NULL;
		for (i = 0; i < lm->lm_nentries; i++) {
			if (lm->lm_plus[i] == NULL)
				continue;
			ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
			if (ent->ent_itl_datap &&
			    (ent->ent_itl_datap->itl_handle == itl_handle)) {
				break;
			}
		}
	}
	if ((ent == NULL) || (ent->ent_lu != lu) ||
	    (ent->ent_itl_datap == NULL)) {
		rw_exit(iss->iss_lockp);
		return (STMF_NOT_FOUND);
	}
	/* Detach the itl from the mapping before dropping the lock. */
	itl = ent->ent_itl_datap;
	ent->ent_itl_datap = NULL;
	rw_exit(iss->iss_lockp);
	stmf_do_itl_dereg(lu, itl, STMF_ITL_REASON_DEREG_REQUEST);

	return (STMF_SUCCESS);
}
4154 
4155 stmf_status_t
4156 stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss,
4157     uint64_t session_id, void **itl_handle_retp)
4158 {
4159 	stmf_i_scsi_session_t *iss;
4160 	stmf_lun_map_ent_t *ent;
4161 	stmf_lun_map_t *lm;
4162 	stmf_status_t ret;
4163 	int i;
4164 	uint16_t n;
4165 
4166 	if (ss == NULL) {
4167 		iss = stmf_session_id_to_issptr(session_id, 1);
4168 		if (iss == NULL)
4169 			return (STMF_NOT_FOUND);
4170 	} else {
4171 		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4172 		rw_enter(iss->iss_lockp, RW_WRITER);
4173 	}
4174 
4175 	ent = NULL;
4176 	if (lun == NULL) {
4177 		lm = iss->iss_sm;
4178 		for (i = 0; i < lm->lm_nentries; i++) {
4179 			if (lm->lm_plus[i] == NULL)
4180 				continue;
4181 			ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4182 			if (ent->ent_lu == lu)
4183 				break;
4184 		}
4185 	} else {
4186 		n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4187 		ent = (stmf_lun_map_ent_t *)
4188 		    stmf_get_ent_from_map(iss->iss_sm, n);
4189 		if (lu && (ent->ent_lu != lu))
4190 			ent = NULL;
4191 	}
4192 	if (ent && ent->ent_itl_datap) {
4193 		*itl_handle_retp = ent->ent_itl_datap->itl_handle;
4194 		ret = STMF_SUCCESS;
4195 	} else {
4196 		ret = STMF_NOT_FOUND;
4197 	}
4198 
4199 	rw_exit(iss->iss_lockp);
4200 	return (ret);
4201 }
4202 
4203 stmf_data_buf_t *
4204 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
4205     uint32_t flags)
4206 {
4207 	stmf_i_scsi_task_t *itask =
4208 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4209 	stmf_local_port_t *lport = task->task_lport;
4210 	stmf_data_buf_t *dbuf;
4211 	uint8_t ndx;
4212 
4213 	ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4214 	if (ndx == 0xff)
4215 		return (NULL);
4216 	dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf(
4217 	    task, size, pminsize, flags);
4218 	if (dbuf) {
4219 		task->task_cur_nbufs++;
4220 		itask->itask_allocated_buf_map |= (1 << ndx);
4221 		dbuf->db_handle = ndx;
4222 		return (dbuf);
4223 	}
4224 
4225 	return (NULL);
4226 }
4227 
4228 void
4229 stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4230 {
4231 	stmf_i_scsi_task_t *itask =
4232 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4233 	stmf_local_port_t *lport = task->task_lport;
4234 
4235 	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4236 	task->task_cur_nbufs--;
4237 	lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf);
4238 }
4239 
4240 stmf_data_buf_t *
4241 stmf_handle_to_buf(scsi_task_t *task, uint8_t h)
4242 {
4243 	stmf_i_scsi_task_t *itask;
4244 
4245 	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4246 	if (h > 3)
4247 		return (NULL);
4248 	return (itask->itask_dbufs[h]);
4249 }
4250 
/*
 * Allocate a scsi_task for an incoming command on (lport, ss, lun).
 * A task is reused from the LU's free list when one with a large enough
 * CDB buffer exists; otherwise a new one is allocated and registered on
 * the LU's task list.  Returns NULL if the LU has a reset in progress
 * or allocation fails.  Commands for unmapped LUNs are routed to dlun0
 * so they can be given a proper SCSI error response.
 */
/* ARGSUSED */
struct scsi_task *
stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
			uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id)
{
	stmf_lu_t *lu;
	stmf_i_scsi_session_t *iss;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_task_t *itask;
	stmf_i_scsi_task_t **ppitask;
	scsi_task_t *task;
	uint64_t *p;
	uint8_t	*l;
	stmf_lun_map_ent_t *lun_map_ent;
	uint16_t cdb_length;
	uint16_t luNbr;
	uint8_t new_task = 0;

	/*
	 * We allocate 7 extra bytes for CDB to provide a cdb pointer which
	 * is guaranteed to be 8 byte aligned. Some LU providers like OSD
	 * depend upon this alignment.
	 */
	if (cdb_length_in >= 16)
		cdb_length = cdb_length_in + 7;
	else
		cdb_length = 16 + 7;
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	rw_enter(iss->iss_lockp, RW_READER);
	lun_map_ent =
	    (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
	if (!lun_map_ent) {
		/* Unmapped LUN: hand the command to dlun0 for error reply. */
		lu = dlun0;
	} else {
		lu = lun_map_ent->ent_lu;
	}
	ilu = lu->lu_stmf_private;
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		return (NULL);
	}
	/* Try to reuse a free task with a big enough CDB buffer. */
	do {
		if (ilu->ilu_free_tasks == NULL) {
			new_task = 1;
			break;
		}
		mutex_enter(&ilu->ilu_task_lock);
		for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
		    ((*ppitask)->itask_cdb_buf_size < cdb_length);
		    ppitask = &((*ppitask)->itask_lu_free_next))
			;
		if (*ppitask) {
			itask = *ppitask;
			*ppitask = (*ppitask)->itask_lu_free_next;
			ilu->ilu_ntasks_free--;
			/* Track the low-water mark for the reaper thread. */
			if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
				ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		} else {
			new_task = 1;
		}
		mutex_exit(&ilu->ilu_task_lock);
	/* CONSTCOND */
	} while (0);

	if (!new_task) {
		task = itask->itask_task;
		task->task_timeout = 0;
		/*
		 * Zero out the 8-byte words of reused task state starting
		 * at task_flags, skipping two words in the middle.
		 * NOTE(review): the skipped words presumably hold fields
		 * that are reinitialized explicitly (see scsi_task_t
		 * layout) -- confirm against the structure definition
		 * before changing this.
		 */
		p = (uint64_t *)&task->task_flags;
		*p++ = 0; *p++ = 0; p++; p++; *p++ = 0; *p++ = 0; *p = 0;
		itask->itask_ncmds = 0;
	} else {
		task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
		    cdb_length, AF_FORCE_NOSLEEP);
		if (task == NULL) {
			rw_exit(iss->iss_lockp);
			return (NULL);
		}
		task->task_lu = lu;
		l = task->task_lun_no;
		l[0] = lun[0];
		l[1] = lun[1];
		l[2] = lun[2];
		l[3] = lun[3];
		l[4] = lun[4];
		l[5] = lun[5];
		l[6] = lun[6];
		l[7] = lun[7];
		/* Round the CDB pointer up to the next 8-byte boundary. */
		task->task_cdb = (uint8_t *)task->task_port_private;
		if ((ulong_t)(task->task_cdb) & 7ul) {
			task->task_cdb = (uint8_t *)(((ulong_t)
			    (task->task_cdb) + 7ul) & ~(7ul));
		}
		itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
		itask->itask_cdb_buf_size = cdb_length;
	}
	task->task_session = ss;
	task->task_lport = lport;
	task->task_cdb_length = cdb_length_in;
	itask->itask_flags = ITASK_IN_TRANSITION;
	itask->itask_waitq_time = 0;
	itask->itask_lu_read_time = itask->itask_lu_write_time = 0;
	itask->itask_lport_read_time = itask->itask_lport_write_time = 0;
	itask->itask_read_xfer = itask->itask_write_xfer = 0;

	if (new_task) {
		if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		mutex_enter(&ilu->ilu_task_lock);
		/* Re-check under the lock: a reset may have just started. */
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			mutex_exit(&ilu->ilu_task_lock);
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		itask->itask_lu_next = ilu->ilu_tasks;
		if (ilu->ilu_tasks)
			ilu->ilu_tasks->itask_lu_prev = itask;
		ilu->ilu_tasks = itask;
		/* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
		ilu->ilu_ntasks++;
		mutex_exit(&ilu->ilu_task_lock);
	}

	itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
	atomic_add_32(itask->itask_ilu_task_cntr, 1);
	itask->itask_start_time = ddi_get_lbolt();

	/* Take a hold on the itl (if any) for the task's lifetime. */
	if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
	    lun_map_ent->ent_itl_datap) != NULL)) {
		atomic_add_32(&itask->itask_itl_datap->itl_counter, 1);
		task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
	} else {
		itask->itask_itl_datap = NULL;
		task->task_lu_itl_handle = NULL;
	}

	rw_exit(iss->iss_lockp);
	return (task);
}
4394 
/*
 * Return a task to its LU's free list for later reuse.  The caller must
 * hold the session's iss_lockp (asserted); see stmf_task_free().
 */
static void
stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	ASSERT(rw_lock_held(iss->iss_lockp));
	itask->itask_flags = ITASK_IN_FREE_LIST;
	itask->itask_proxy_msg_id = 0;
	mutex_enter(&ilu->ilu_task_lock);
	itask->itask_lu_free_next = ilu->ilu_free_tasks;
	ilu->ilu_free_tasks = itask;
	ilu->ilu_ntasks_free++;
	mutex_exit(&ilu->ilu_task_lock);
	/* Drop this task's contribution to the LU's active-task counter. */
	atomic_add_32(itask->itask_ilu_task_cntr, -1);
}
4412 
/*
 * Shrink an LU's free-task list: release up to half of the minimum
 * number of free tasks observed since the last reap.  Each task is
 * first handed back to the LU (lu_task_free) with ilu_task_lock
 * dropped, then unlinked from the LU's task list and freed.
 */
void
stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
{
	uint32_t	num_to_release, ndx;
	stmf_i_scsi_task_t *itask;
	stmf_lu_t	*lu = ilu->ilu_lu;

	ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);

	/* free half of the minimal free of the free tasks */
	num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
	if (!num_to_release) {
		return;
	}
	for (ndx = 0; ndx < num_to_release; ndx++) {
		mutex_enter(&ilu->ilu_task_lock);
		itask = ilu->ilu_free_tasks;
		if (itask == NULL) {
			mutex_exit(&ilu->ilu_task_lock);
			break;
		}
		ilu->ilu_free_tasks = itask->itask_lu_free_next;
		ilu->ilu_ntasks_free--;
		mutex_exit(&ilu->ilu_task_lock);

		/* LU callback must run without ilu_task_lock held. */
		lu->lu_task_free(itask->itask_task);
		mutex_enter(&ilu->ilu_task_lock);
		/* Unlink from the doubly linked list of all LU tasks. */
		if (itask->itask_lu_next)
			itask->itask_lu_next->itask_lu_prev =
			    itask->itask_lu_prev;
		if (itask->itask_lu_prev)
			itask->itask_lu_prev->itask_lu_next =
			    itask->itask_lu_next;
		else
			ilu->ilu_tasks = itask->itask_lu_next;

		ilu->ilu_ntasks--;
		mutex_exit(&ilu->ilu_task_lock);
		stmf_free(itask->itask_task);
	}
}
4454 
4455 /*
4456  * Called with stmf_lock held
4457  */
void
stmf_check_freetask()
{
	stmf_i_lu_t *ilu;
	/* Bound the work done per invocation to roughly 10ms. */
	clock_t	endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_draining may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
		stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		if (!ilu->ilu_ntasks_min_free) {
			ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
			continue;
		}
		/*
		 * ILU_STALL_DEREGISTER keeps the LU from being torn down
		 * while stmf_lock is dropped around the freelist reap;
		 * waiters are woken via stmf_cv below.
		 */
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_task_lu_check_freelist(ilu);
		/*
		 * we do not care about the accuracy of
		 * ilu_ntasks_min_free, so we don't lock here
		 */
		ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}
4486 
/*
 * Abort any task on this LU that has exceeded its timeout.  Tasks on
 * the free list or already being aborted are skipped.  A task timeout
 * of 0 means "use the global default" (stmf_default_task_timeout).
 */
void
stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
{
	clock_t l = ddi_get_lbolt();
	clock_t ps = drv_usectohz(1000000);	/* ticks per second */
	stmf_i_scsi_task_t *itask;
	scsi_task_t *task;
	uint32_t to;

	mutex_enter(&ilu->ilu_task_lock);
	for (itask = ilu->ilu_tasks; itask != NULL;
	    itask = itask->itask_lu_next) {
		if (itask->itask_flags & (ITASK_IN_FREE_LIST |
		    ITASK_BEING_ABORTED)) {
			continue;
		}
		task = itask->itask_task;
		if (task->task_timeout == 0)
			to = stmf_default_task_timeout;
		else
			to = task->task_timeout;
		/* to is in seconds; convert to lbolt ticks via ps. */
		if ((itask->itask_start_time + (to * ps)) > l)
			continue;
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_TIMEOUT, NULL);
	}
	mutex_exit(&ilu->ilu_task_lock);
}
4515 
4516 /*
4517  * Called with stmf_lock held
4518  */
void
stmf_check_ilu_timing()
{
	stmf_i_lu_t *ilu;
	/* Bound the work done per invocation to roughly 10ms. */
	clock_t	endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_timing may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
		stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		/*
		 * Two alternating task counters are used: new tasks are
		 * charged to ilu_cur_task_cntr and each counter drains as
		 * its tasks complete.  If the inactive counter has reached
		 * zero, all of its tasks finished in time, so just flip to
		 * it and move on.
		 */
		if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
			if (ilu->ilu_task_cntr2 == 0) {
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
				continue;
			}
		} else {
			if (ilu->ilu_task_cntr1 == 0) {
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
				continue;
			}
		}
		/*
		 * If we are here then it means that there is some slowdown
		 * in tasks on this lu. We need to check.
		 */
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_do_ilu_timeouts(ilu);
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}
4553 
4554 /*
4555  * Kills all tasks on a lu except tm_task
4556  */
4557 void
4558 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
4559 {
4560 	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4561 	stmf_i_scsi_task_t *itask;
4562 
4563 	mutex_enter(&ilu->ilu_task_lock);
4564 
4565 	for (itask = ilu->ilu_tasks; itask != NULL;
4566 	    itask = itask->itask_lu_next) {
4567 		if (itask->itask_flags & ITASK_IN_FREE_LIST)
4568 			continue;
4569 		if (itask->itask_task == tm_task)
4570 			continue;
4571 		stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
4572 	}
4573 	mutex_exit(&ilu->ilu_task_lock);
4574 }
4575 
4576 void
4577 stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
4578 {
4579 	int i;
4580 	uint8_t map;
4581 
4582 	if ((map = itask->itask_allocated_buf_map) != 0) {
4583 		for (i = 0; i < 4; i++) {
4584 			if (map & 1) {
4585 				stmf_data_buf_t *dbuf;
4586 
4587 				dbuf = itask->itask_dbufs[i];
4588 				if (dbuf->db_lu_private) {
4589 					dbuf->db_lu_private = NULL;
4590 				}
4591 				if (dbuf->db_xfer_start_timestamp != NULL) {
4592 					stmf_lport_xfer_done(itask, dbuf);
4593 				}
4594 				lport->lport_ds->ds_free_data_buf(
4595 				    lport->lport_ds, dbuf);
4596 			}
4597 			map >>= 1;
4598 		}
4599 		itask->itask_allocated_buf_map = 0;
4600 	}
4601 }
4602 
/*
 * Final teardown of a task: release its buffers, close out kstats,
 * drop its ITL hold, notify the port provider, and put the task back
 * on its LU's free list.  Note carefully the ordering constraints
 * commented inline.
 */
void
stmf_task_free(scsi_task_t *task)
{
	stmf_local_port_t *lport = task->task_lport;
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;

	stmf_free_task_bufs(itask, lport);
	stmf_itl_task_done(itask);
	DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
	    hrtime_t,
	    itask->itask_done_timestamp - itask->itask_start_timestamp);
	/* Drop the hold taken in stmf_task_alloc(); last one frees the itl */
	if (itask->itask_itl_datap) {
		if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter,
		    -1) == 0) {
			stmf_release_itl_handle(task->task_lu,
			    itask->itask_itl_datap);
		}
	}

	rw_enter(iss->iss_lockp, RW_READER);
	lport->lport_task_free(task);
	if (itask->itask_worker) {
		atomic_add_32(&stmf_cur_ntasks, -1);
		atomic_add_32(&itask->itask_worker->worker_ref_count, -1);
	}
	/*
	 * After calling stmf_task_lu_free, the task pointer can no longer
	 * be trusted.
	 */
	stmf_task_lu_free(task, iss);
	rw_exit(iss->iss_lockp);
}
4638 
/*
 * Queue a newly allocated task (and optionally its first data buffer)
 * to a worker thread for execution.  A worker is chosen round-robin,
 * with a fallback to the next worker if it has a shorter queue.  The
 * task is marked known-to-target-port and placed on the worker queue;
 * tasks that raced with an LU reset activation are aborted at the end.
 */
void
stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int nv;
	uint32_t old, new;
	uint32_t ct;
	stmf_worker_t *w, *w1;
	uint8_t tm;

	if (task->task_max_nbufs > 4)
		task->task_max_nbufs = 4;
	task->task_cur_nbufs = 0;
	/* Latest value of currently running tasks */
	ct = atomic_add_32_nv(&stmf_cur_ntasks, 1);

	/* Select the next worker using round robin */
	nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1);
	if (nv >= stmf_nworkers_accepting_cmds) {
		int s = nv;
		/* Wrap the counter back into range (handles overflow too). */
		do {
			nv -= stmf_nworkers_accepting_cmds;
		} while (nv >= stmf_nworkers_accepting_cmds);
		if (nv < 0)
			nv = 0;
		/* Its ok if this cas fails */
		(void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter,
		    s, nv);
	}
	w = &stmf_workers[nv];

	/*
	 * A worker can be pinned by interrupt. So select the next one
	 * if it has lower load.
	 */
	if ((nv + 1) >= stmf_nworkers_accepting_cmds) {
		w1 = stmf_workers;
	} else {
		w1 = &stmf_workers[nv + 1];
	}
	if (w1->worker_queue_depth < w->worker_queue_depth)
		w = w1;

	mutex_enter(&w->worker_lock);
	if (((w->worker_flags & STMF_WORKER_STARTED) == 0) ||
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		/*
		 * Maybe we are in the middle of a change. Just go to
		 * the 1st worker.
		 */
		mutex_exit(&w->worker_lock);
		w = stmf_workers;
		mutex_enter(&w->worker_lock);
	}
	itask->itask_worker = w;
	/*
	 * Track max system load inside the worker as we already have the
	 * worker lock (no point implementing another lock). The service
	 * thread will do the comparisons and figure out the max overall
	 * system load.
	 */
	if (w->worker_max_sys_qdepth_pu < ct)
		w->worker_max_sys_qdepth_pu = ct;

	/*
	 * Transition the task flags atomically: known to target port and
	 * queued; target-reset TMs and REPORT LUNS get default handling.
	 */
	do {
		old = new = itask->itask_flags;
		new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE;
		if (task->task_mgmt_function) {
			tm = task->task_mgmt_function;
			if ((tm == TM_TARGET_RESET) ||
			    (tm == TM_TARGET_COLD_RESET) ||
			    (tm == TM_TARGET_WARM_RESET)) {
				new |= ITASK_DEFAULT_HANDLING;
			}
		} else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
			new |= ITASK_DEFAULT_HANDLING;
		}
		new &= ~ITASK_IN_TRANSITION;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	stmf_itl_task_start(itask);

	/* Append the task to the worker's queue. */
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	/* Measure task waitq time */
	itask->itask_waitq_enter_timestamp = gethrtime();
	atomic_add_32(&w->worker_ref_count, 1);
	itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
	itask->itask_ncmds = 1;
	if (dbuf) {
		itask->itask_allocated_buf_map = 1;
		itask->itask_dbufs[0] = dbuf;
		dbuf->db_handle = 0;
	} else {
		itask->itask_allocated_buf_map = 0;
		itask->itask_dbufs[0] = NULL;
	}

	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) {
		w->worker_signal_timestamp = gethrtime();
		DTRACE_PROBE2(worker__signal, stmf_worker_t *, w,
		    scsi_task_t *, task);
		cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	/*
	 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
	 * was set between checking of ILU_RESET_ACTIVE and clearing of the
	 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
	 */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
	}
}
4764 
4765 /*
4766  * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
4767  * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU can be reset already
4768  * i.e. before ITASK_BEING_ABORTED being set. But if it was not, it cannot
4769  * be reset until the LU explicitly calls stmf_task_lu_aborted(). Of course
4770  * the LU will make this call only if we call the LU's abort entry point.
4771  * we will only call that entry point if ITASK_KNOWN_TO_LU was set.
4772  *
4773  * Same logic applies for the port.
4774  *
4775  * Also ITASK_BEING_ABORTED will not be allowed to set if both KNOWN_TO_LU
4776  * and KNOWN_TO_TGT_PORT are reset.
4777  *
4778  * +++++++++++++++++++++++++++++++++++++++++++++++
4779  */
4780 
/*
 * Start a data transfer for a task through the port provider.  If the
 * LU signalled it is done (STMF_IOF_LU_DONE), the known-to-LU flag is
 * dropped first.  Returns STMF_ABORTED if the task is being aborted,
 * otherwise the port provider's lport_xfer_data() result.
 */
stmf_status_t
stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	stmf_status_t ret;

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	if (ioflags & STMF_IOF_LU_DONE) {
		/* Atomically clear ITASK_KNOWN_TO_LU unless aborting. */
		uint32_t new, old;
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}
	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);
#ifdef	DEBUG
	/* Fault injection: pretend success but never start the transfer. */
	if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
		if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) ==
		    1)
			return (STMF_SUCCESS);
	}
#endif

	stmf_update_kstat_lu_io(task, dbuf);
	stmf_update_kstat_lport_io(task, dbuf);
	stmf_lport_xfer_start(itask, dbuf);
	if (ioflags & STMF_IOF_STATS_ONLY) {
		stmf_lport_xfer_done(itask, dbuf);
		return (STMF_SUCCESS);
	}

	ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);

	/*
	 * Port provider may have already called the buffer callback in
	 * which case dbuf->db_xfer_start_timestamp will be 0.
	 */
	if ((ret != STMF_SUCCESS) && (dbuf->db_xfer_start_timestamp != 0)) {
		stmf_lport_xfer_done(itask, dbuf);
	}

	return (ret);
}
4828 
/*
 * Completion callback for a data transfer.  Updates the task flags
 * atomically, pushes a DATA_XFER_DONE command onto the task's command
 * stack, requeues the task to its worker if needed, and frees the task
 * when no one (LU, target port, worker queue) references it anymore.
 */
void
stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t update_queue_flags, free_it, queue_it;

	stmf_lport_xfer_done(itask, dbuf);

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			task->task_completion_status = dbuf->db_xfer_status;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			update_queue_flags = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				queue_it = 0;
			} else {
				queue_it = 1;
				new |= ITASK_IN_WORKER_QUEUE;
			}
		} else {
			update_queue_flags = 0;
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (update_queue_flags) {
		/* Encode the buffer handle in the top bits of the command. */
		uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;

		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
		if (queue_it) {
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
			/* Measure task waitq time */
			itask->itask_waitq_enter_timestamp = gethrtime();
			if (++(w->worker_queue_depth) >
			    w->worker_max_qdepth_pu) {
				w->worker_max_qdepth_pu = w->worker_queue_depth;
			}
			if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
				cv_signal(&w->worker_cv);
		}
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		/* Free only when no flag indicates an outstanding owner. */
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		}
	}
}
4909 
/*
 * Send SCSI status for a task via the port provider.  If the LU is done
 * (STMF_IOF_LU_DONE) the known-to-LU flag is dropped first.  Computes
 * the SCSI residual (over/under) from the command, expected and actual
 * transfer lengths before handing off to lport_send_status().
 */
stmf_status_t
stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
{
	DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	if (ioflags & STMF_IOF_LU_DONE) {
		uint32_t new, old;
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}

	/* Port already finished with the task; nothing to send. */
	if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
		return (STMF_SUCCESS);
	}

	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);

	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	} else if (task->task_cmd_xfer_length >
	    task->task_expected_xfer_length) {
		/* Command asked for more data than the initiator expected. */
		task->task_status_ctrl = TASK_SCTRL_OVER;
		task->task_resid = task->task_cmd_xfer_length -
		    task->task_expected_xfer_length;
	} else if (task->task_nbytes_transferred <
	    task->task_expected_xfer_length) {
		/* Fewer bytes actually moved than the initiator expected. */
		task->task_status_ctrl = TASK_SCTRL_UNDER;
		task->task_resid = task->task_expected_xfer_length -
		    task->task_nbytes_transferred;
	} else {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	}
	return (task->task_lport->lport_send_status(task, ioflags));
}
4953 
/*
 * Completion callback invoked once the local port has finished sending
 * status for a task (s is the completion status, iof the I/O flags).
 *
 * Under the worker lock, the itask flags are updated with a CAS loop:
 * if STMF_IOF_LPORT_DONE is set the target port is done with the task
 * and it becomes a candidate for freeing; if the LU still knows the
 * task, an ITASK_CMD_STATUS_DONE command is queued to the worker thread
 * instead.  A task being aborted is left alone entirely.
 */
void
stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t free_it, queue_it;

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			/* Abort path owns the task now; do nothing. */
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			queue_it = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				cmn_err(CE_PANIC, "status completion received"
				    " when task is already in worker queue "
				    " task = %p", (void *)task);
			}
			new |= ITASK_IN_WORKER_QUEUE;
		} else {
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;


	if (queue_it) {
		/* Append the task to the worker's FIFO and wake it. */
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] =
		    ITASK_CMD_STATUS_DONE;
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		/* Measure task waitq time */
		itask->itask_waitq_enter_timestamp = gethrtime();
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		/* Free only if no one (LU/port/worker/abort) holds it. */
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		} else {
			cmn_err(CE_PANIC, "LU is done with the task but LPORT "
			    " is not done, itask %p itask_flags %x",
			    (void *)itask, itask->itask_flags);
		}
	}
}
5032 
/*
 * Called by an LU to declare it is completely done with a task.  This
 * is expected to be the final stage of the task's life: after dropping
 * ITASK_KNOWN_TO_LU the task must not be held by anyone else, so it is
 * freed here (or the system panics if some owner flag is still set).
 */
void
stmf_task_lu_done(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			/* The abort path will clean up instead. */
			mutex_exit(&w->worker_lock);
			return;
		}
		if (old & ITASK_IN_WORKER_QUEUE) {
			cmn_err(CE_PANIC, "task_lu_done received"
			    " when task is in worker queue "
			    " task = %p", (void *)task);
		}
		new &= ~ITASK_KNOWN_TO_LU;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	mutex_exit(&w->worker_lock);

	if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
	    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
	    ITASK_BEING_ABORTED)) == 0) {
		stmf_task_free(task);
	} else {
		cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but "
		    " the task is still not done, task = %p", (void *)task);
	}
}
5067 
/*
 * Mark a task for abort with completion status 's' and, when possible,
 * hand it to its worker thread so the abort is actually driven.
 *
 * The CAS loop sets ITASK_BEING_ABORTED exactly once; if the task is
 * already being aborted, or neither the LU nor the target port knows
 * about it, there is nothing to do.  If the task has no worker yet or
 * is in transition, the flag alone is enough — whoever owns the task
 * next will notice it.
 */
void
stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w;
	uint32_t old, new;

	do {
		old = new = itask->itask_flags;
		if ((old & ITASK_BEING_ABORTED) ||
		    ((old & (ITASK_KNOWN_TO_TGT_PORT |
		    ITASK_KNOWN_TO_LU)) == 0)) {
			return;
		}
		new |= ITASK_BEING_ABORTED;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;
	/* Remember when the abort started, for abort-timeout checks. */
	itask->itask_start_time = ddi_get_lbolt();

	if (((w = itask->itask_worker) == NULL) ||
	    (itask->itask_flags & ITASK_IN_TRANSITION)) {
		return;
	}

	/* Queue it and get out */
	mutex_enter(&w->worker_lock);
	if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
		/* Already queued; the worker will see the abort flag. */
		mutex_exit(&w->worker_lock);
		return;
	}
	atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
		cv_signal(&w->worker_cv);
	mutex_exit(&w->worker_lock);
}
5114 
/*
 * Abort entry point.  Dispatches on abort_cmd:
 *  - STMF_QUEUE_ABORT_LU: kill every task on the LU ('arg').
 *  - STMF_QUEUE_TASK_ABORT: queue a single task for abort.
 *  - STMF_REQUEUE_TASK_ABORT_LPORT/_LU: the provider's abort handler
 *    returned STMF_BUSY earlier; clear the corresponding *_ABORT_CALLED
 *    flag (CAS loop) so stmf_do_task_abort() will retry, but only while
 *    the task is still known to that provider and still being aborted.
 */
void
stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
{
	stmf_i_scsi_task_t *itask = NULL;
	uint32_t old, new, f, rf;

	DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
	    stmf_status_t, s);

	switch (abort_cmd) {
	case STMF_QUEUE_ABORT_LU:
		stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
		return;
	case STMF_QUEUE_TASK_ABORT:
		stmf_queue_task_for_abort(task, s);
		return;
	case STMF_REQUEUE_TASK_ABORT_LPORT:
		rf = ITASK_TGT_PORT_ABORT_CALLED;
		f = ITASK_KNOWN_TO_TGT_PORT;
		break;
	case STMF_REQUEUE_TASK_ABORT_LU:
		rf = ITASK_LU_ABORT_CALLED;
		f = ITASK_KNOWN_TO_LU;
		break;
	default:
		return;
	}
	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	/* All of: still-known, being-aborted and abort-called must hold. */
	f |= ITASK_BEING_ABORTED | rf;
	do {
		old = new = itask->itask_flags;
		if ((old & f) != f) {
			return;
		}
		new &= ~rf;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
}
5152 
5153 void
5154 stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
5155 {
5156 	char			 info[STMF_CHANGE_INFO_LEN];
5157 	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
5158 	unsigned long long	st;
5159 
5160 	st = s;	/* gcc fix */
5161 	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
5162 		(void) snprintf(info, STMF_CHANGE_INFO_LEN,
5163 		    "task %p, lu failed to abort ret=%llx", (void *)task, st);
5164 	} else if ((iof & STMF_IOF_LU_DONE) == 0) {
5165 		(void) snprintf(info, STMF_CHANGE_INFO_LEN,
5166 		    "Task aborted but LU is not finished, task ="
5167 		    "%p, s=%llx, iof=%x", (void *)task, st, iof);
5168 	} else {
5169 		/*
5170 		 * LU abort successfully
5171 		 */
5172 		atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
5173 		return;
5174 	}
5175 
5176 	info[STMF_CHANGE_INFO_LEN - 1] = 0;
5177 	stmf_abort_task_offline(task, 1, info);
5178 }
5179 
/*
 * Handle the target port's response to an abort request for a task.
 * On a clean abort (STMF_ABORT_SUCCESS or STMF_NOT_FOUND with
 * STMF_IOF_LPORT_DONE set) the port's claim on the task is dropped via
 * a CAS loop.  Any other outcome is treated as a port failure and the
 * port is taken offline.
 */
void
stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char			info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
	unsigned long long	st;
	uint32_t		old, new;

	st = s;	/* widen for the %llx format below */
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, STMF_CHANGE_INFO_LEN,
		    "task %p, tgt port failed to abort ret=%llx", (void *)task,
		    st);
	} else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
		(void) snprintf(info, STMF_CHANGE_INFO_LEN,
		    "Task aborted but tgt port is not finished, "
		    "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LPORT abort successfully
		 */
		do {
			old = new = itask->itask_flags;
			if (!(old & ITASK_KNOWN_TO_TGT_PORT))
				return;
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
		return;
	}

	info[STMF_CHANGE_INFO_LEN - 1] = 0;
	stmf_abort_task_offline(task, 0, info);
}
5213 
/*
 * Arrange for the worker thread to poll the LU about this task after
 * 'timeout' milliseconds (ITASK_DEFAULT_POLL_TIMEOUT means "next tick").
 * An ITASK_CMD_POLL_LU command is pushed on the task's command stack —
 * at most once — and the task is queued to its worker if it is not
 * already on the queue.  Returns STMF_BUSY if the command stack is full,
 * STMF_SUCCESS otherwise.
 */
stmf_status_t
stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	int i;

	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU);
	mutex_enter(&w->worker_lock);
	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
		mutex_exit(&w->worker_lock);
		return (STMF_BUSY);
	}
	/* A poll is already pending; don't stack a duplicate. */
	for (i = 0; i < itask->itask_ncmds; i++) {
		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) {
			mutex_exit(&w->worker_lock);
			return (STMF_SUCCESS);
		}
	}
	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU;
	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
	} else {
		/* timeout is in milliseconds; enforce at least one tick. */
		clock_t t = drv_usectohz(timeout * 1000);
		if (t == 0)
			t = 1;
		itask->itask_poll_timeout = ddi_get_lbolt() + t;
	}
	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);
	return (STMF_SUCCESS);
}
5261 
5262 stmf_status_t
5263 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout)
5264 {
5265 	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5266 	    task->task_stmf_private;
5267 	stmf_worker_t *w = itask->itask_worker;
5268 	int i;
5269 
5270 	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT);
5271 	mutex_enter(&w->worker_lock);
5272 	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5273 		mutex_exit(&w->worker_lock);
5274 		return (STMF_BUSY);
5275 	}
5276 	for (i = 0; i < itask->itask_ncmds; i++) {
5277 		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) {
5278 			mutex_exit(&w->worker_lock);
5279 			return (STMF_SUCCESS);
5280 		}
5281 	}
5282 	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT;
5283 	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5284 		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5285 	} else {
5286 		clock_t t = drv_usectohz(timeout * 1000);
5287 		if (t == 0)
5288 			t = 1;
5289 		itask->itask_poll_timeout = ddi_get_lbolt() + t;
5290 	}
5291 	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5292 		itask->itask_worker_next = NULL;
5293 		if (w->worker_task_tail) {
5294 			w->worker_task_tail->itask_worker_next = itask;
5295 		} else {
5296 			w->worker_task_head = itask;
5297 		}
5298 		w->worker_task_tail = itask;
5299 		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5300 			w->worker_max_qdepth_pu = w->worker_queue_depth;
5301 		}
5302 		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5303 			cv_signal(&w->worker_cv);
5304 	}
5305 	mutex_exit(&w->worker_lock);
5306 	return (STMF_SUCCESS);
5307 }
5308 
/*
 * Drive the abort of a task, called from worker/svc context once the
 * task has ITASK_BEING_ABORTED set.  For each provider (LU first, then
 * target port) that still knows the task and has not yet been asked to
 * abort it, atomically mark *_ABORT_CALLED and invoke the provider's
 * abort entry point.  STMF_BUSY clears the flag again so a later pass
 * retries; a hard failure offlines the provider.  If the abort was
 * already requested but the provider has not answered within its abort
 * timeout, the provider is offlined as well.
 */
void
stmf_do_task_abort(scsi_task_t *task)
{
	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
	stmf_lu_t		*lu;
	stmf_local_port_t	*lport;
	unsigned long long	 ret;
	uint32_t		 old, new;
	uint8_t			 call_lu_abort, call_port_abort;
	char			 info[STMF_CHANGE_INFO_LEN];

	lu = task->task_lu;
	lport = task->task_lport;
	/* Claim the right to call the LU's abort exactly once. */
	do {
		old = new = itask->itask_flags;
		if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) ==
		    ITASK_KNOWN_TO_LU) {
			new |= ITASK_LU_ABORT_CALLED;
			call_lu_abort = 1;
		} else {
			call_lu_abort = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (call_lu_abort) {
		/* Tasks under default handling are aborted via dlun0. */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) {
			ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		} else {
			ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		}
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE);
		} else if (ret == STMF_BUSY) {
			/* LU busy: allow a retry on a later pass. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_LU_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, STMF_CHANGE_INFO_LEN,
			    "Abort failed by LU %p, ret %llx", (void *)lu, ret);
			info[STMF_CHANGE_INFO_LEN - 1] = 0;
			stmf_abort_task_offline(task, 1, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		/* Abort already in flight: check for LU abort timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lu->lu_abort_timeout?
		    lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, STMF_CHANGE_INFO_LEN,
			    "lu abort timed out");
			info[STMF_CHANGE_INFO_LEN - 1] = 0;
			stmf_abort_task_offline(itask->itask_task, 1, info);
		}
	}

	/* Claim the right to call the target port's abort exactly once. */
	do {
		old = new = itask->itask_flags;
		if ((old & (ITASK_KNOWN_TO_TGT_PORT |
		    ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) {
			new |= ITASK_TGT_PORT_ABORT_CALLED;
			call_port_abort = 1;
		} else {
			call_port_abort = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	if (call_port_abort) {
		ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0);
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE);
		} else if (ret == STMF_BUSY) {
			/* Port busy: allow a retry on a later pass. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_TGT_PORT_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, STMF_CHANGE_INFO_LEN,
			    "Abort failed by tgt port %p ret %llx",
			    (void *)lport, ret);
			info[STMF_CHANGE_INFO_LEN - 1] = 0;
			stmf_abort_task_offline(task, 0, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) {
		/* Abort already in flight: check for port abort timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lport->lport_abort_timeout?
		    lport->lport_abort_timeout :
		    ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, STMF_CHANGE_INFO_LEN,
			    "lport abort timed out");
			info[STMF_CHANGE_INFO_LEN - 1] = 0;
			stmf_abort_task_offline(itask->itask_task, 0, info);
		}
	}
}
5397 
/*
 * State-change control entry point for LUs and local ports.  'cmd'
 * selects both the object class (STMF_CMD_LU_OP / STMF_CMD_LPORT_OP)
 * and the transition.  ONLINE/OFFLINE requests validate the current
 * state, move the object to the transitional *ING state and queue the
 * actual work to the stmf svc thread; *_COMPLETE commands finish the
 * transition based on the provider's reported completion status.
 *
 * stmf_state.stmf_lock is held across state checks and is dropped
 * (and re-acquired) around calls back into the provider's ctl entry
 * point.  Returns STMF_SUCCESS, or an error such as STMF_INVALID_ARG,
 * STMF_BADSTATE, STMF_ALREADY, STMF_BUSY or STMF_FAILURE.
 */
stmf_status_t
stmf_ctl(int cmd, void *obj, void *arg)
{
	stmf_status_t			ret;
	stmf_i_lu_t			*ilu;
	stmf_i_local_port_t		*ilport;
	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;

	mutex_enter(&stmf_state.stmf_lock);
	ret = STMF_INVALID_ARG;
	if (cmd & STMF_CMD_LU_OP) {
		ilu = stmf_lookup_lu((stmf_lu_t *)obj);
		if (ilu == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lu__state__change,
		    stmf_lu_t *, ilu->ilu_lu,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else if (cmd & STMF_CMD_LPORT_OP) {
		ilport = stmf_lookup_lport((stmf_local_port_t *)obj);
		if (ilport == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lport__state__change,
		    stmf_local_port_t *, ilport->ilport_lport,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else {
		goto stmf_ctl_lock_exit;
	}

	switch (cmd) {
	case STMF_CMD_LU_ONLINE:
		switch (ilu->ilu_state) {
			case STMF_STATE_OFFLINE:
				ret = STMF_SUCCESS;
				break;
			case STMF_STATE_ONLINE:
			case STMF_STATE_ONLINING:
				ret = STMF_ALREADY;
				break;
			case STMF_STATE_OFFLINING:
				ret = STMF_BUSY;
				break;
			default:
				ret = STMF_BADSTATE;
				break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		/* Hand the actual online work to the svc thread. */
		ilu->ilu_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_ONLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_ONLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		} else {
			/* XXX: should log a message and record more data */
			ilu->ilu_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LU_OFFLINE:
		switch (ilu->ilu_state) {
			case STMF_STATE_ONLINE:
				ret = STMF_SUCCESS;
				break;
			case STMF_STATE_OFFLINE:
			case STMF_STATE_OFFLINING:
				ret = STMF_ALREADY;
				break;
			case STMF_STATE_ONLINING:
				ret = STMF_BUSY;
				break;
			default:
				ret = STMF_BADSTATE;
				break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;
		ilu->ilu_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_OFFLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_OFFLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			/* Offline failed: the LU stays online. */
			ilu->ilu_state = STMF_STATE_ONLINE;
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	/*
	 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
	 * It's related with hardware disable/enable.
	 */
	case STMF_CMD_LPORT_ONLINE:
		switch (ilport->ilport_state) {
			case STMF_STATE_OFFLINE:
				ret = STMF_SUCCESS;
				break;
			case STMF_STATE_ONLINE:
			case STMF_STATE_ONLINING:
				ret = STMF_ALREADY;
				break;
			case STMF_STATE_OFFLINING:
				ret = STMF_BUSY;
				break;
			default:
				ret = STMF_BADSTATE;
				break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		/*
		 * Only user request can recover the port from the
		 * FORCED_OFFLINE state
		 */
		if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) {
			if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
				ret = STMF_FAILURE;
				goto stmf_ctl_lock_exit;
			}
		}

		/*
		 * Avoid too frequent request to online
		 */
		if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
			ilport->ilport_online_times = 0;
			ilport->ilport_avg_interval = 0;
		}
		if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) &&
		    (ilport->ilport_online_times >= 4)) {
			ret = STMF_FAILURE;
			ilport->ilport_flags |= ILPORT_FORCED_OFFLINE;
			stmf_trace(NULL, "stmf_ctl: too frequent request to "
			    "online the port");
			cmn_err(CE_WARN, "stmf_ctl: too frequent request to "
			    "online the port, set FORCED_OFFLINE now");
			goto stmf_ctl_lock_exit;
		}
		/* Track a running average of the interval between onlines. */
		if (ilport->ilport_online_times > 0) {
			if (ilport->ilport_online_times == 1) {
				ilport->ilport_avg_interval = ddi_get_lbolt() -
				    ilport->ilport_last_online_clock;
			} else {
				ilport->ilport_avg_interval =
				    (ilport->ilport_avg_interval +
				    ddi_get_lbolt() -
				    ilport->ilport_last_online_clock) >> 1;
			}
		}
		ilport->ilport_last_online_clock = ddi_get_lbolt();
		ilport->ilport_online_times++;

		/*
		 * Submit online service request
		 */
		ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE;
		ilport->ilport_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_ONLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_ONLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			ilport->ilport_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LPORT_OFFLINE:
		switch (ilport->ilport_state) {
			case STMF_STATE_ONLINE:
				ret = STMF_SUCCESS;
				break;
			case STMF_STATE_OFFLINE:
			case STMF_STATE_OFFLINING:
				ret = STMF_ALREADY;
				break;
			case STMF_STATE_ONLINING:
				ret = STMF_BUSY;
				break;
			default:
				ret = STMF_BADSTATE;
				break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		ilport->ilport_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_OFFLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_OFFLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			/* Offline failed: the port stays online. */
			ilport->ilport_state = STMF_STATE_ONLINE;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	default:
		cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd);
		ret = STMF_INVALID_ARG;
		goto stmf_ctl_lock_exit;
	}

	return (STMF_SUCCESS);

stmf_ctl_lock_exit:;
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}
5663 
/*
 * Handler for SI_STMF class info requests; currently nothing is
 * implemented at the framework level, so all queries are rejected.
 */
/* ARGSUSED */
stmf_status_t
stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
						uint32_t *bufsizep)
{
	return (STMF_NOT_SUPPORTED);
}
5671 
5672 /* ARGSUSED */
5673 stmf_status_t
5674 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5675 						uint32_t *bufsizep)
5676 {
5677 	uint32_t cl = SI_GET_CLASS(cmd);
5678 
5679 	if (cl == SI_STMF) {
5680 		return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep));
5681 	}
5682 	if (cl == SI_LPORT) {
5683 		return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1,
5684 		    arg2, buf, bufsizep));
5685 	} else if (cl == SI_LU) {
5686 		return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf,
5687 		    bufsizep));
5688 	}
5689 
5690 	return (STMF_NOT_SUPPORTED);
5691 }
5692 
5693 /*
5694  * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by
5695  * stmf to register local ports. The ident should have 20 bytes in buffer
5696  * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string.
5697  */
5698 void
5699 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn,
5700     uint8_t protocol_id)
5701 {
5702 	char wwn_str[20+1];
5703 
5704 	sdid->protocol_id = protocol_id;
5705 	sdid->piv = 1;
5706 	sdid->code_set = CODE_SET_ASCII;
5707 	sdid->association = ID_IS_TARGET_PORT;
5708 	sdid->ident_length = 20;
5709 	/* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */
5710 	(void) snprintf(wwn_str, sizeof (wwn_str),
5711 	    "wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
5712 	    wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
5713 	bcopy(wwn_str, (char *)sdid->ident, 20);
5714 }
5715 
5716 
/*
 * Build the REPORT TARGET PORT GROUPS response payload.  Group 0/1
 * membership depends on stmf_state.stmf_alua_node; standby ports get a
 * second ("Non PREF, Standby") group only when 'ilu_alua' is set.  At
 * most 255 ports are reported per group (single-byte count field).
 * Returns a kmem-allocated stmf_xfer_data_t the caller must free, or
 * NULL on allocation failure.
 */
stmf_xfer_data_t *
stmf_prepare_tpgs_data(uint8_t ilu_alua)
{
	stmf_xfer_data_t *xd;
	stmf_i_local_port_t *ilport;
	uint8_t *p;
	uint32_t sz, asz, nports = 0, nports_standby = 0;

	mutex_enter(&stmf_state.stmf_lock);
	/* check if any ports are standby and create second group */
	for (ilport = stmf_state.stmf_ilportlist; ilport;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_standby == 1) {
			nports_standby++;
		} else {
			nports++;
		}
	}

	/* The spec only allows for 255 ports to be reported per group */
	nports = min(nports, 255);
	nports_standby = min(nports_standby, 255);
	/* 4-byte length header + 8-byte group descriptor + 4 bytes/port */
	sz = (nports * 4) + 12;
	if (nports_standby && ilu_alua) {
		sz += (nports_standby * 4) + 8;
	}
	/* xd's buf[] overlays the last 4 bytes of the struct */
	asz = sz + sizeof (*xd) - 4;
	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
	if (xd == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (NULL);
	}
	xd->alloc_size = asz;
	xd->size_left = sz;

	p = xd->buf;

	*((uint32_t *)p) = BE_32(sz - 4);
	p += 4;
	p[0] = 0x80;	/* PREF */
	p[1] = 5;	/* AO_SUP, S_SUP */
	if (stmf_state.stmf_alua_node == 1) {
		p[3] = 1;	/* Group 1 */
	} else {
		p[3] = 0;	/* Group 0 */
	}
	p[7] = nports & 0xff;
	p += 8;
	/* One 4-byte descriptor (rtpid in bytes 2-3) per active port. */
	for (ilport = stmf_state.stmf_ilportlist; ilport;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_standby == 1) {
			continue;
		}
		((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
		p += 4;
	}
	if (nports_standby && ilu_alua) {
		p[0] = 0x02;	/* Non PREF, Standby */
		p[1] = 5;	/* AO_SUP, S_SUP */
		if (stmf_state.stmf_alua_node == 1) {
			p[3] = 0;	/* Group 0 */
		} else {
			p[3] = 1;	/* Group 1 */
		}
		p[7] = nports_standby & 0xff;
		p += 8;
		for (ilport = stmf_state.stmf_ilportlist; ilport;
		    ilport = ilport->ilport_next) {
			if (ilport->ilport_standby == 0) {
				continue;
			}
			((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
			p += 4;
		}
	}

	mutex_exit(&stmf_state.stmf_lock);

	return (xd);
}
5797 
5798 struct scsi_devid_desc *
5799 stmf_scsilib_get_devid_desc(uint16_t rtpid)
5800 {
5801 	scsi_devid_desc_t *devid = NULL;
5802 	stmf_i_local_port_t *ilport;
5803 
5804 	mutex_enter(&stmf_state.stmf_lock);
5805 
5806 	for (ilport = stmf_state.stmf_ilportlist; ilport;
5807 	    ilport = ilport->ilport_next) {
5808 		if (ilport->ilport_rtpid == rtpid) {
5809 			scsi_devid_desc_t *id = ilport->ilport_lport->lport_id;
5810 			uint32_t id_sz = sizeof (scsi_devid_desc_t) - 1 +
5811 			    id->ident_length;
5812 			devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz,
5813 			    KM_NOSLEEP);
5814 			if (devid != NULL) {
5815 				bcopy(id, devid, id_sz);
5816 			}
5817 			break;
5818 		}
5819 	}
5820 
5821 	mutex_exit(&stmf_state.stmf_lock);
5822 	return (devid);
5823 }
5824 
5825 uint16_t
5826 stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid)
5827 {
5828 	stmf_i_local_port_t	*ilport;
5829 	scsi_devid_desc_t	*id;
5830 	uint16_t		rtpid = 0;
5831 
5832 	mutex_enter(&stmf_state.stmf_lock);
5833 	for (ilport = stmf_state.stmf_ilportlist; ilport;
5834 	    ilport = ilport->ilport_next) {
5835 		id = ilport->ilport_lport->lport_id;
5836 		if ((devid->ident_length == id->ident_length) &&
5837 		    (memcmp(devid->ident, id->ident, id->ident_length) == 0)) {
5838 			rtpid = ilport->ilport_rtpid;
5839 			break;
5840 		}
5841 	}
5842 	mutex_exit(&stmf_state.stmf_lock);
5843 	return (rtpid);
5844 }
5845 
5846 static uint16_t stmf_lu_id_gen_number = 0;
5847 
/*
 * Generate a unique 16-byte LU identifier for 'company_id'; convenience
 * wrapper around stmf_scsilib_uniq_lu_id2() with host_id 0 (meaning the
 * host identity is derived from the local MAC address or hostid).
 */
stmf_status_t
stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id)
{
	return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id));
}
5853 
5854 stmf_status_t
5855 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
5856     scsi_devid_desc_t *lu_id)
5857 {
5858 	uint8_t *p;
5859 	struct timeval32 timestamp32;
5860 	uint32_t *t = (uint32_t *)&timestamp32;
5861 	struct ether_addr mac;
5862 	uint8_t *e = (uint8_t *)&mac;
5863 	int hid = (int)host_id;
5864 
5865 	if (company_id == COMPANY_ID_NONE)
5866 		company_id = COMPANY_ID_SUN;
5867 
5868 	if (lu_id->ident_length != 0x10)
5869 		return (STMF_INVALID_ARG);
5870 
5871 	p = (uint8_t *)lu_id;
5872 
5873 	atomic_add_16(&stmf_lu_id_gen_number, 1);
5874 
5875 	p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
5876 	p[4] = ((company_id >> 20) & 0xf) | 0x60;
5877 	p[5] = (company_id >> 12) & 0xff;
5878 	p[6] = (company_id >> 4) & 0xff;
5879 	p[7] = (company_id << 4) & 0xf0;
5880 	if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
5881 		hid = BE_32((int)zone_get_hostid(NULL));
5882 	}
5883 	if (hid != 0) {
5884 		e[0] = (hid >> 24) & 0xff;
5885 		e[1] = (hid >> 16) & 0xff;
5886 		e[2] = (hid >> 8) & 0xff;
5887 		e[3] = hid & 0xff;
5888 		e[4] = e[5] = 0;
5889 	}
5890 	bcopy(e, p+8, 6);
5891 	uniqtime32(&timestamp32);
5892 	*t = BE_32(*t);
5893 	bcopy(t, p+14, 4);
5894 	p[18] = (stmf_lu_id_gen_number >> 8) & 0xff;
5895 	p[19] = stmf_lu_id_gen_number & 0xff;
5896 
5897 	return (STMF_SUCCESS);
5898 }
5899 
5900 /*
5901  * saa is sense key, ASC, ASCQ
5902  */
/*
 * Set the task's SCSI status (and, for CHECK CONDITION, fixed-format
 * sense data built from 'saa': sense key in bits 16-19, ASC in bits
 * 8-15, ASCQ in bits 0-7) and send it via stmf_send_scsi_status() with
 * STMF_IOF_LU_DONE.
 */
void
stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa)
{
	uint8_t sd[18];
	task->task_scsi_status = st;
	if (st == 2) {
		/* CHECK CONDITION: build 18-byte fixed-format sense data. */
		bzero(sd, 18);
		sd[0] = 0x70;	/* current error, fixed format */
		sd[2] = (saa >> 16) & 0xf;	/* sense key */
		sd[7] = 10;	/* additional sense length */
		sd[12] = (saa >> 8) & 0xff;	/* ASC */
		sd[13] = saa & 0xff;		/* ASCQ */
		task->task_sense_data = sd;
		task->task_sense_length = 18;
	} else {
		task->task_sense_data = NULL;
		task->task_sense_length = 0;
	}
	/*
	 * NOTE: task_sense_data points at the local 'sd' array.  This
	 * relies on the sense bytes being consumed during the synchronous
	 * send below; the pointer is dangling once this function returns.
	 */
	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
}
5923 
/*
 * Builds a Device Identification VPD page (0x83) into 'page' for the
 * designators selected by vpd_mask (STMF_VPD_LU_ID, STMF_VPD_TARGET_ID,
 * STMF_VPD_TP_GROUP, STMF_VPD_RELATIVE_TP_ID). byte0 becomes the
 * peripheral qualifier/device type byte. page_len is the caller's buffer
 * size (clamped to 65535). Returns the number of bytes actually written
 * into 'page'; the page length field (bytes 2-3) reflects the full
 * descriptor length 'm' even if the buffer truncated the copy.
 */
uint32_t
stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page,
    uint32_t page_len, uint8_t byte0, uint32_t vpd_mask)
{
	uint8_t		*p = NULL;	/* descriptor staged on prior pass */
	uint8_t		small_buf[32];	/* scratch for synthesized entries */
	uint32_t	sz = 0;		/* size of staged descriptor */
	uint32_t	n = 4;		/* bytes written (incl. 4B header) */
	uint32_t	m = 0;		/* total descriptor length */
	uint32_t	last_bit = 0;	/* mask bit handled on prior pass */

	if (page_len < 4)
		return (0);
	if (page_len > 65535)
		page_len = 65535;

	page[0] = byte0;
	page[1] = 0x83;

	/*
	 * Each loop pass first flushes the descriptor staged (p/sz) by the
	 * previous pass into 'page', then stages the next descriptor
	 * selected by vpd_mask. The loop exits once the mask is exhausted.
	 */
	/* CONSTCOND */
	while (1) {
		m += sz;
		if (sz && (page_len > n)) {
			uint32_t copysz;
			/* Copy as much as the caller's buffer allows. */
			copysz = page_len > (n + sz) ? sz : page_len - n;
			bcopy(p, page + n, copysz);
			n += copysz;
		}
		vpd_mask &= ~last_bit;
		if (vpd_mask == 0)
			break;

		if (vpd_mask & STMF_VPD_LU_ID) {
			/* LU designator, copied straight from lu_id. */
			last_bit = STMF_VPD_LU_ID;
			sz = task->task_lu->lu_id->ident_length + 4;
			p = (uint8_t *)task->task_lu->lu_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TARGET_ID) {
			/* Target port designator from lport_id. */
			last_bit = STMF_VPD_TARGET_ID;
			sz = task->task_lport->lport_id->ident_length + 4;
			p = (uint8_t *)task->task_lport->lport_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TP_GROUP) {
			stmf_i_local_port_t *ilport;
			last_bit = STMF_VPD_TP_GROUP;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;	/* binary code set */
			p[1] = 0x15;	/* target port scope, TPG type */
			p[3] = 4;	/* designator length */
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			/*
			 * Report target port group 1 when either:
			 * - ALUA is enabled and this is an ALUA
			 *   participating or standby port with a relative
			 *   target port id > 255, or
			 * - this node is ALUA node 1 and the port is not
			 *   a standby port.
			 * Otherwise the group is 0.
			 */
			if ((stmf_state.stmf_alua_state &&
			    (ilport->ilport_alua || ilport->ilport_standby) &&
			    ilport->ilport_rtpid > 255) ||
			    (stmf_state.stmf_alua_node == 1 &&
			    ilport->ilport_standby != 1)) {
				p[7] = 1;	/* Group 1 */
			}
			sz = 8;
			continue;
		} else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) {
			stmf_i_local_port_t *ilport;

			last_bit = STMF_VPD_RELATIVE_TP_ID;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;	/* binary code set */
			p[1] = 0x14;	/* target port scope, rel. tgt port */
			p[3] = 4;	/* designator length */
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			/* Relative target port id, big-endian. */
			p[6] = (ilport->ilport_rtpid >> 8) & 0xff;
			p[7] = ilport->ilport_rtpid & 0xff;
			sz = 8;
			continue;
		} else {
			cmn_err(CE_WARN, "Invalid vpd_mask");
			break;
		}
	}

	/* Page length field: total descriptor bytes, big-endian. */
	page[2] = (m >> 8) & 0xff;
	page[3] = m & 0xff;

	return (n);
}
6018 
6019 void
6020 stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf)
6021 {
6022 	stmf_i_scsi_task_t *itask =
6023 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
6024 	stmf_i_lu_t *ilu =
6025 	    (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
6026 	stmf_xfer_data_t *xd;
6027 	uint32_t sz, minsz;
6028 
6029 	itask->itask_flags |= ITASK_DEFAULT_HANDLING;
6030 	task->task_cmd_xfer_length =
6031 	    ((((uint32_t)task->task_cdb[6]) << 24) |
6032 	    (((uint32_t)task->task_cdb[7]) << 16) |
6033 	    (((uint32_t)task->task_cdb[8]) << 8) |
6034 	    ((uint32_t)task->task_cdb[9]));
6035 
6036 	if (task->task_additional_flags &
6037 	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
6038 		task->task_expected_xfer_length =
6039 		    task->task_cmd_xfer_length;
6040 	}
6041 
6042 	if (task->task_cmd_xfer_length == 0) {
6043 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
6044 		return;
6045 	}
6046 	if (task->task_cmd_xfer_length < 4) {
6047 		stmf_scsilib_send_status(task, STATUS_CHECK,
6048 		    STMF_SAA_INVALID_FIELD_IN_CDB);
6049 		return;
6050 	}
6051 
6052 	sz = min(task->task_expected_xfer_length,
6053 	    task->task_cmd_xfer_length);
6054 
6055 	xd = stmf_prepare_tpgs_data(ilu->ilu_alua);
6056 
6057 	if (xd == NULL) {
6058 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6059 		    STMF_ALLOC_FAILURE, NULL);
6060 		return;
6061 	}
6062 
6063 	sz = min(sz, xd->size_left);
6064 	xd->size_left = sz;
6065 	minsz = min(512, sz);
6066 
6067 	if (dbuf == NULL)
6068 		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
6069 	if (dbuf == NULL) {
6070 		kmem_free(xd, xd->alloc_size);
6071 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6072 		    STMF_ALLOC_FAILURE, NULL);
6073 		return;
6074 	}
6075 	dbuf->db_lu_private = xd;
6076 	stmf_xd_to_dbuf(dbuf);
6077 
6078 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
6079 	(void) stmf_xfer_data(task, dbuf, 0);
6080 
6081 }
6082 
6083 void
6084 stmf_scsilib_handle_task_mgmt(scsi_task_t *task)
6085 {
6086 
6087 	switch (task->task_mgmt_function) {
6088 	/*
6089 	 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET
6090 	 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state
6091 	 * in these cases. This needs to be changed to abort only the required
6092 	 * set.
6093 	 */
6094 	case TM_ABORT_TASK:
6095 	case TM_ABORT_TASK_SET:
6096 	case TM_CLEAR_TASK_SET:
6097 	case TM_LUN_RESET:
6098 		stmf_handle_lun_reset(task);
6099 		/* issue the reset to the proxy node as well */
6100 		if (stmf_state.stmf_alua_state == 1) {
6101 			(void) stmf_proxy_scsi_cmd(task, NULL);
6102 		}
6103 		return;
6104 	case TM_TARGET_RESET:
6105 	case TM_TARGET_COLD_RESET:
6106 	case TM_TARGET_WARM_RESET:
6107 		stmf_handle_target_reset(task);
6108 		return;
6109 	default:
6110 		/* We dont support this task mgmt function */
6111 		stmf_scsilib_send_status(task, STATUS_CHECK,
6112 		    STMF_SAA_INVALID_FIELD_IN_CMD_IU);
6113 		return;
6114 	}
6115 }
6116 
/*
 * Default handling for LUN reset (also used for the abort-task family of
 * TM functions; see stmf_scsilib_handle_task_mgmt). Marks the LU as
 * reset-active, queues aborts for every other task on the LU, and then
 * polls this task until the aborts drain.
 */
void
stmf_handle_lun_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with target reset, grab this lock. The LU is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	/* Only one reset may be in progress on an LU at a time. */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * Mark this task as the one causing LU reset so that we know who
	 * was responsible for setting the ILU_RESET_ACTIVE. In case this
	 * task itself gets aborted, we will clear ILU_RESET_ACTIVE.
	 */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET;

	/* Initiatiate abort on all commands on this LU except this one */
	stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu);

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6159 
/*
 * Default handling for target reset TM functions. Marks the session and
 * every LU mapped into it as reset-active, queues aborts for all tasks
 * on each of those LUs, and then polls this task until the aborts drain.
 * Fails with OPERATION IN PROGRESS if any LU already has a reset active.
 */
void
stmf_handle_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	int i, lf;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with LUN reset, grab this lock. The session is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	/* Grab the session lock as a writer to prevent any changes in it */
	rw_enter(iss->iss_lockp, RW_WRITER);

	/* Only one reset may be in progress on a session at a time. */
	if (iss->iss_flags & ISS_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE);

	/*
	 * Now go through each LUN in this session and make sure all of them
	 * can be reset.
	 */
	lm = iss->iss_sm;
	for (i = 0, lf = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lf++;	/* count of mapped LUNs seen */
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/* Some LU is already resetting; back out. */
			atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
			rw_exit(iss->iss_lockp);
			mutex_exit(&stmf_state.stmf_lock);
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_OPERATION_IN_PROGRESS);
			return;
		}
	}
	if (lf == 0) {
		/* No luns in this session */
		atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/* ok, start the damage */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING |
	    ITASK_CAUSING_TARGET_RESET;
	/* Mark every mapped LU reset-active before dropping the locks. */
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	}
	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	/* Queue aborts for all tasks on every mapped LU except this one. */
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED,
		    lm_ent->ent_lu);
	}

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6250 
6251 int
6252 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask)
6253 {
6254 	scsi_task_t *task = itask->itask_task;
6255 	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
6256 	    task->task_session->ss_stmf_private;
6257 
6258 	rw_enter(iss->iss_lockp, RW_WRITER);
6259 	if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) ||
6260 	    (task->task_cdb[0] == SCMD_INQUIRY)) {
6261 		rw_exit(iss->iss_lockp);
6262 		return (0);
6263 	}
6264 	atomic_and_32(&iss->iss_flags,
6265 	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
6266 	rw_exit(iss->iss_lockp);
6267 
6268 	if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
6269 		return (0);
6270 	}
6271 	stmf_scsilib_send_status(task, STATUS_CHECK,
6272 	    STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED);
6273 	return (1);
6274 }
6275 
6276 void
6277 stmf_worker_init()
6278 {
6279 	uint32_t i;
6280 
6281 	/* Make local copy of global tunables */
6282 	stmf_i_max_nworkers = stmf_max_nworkers;
6283 	stmf_i_min_nworkers = stmf_min_nworkers;
6284 
6285 	ASSERT(stmf_workers == NULL);
6286 	if (stmf_i_min_nworkers < 4) {
6287 		stmf_i_min_nworkers = 4;
6288 	}
6289 	if (stmf_i_max_nworkers < stmf_i_min_nworkers) {
6290 		stmf_i_max_nworkers = stmf_i_min_nworkers;
6291 	}
6292 	stmf_workers = (stmf_worker_t *)kmem_zalloc(
6293 	    sizeof (stmf_worker_t) * stmf_i_max_nworkers, KM_SLEEP);
6294 	for (i = 0; i < stmf_i_max_nworkers; i++) {
6295 		stmf_worker_t *w = &stmf_workers[i];
6296 		mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL);
6297 		cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL);
6298 	}
6299 	stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
6300 	stmf_workers_state = STMF_WORKERS_ENABLED;
6301 
6302 	/* Workers will be started by stmf_worker_mgmt() */
6303 
6304 	/* Lets wait for atleast one worker to start */
6305 	while (stmf_nworkers_cur == 0)
6306 		delay(drv_usectohz(20 * 1000));
6307 	stmf_worker_mgmt_delay = drv_usectohz(3 * 1000 * 1000);
6308 }
6309 
6310 stmf_status_t
6311 stmf_worker_fini()
6312 {
6313 	int i;
6314 	clock_t sb;
6315 
6316 	if (stmf_workers_state == STMF_WORKERS_DISABLED)
6317 		return (STMF_SUCCESS);
6318 	ASSERT(stmf_workers);
6319 	stmf_workers_state = STMF_WORKERS_DISABLED;
6320 	stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
6321 	cv_signal(&stmf_state.stmf_cv);
6322 
6323 	sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000);
6324 	/* Wait for all the threads to die */
6325 	while (stmf_nworkers_cur != 0) {
6326 		if (ddi_get_lbolt() > sb) {
6327 			stmf_workers_state = STMF_WORKERS_ENABLED;
6328 			return (STMF_BUSY);
6329 		}
6330 		delay(drv_usectohz(100 * 1000));
6331 	}
6332 	for (i = 0; i < stmf_i_max_nworkers; i++) {
6333 		stmf_worker_t *w = &stmf_workers[i];
6334 		mutex_destroy(&w->worker_lock);
6335 		cv_destroy(&w->worker_cv);
6336 	}
6337 	kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers);
6338 	stmf_workers = NULL;
6339 
6340 	return (STMF_SUCCESS);
6341 }
6342 
/*
 * Body of each worker thread. Drains the worker's task queue, dispatching
 * each itask's topmost pending command to either its LU provider or the
 * framework's default LU (dlun0, when ITASK_DEFAULT_HANDLING is set).
 * Tasks that cannot run yet (aborts still known to LU/lport, polls whose
 * timeout has not expired) are parked on the wait queue and moved back to
 * the task queue when the wait timer fires. The worker sleeps on its cv
 * when idle and exits when STMF_WORKER_TERMINATE is set and its ref count
 * drops to zero.
 */
void
stmf_worker_task(void *arg)
{
	stmf_worker_t *w;
	stmf_i_scsi_session_t *iss;
	scsi_task_t *task;
	stmf_i_scsi_task_t *itask;
	stmf_data_buf_t *dbuf;
	stmf_lu_t *lu;
	clock_t wait_timer = 0;
	clock_t wait_ticks, wait_delta = 0;
	uint32_t old, new;
	uint8_t curcmd;
	uint8_t abort_free;
	uint8_t wait_queue;
	uint8_t dec_qdepth;

	w = (stmf_worker_t *)arg;
	wait_ticks = drv_usectohz(10000);

	DTRACE_PROBE1(worker__create, stmf_worker_t, w);
	mutex_enter(&w->worker_lock);
	w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE;
stmf_worker_loop:;
	/* Exit only when asked to terminate and nothing references us. */
	if ((w->worker_ref_count == 0) &&
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		w->worker_flags &= ~(STMF_WORKER_STARTED |
		    STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE);
		w->worker_tid = NULL;
		mutex_exit(&w->worker_lock);
		DTRACE_PROBE1(worker__destroy, stmf_worker_t, w);
		thread_exit();
	}
	/* CONSTCOND */
	while (1) {
		dec_qdepth = 0;
		/* Wait timer fired: move the wait queue onto the task queue */
		if (wait_timer && (ddi_get_lbolt() >= wait_timer)) {
			wait_timer = 0;
			wait_delta = 0;
			if (w->worker_wait_head) {
				ASSERT(w->worker_wait_tail);
				if (w->worker_task_head == NULL)
					w->worker_task_head =
					    w->worker_wait_head;
				else
					w->worker_task_tail->itask_worker_next =
					    w->worker_wait_head;
				w->worker_task_tail = w->worker_wait_tail;
				w->worker_wait_head = w->worker_wait_tail =
				    NULL;
			}
		}
		/* Queue empty: fall through to sleep/terminate handling. */
		if ((itask = w->worker_task_head) == NULL) {
			break;
		}
		task = itask->itask_task;
		DTRACE_PROBE2(worker__active, stmf_worker_t, w,
		    scsi_task_t *, task);
		/* Unlink the itask from the head of the task queue. */
		w->worker_task_head = itask->itask_worker_next;
		if (w->worker_task_head == NULL)
			w->worker_task_tail = NULL;

		wait_queue = 0;
		abort_free = 0;
		/* Topmost entry of the command stack is what runs next. */
		if (itask->itask_ncmds > 0) {
			curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1];
		} else {
			ASSERT(itask->itask_flags & ITASK_BEING_ABORTED);
		}
		/*
		 * Atomically transition the task flags: an abort overrides
		 * whatever was queued; a new task gets marked as known to
		 * the LU and in the run queue for kstat accounting.
		 */
		do {
			old = itask->itask_flags;
			if (old & ITASK_BEING_ABORTED) {
				itask->itask_ncmds = 1;
				curcmd = itask->itask_cmd_stack[0] =
				    ITASK_CMD_ABORT;
				goto out_itask_flag_loop;
			} else if ((curcmd & ITASK_CMD_MASK) ==
			    ITASK_CMD_NEW_TASK) {
				/*
				 * set ITASK_KSTAT_IN_RUNQ, this flag
				 * will not reset until task completed
				 */
				new = old | ITASK_KNOWN_TO_LU |
				    ITASK_KSTAT_IN_RUNQ;
			} else {
				goto out_itask_flag_loop;
			}
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

out_itask_flag_loop:

		/*
		 * Decide if this task needs to go to a queue and/or if
		 * we can decrement the itask_cmd_stack.
		 */
		if (curcmd == ITASK_CMD_ABORT) {
			if (itask->itask_flags & (ITASK_KNOWN_TO_LU |
			    ITASK_KNOWN_TO_TGT_PORT)) {
				/* Providers still hold it; wait and retry. */
				wait_queue = 1;
			} else {
				/* Nobody else knows it; free immediately. */
				abort_free = 1;
			}
		} else if ((curcmd & ITASK_CMD_POLL) &&
		    (itask->itask_poll_timeout > ddi_get_lbolt())) {
			/* Poll interval not yet expired; park it. */
			wait_queue = 1;
		}

		if (wait_queue) {
			/* Append to the wait queue and arm the wait timer. */
			itask->itask_worker_next = NULL;
			if (w->worker_wait_tail) {
				w->worker_wait_tail->itask_worker_next = itask;
			} else {
				w->worker_wait_head = itask;
			}
			w->worker_wait_tail = itask;
			if (wait_timer == 0) {
				wait_timer = ddi_get_lbolt() + wait_ticks;
				wait_delta = wait_ticks;
			}
		} else if ((--(itask->itask_ncmds)) != 0) {
			/* More commands stacked; requeue at the tail. */
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
		} else {
			/* Last stacked command; task leaves the queue. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_IN_WORKER_QUEUE);
			/*
			 * This is where the queue depth should go down by
			 * one but we delay that on purpose to account for
			 * the call into the provider. The actual decrement
			 * happens after the worker has done its job.
			 */
			dec_qdepth = 1;
			itask->itask_waitq_time +=
			    gethrtime() - itask->itask_waitq_enter_timestamp;
		}

		/* We made it here means we are going to call LU */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
			lu = task->task_lu;
		else
			lu = dlun0;
		dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
		/* Drop the lock across the provider call. */
		mutex_exit(&w->worker_lock);
		curcmd &= ITASK_CMD_MASK;
		switch (curcmd) {
		case ITASK_CMD_NEW_TASK:
			iss = (stmf_i_scsi_session_t *)
			    task->task_session->ss_stmf_private;
			stmf_itl_lu_new_task(itask);
			if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
				/* May complete the task with a check cond. */
				if (stmf_handle_cmd_during_ic(itask))
					break;
			}
#ifdef	DEBUG
			if (stmf_drop_task_counter > 0) {
				if (atomic_add_32_nv(
				    (uint32_t *)&stmf_drop_task_counter,
				    -1) == 1) {
					break;
				}
			}
#endif
			DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
			lu->lu_new_task(task, dbuf);
			break;
		case ITASK_CMD_DATA_XFER_DONE:
			lu->lu_dbuf_xfer_done(task, dbuf);
			break;
		case ITASK_CMD_STATUS_DONE:
			lu->lu_send_status_done(task);
			break;
		case ITASK_CMD_ABORT:
			if (abort_free) {
				stmf_task_free(task);
			} else {
				stmf_do_task_abort(task);
			}
			break;
		case ITASK_CMD_POLL_LU:
			if (!wait_queue) {
				lu->lu_task_poll(task);
			}
			break;
		case ITASK_CMD_POLL_LPORT:
			if (!wait_queue)
				task->task_lport->lport_task_poll(task);
			break;
		case ITASK_CMD_SEND_STATUS:
		/* case ITASK_CMD_XFER_DATA: */
			break;
		}
		mutex_enter(&w->worker_lock);
		if (dec_qdepth) {
			w->worker_queue_depth--;
		}
	}
	/*
	 * Queue drained. If terminating, either loop back to exit (no
	 * references left) or arm a short timer to re-check soon.
	 */
	if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) {
		if (w->worker_ref_count == 0)
			goto stmf_worker_loop;
		else {
			wait_timer = ddi_get_lbolt() + 1;
			wait_delta = 1;
		}
	}
	/* Sleep until signalled (or until the wait timer expires). */
	w->worker_flags &= ~STMF_WORKER_ACTIVE;
	if (wait_timer) {
		DTRACE_PROBE1(worker__timed__sleep, stmf_worker_t, w);
		(void) cv_reltimedwait(&w->worker_cv, &w->worker_lock,
		    wait_delta, TR_CLOCK_TICK);
	} else {
		DTRACE_PROBE1(worker__sleep, stmf_worker_t, w);
		cv_wait(&w->worker_cv, &w->worker_lock);
	}
	DTRACE_PROBE1(worker__wakeup, stmf_worker_t, w);
	w->worker_flags |= STMF_WORKER_ACTIVE;
	goto stmf_worker_loop;
}
6565 
/*
 * Worker-pool manager, invoked periodically (interval governed by
 * stmf_worker_mgmt_delay). Completes any in-flight worker count
 * transitions, then samples per-worker queue depths roughly once per
 * second and scales the pool between stmf_i_min_nworkers and
 * stmf_i_max_nworkers. Scale-down is deliberately damped: the highest
 * observed load is tracked over stmf_worker_scale_down_delay seconds
 * before any workers are actually retired.
 */
void
stmf_worker_mgmt()
{
	int i;
	int workers_needed;
	uint32_t qd;
	clock_t tps, d = 0;
	uint32_t cur_max_ntasks = 0;
	stmf_worker_t *w;

	/* Check if we are trying to increase the # of threads */
	for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) {
		if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) {
			stmf_nworkers_cur++;
			stmf_nworkers_accepting_cmds++;
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are trying to decrease the # of workers */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) {
			stmf_nworkers_cur--;
			/*
			 * stmf_nworkers_accepting_cmds has already been
			 * updated by the request to reduce the # of workers.
			 */
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are being asked to quit */
	if (stmf_workers_state != STMF_WORKERS_ENABLED) {
		if (stmf_nworkers_cur) {
			workers_needed = 0;
			goto worker_mgmt_trigger_change;
		}
		return;
	}
	/* Check if we are starting */
	if (stmf_nworkers_cur < stmf_i_min_nworkers) {
		workers_needed = stmf_i_min_nworkers;
		goto worker_mgmt_trigger_change;
	}

	/*
	 * Once per second, collect the peak queue depths recorded by the
	 * workers since the last sample and reset the counters.
	 */
	tps = drv_usectohz(1 * 1000 * 1000);
	if ((stmf_wm_last != 0) &&
	    ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) {
		qd = 0;
		for (i = 0; i < stmf_nworkers_accepting_cmds; i++) {
			qd += stmf_workers[i].worker_max_qdepth_pu;
			stmf_workers[i].worker_max_qdepth_pu = 0;
			if (stmf_workers[i].worker_max_sys_qdepth_pu >
			    cur_max_ntasks) {
				cur_max_ntasks =
				    stmf_workers[i].worker_max_sys_qdepth_pu;
			}
			stmf_workers[i].worker_max_sys_qdepth_pu = 0;
		}
	}
	stmf_wm_last = ddi_get_lbolt();
	if (d <= tps) {
		/* still ramping up */
		return;
	}
	/* max qdepth cannot be more than max tasks */
	if (qd > cur_max_ntasks)
		qd = cur_max_ntasks;

	/* See if we have more workers */
	if (qd < stmf_nworkers_accepting_cmds) {
		/*
		 * Since we dont reduce the worker count right away, monitor
		 * the highest load during the scale_down_delay.
		 */
		if (qd > stmf_worker_scale_down_qd)
			stmf_worker_scale_down_qd = qd;
		if (stmf_worker_scale_down_timer == 0) {
			stmf_worker_scale_down_timer = ddi_get_lbolt() +
			    drv_usectohz(stmf_worker_scale_down_delay *
			    1000 * 1000);
			return;
		}
		if (ddi_get_lbolt() < stmf_worker_scale_down_timer) {
			return;
		}
		/* Its time to reduce the workers */
		if (stmf_worker_scale_down_qd < stmf_i_min_nworkers)
			stmf_worker_scale_down_qd = stmf_i_min_nworkers;
		if (stmf_worker_scale_down_qd > stmf_i_max_nworkers)
			stmf_worker_scale_down_qd = stmf_i_max_nworkers;
		if (stmf_worker_scale_down_qd == stmf_nworkers_cur)
			return;
		workers_needed = stmf_worker_scale_down_qd;
		stmf_worker_scale_down_qd = 0;
		goto worker_mgmt_trigger_change;
	}
	/* Load did not drop; cancel any pending scale-down. */
	stmf_worker_scale_down_qd = 0;
	stmf_worker_scale_down_timer = 0;
	/* Scale up to the clamped observed queue depth. */
	if (qd > stmf_i_max_nworkers)
		qd = stmf_i_max_nworkers;
	if (qd < stmf_i_min_nworkers)
		qd = stmf_i_min_nworkers;
	if (qd == stmf_nworkers_cur)
		return;
	workers_needed = qd;
	goto worker_mgmt_trigger_change;

	/* NOTREACHED */
	return;

worker_mgmt_trigger_change:
	ASSERT(workers_needed != stmf_nworkers_cur);
	if (workers_needed > stmf_nworkers_cur) {
		/* Create the additional worker threads. */
		stmf_nworkers_needed = workers_needed;
		for (i = stmf_nworkers_cur; i < workers_needed; i++) {
			w = &stmf_workers[i];
			w->worker_tid = thread_create(NULL, 0, stmf_worker_task,
			    (void *)&stmf_workers[i], 0, &p0, TS_RUN,
			    minclsyspri);
		}
		return;
	}
	/* At this point we know that we are decreasing the # of workers */
	stmf_nworkers_accepting_cmds = workers_needed;
	stmf_nworkers_needed = workers_needed;
	/* Signal the workers that its time to quit */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		w = &stmf_workers[i];
		ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED));
		mutex_enter(&w->worker_lock);
		w->worker_flags |= STMF_WORKER_TERMINATE;
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
		mutex_exit(&w->worker_lock);
	}
}
6705 
6706 /*
6707  * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
6708  * If all the data has been filled out, frees the xd and makes
6709  * db_lu_private NULL.
6710  */
6711 void
6712 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf)
6713 {
6714 	stmf_xfer_data_t *xd;
6715 	uint8_t *p;
6716 	int i;
6717 	uint32_t s;
6718 
6719 	xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
6720 	dbuf->db_data_size = 0;
6721 	dbuf->db_relative_offset = xd->size_done;
6722 	for (i = 0; i < dbuf->db_sglist_length; i++) {
6723 		s = min(xd->size_left, dbuf->db_sglist[i].seg_length);
6724 		p = &xd->buf[xd->size_done];
6725 		bcopy(p, dbuf->db_sglist[i].seg_addr, s);
6726 		xd->size_left -= s;
6727 		xd->size_done += s;
6728 		dbuf->db_data_size += s;
6729 		if (xd->size_left == 0) {
6730 			kmem_free(xd, xd->alloc_size);
6731 			dbuf->db_lu_private = NULL;
6732 			return;
6733 		}
6734 	}
6735 }
6736 
/* ARGSUSED */
stmf_status_t
stmf_dlun0_task_alloc(scsi_task_t *task)
{
	/* dlun0 keeps no per-task private state; nothing to allocate. */
	return (STMF_SUCCESS);
}
6743 
/*
 * New-task entry point for dlun0, the framework's default LU. Handles
 * task management functions, standard INQUIRY, and REPORT LUNS; any
 * other opcode is rejected with INVALID OPCODE.
 */
void
stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
	stmf_i_scsi_session_t *iss;
	uint32_t sz, minsz;
	uint8_t *p;
	stmf_xfer_data_t *xd;
	uint8_t inq_page_length = 31;	/* additional length of std inquiry */

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	switch (cdbp[0]) {
	case SCMD_INQUIRY:
		/*
		 * Basic protocol checks.  In addition, only reply to
		 * standard inquiry.  Otherwise, the LU provider needs
		 * to respond.
		 */

		/* Reject EVPD/CmdDt requests and nonzero page/control. */
		if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		/* Allocation length: big-endian value in CDB bytes 3-4. */
		task->task_cmd_xfer_length =
		    (((uint32_t)cdbp[3]) << 8) | cdbp[4];

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		/* Transfer at most the 36-byte standard inquiry. */
		sz = min(task->task_expected_xfer_length,
		    min(36, task->task_cmd_xfer_length));
		minsz = 36;

		if (sz == 0) {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			return;
		}

		if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) {
			/*
			 * Ignore any preallocated dbuf if the size is less
			 * than 36. It will be freed during the task_free.
			 */
			dbuf = NULL;
		}
		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0);
		if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = NULL;

		p = dbuf->db_sglist[0].seg_addr;

		/*
		 * Standard inquiry handling only.
		 */

		bzero(p, inq_page_length + 5);

		p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN;
		p[2] = 5;			/* SPC-3 compliant */
		p[3] = 0x12;			/* response data format */
		p[4] = inq_page_length;		/* additional length */
		p[6] = 0x80;

		/* Fixed-width vendor/product/revision fields (no NUL). */
		(void) strncpy((char *)p+8, "SUN     ", 8);
		(void) strncpy((char *)p+16, "COMSTAR	       ", 16);
		(void) strncpy((char *)p+32, "1.0 ", 4);

		dbuf->db_data_size = sz;
		dbuf->db_relative_offset = 0;
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);

		return;

	case SCMD_REPORT_LUNS:
		/* Allocation length: big-endian value in CDB bytes 6-9. */
		task->task_cmd_xfer_length =
		    ((((uint32_t)task->task_cdb[6]) << 24) |
		    (((uint32_t)task->task_cdb[7]) << 16) |
		    (((uint32_t)task->task_cdb[8]) << 8) |
		    ((uint32_t)task->task_cdb[9]));

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		sz = min(task->task_expected_xfer_length,
		    task->task_cmd_xfer_length);

		if (sz < 16) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		/* Snapshot the session's LUN map under the session lock. */
		iss = (stmf_i_scsi_session_t *)
		    task->task_session->ss_stmf_private;
		rw_enter(iss->iss_lockp, RW_WRITER);
		xd = stmf_session_prepare_report_lun_data(iss->iss_sm);
		rw_exit(iss->iss_lockp);

		if (xd == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}

		sz = min(sz, xd->size_left);
		xd->size_left = sz;
		minsz = min(512, sz);

		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
		if (dbuf == NULL) {
			kmem_free(xd, xd->alloc_size);
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = xd;
		stmf_xd_to_dbuf(dbuf);

		/* Reporting LUNs satisfies any pending inventory change. */
		atomic_and_32(&iss->iss_flags,
		    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}

	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
6890 
/*
 * Data transfer completion for dlun0. Continues a multi-part transfer if
 * the dbuf still carries xfer data, otherwise completes the task (either
 * locally with GOOD status or, for proxied tasks, by notifying pppt).
 */
void
stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred = dbuf->db_data_size;
	if (dbuf->db_lu_private) {
		/* There is more */
		stmf_xd_to_dbuf(dbuf);
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}
	/*
	 * If this is a proxy task, it will need to be completed from the
	 * proxy port provider. This message lets pppt know that the xfer
	 * is complete. When we receive the status from pppt, we will
	 * then relay that status back to the lport.
	 */
	if (itask->itask_flags & ITASK_PROXY_TASK) {
		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
		stmf_status_t ic_ret = STMF_FAILURE;
		uint64_t session_msg_id;
		/* Allocate a message id under the global state lock. */
		mutex_enter(&stmf_state.stmf_lock);
		session_msg_id = stmf_proxy_msg_id++;
		mutex_exit(&stmf_state.stmf_lock);
		/* send xfer done status to pppt */
		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
		    itask->itask_proxy_msg_id,
		    task->task_session->ss_session_id,
		    STMF_SUCCESS, session_msg_id);
		if (ic_xfer_done_msg) {
			ic_ret = ic_tx_msg(ic_xfer_done_msg);
			if (ic_ret != STMF_IC_MSG_SUCCESS) {
				cmn_err(CE_WARN, "unable to xmit session msg");
			}
		}
		/* task will be completed from pppt */
		return;
	}
	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
6938 
/* ARGSUSED */
void
stmf_dlun0_status_done(scsi_task_t *task)
{
	/* dlun0 has nothing to do when status delivery completes. */
}
6944 
/* ARGSUSED */
void
stmf_dlun0_task_free(scsi_task_t *task)
{
	/* dlun0 allocates no per-task state, so there is nothing to free. */
}
6950 
/* ARGSUSED */
stmf_status_t
stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
{
	scsi_task_t *task = (scsi_task_t *)arg;
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int i;
	uint8_t map;

	/*
	 * If the task being aborted is itself a task management command
	 * that initiated a reset, undo the reset bookkeeping: clear the
	 * LU-level reset-active flag for LU-scoped functions, or unwind
	 * the whole session's reset state for target-scoped functions.
	 */
	if ((task->task_mgmt_function) && (itask->itask_flags &
	    (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) {
		switch (task->task_mgmt_function) {
		case TM_ABORT_TASK:
		case TM_ABORT_TASK_SET:
		case TM_CLEAR_TASK_SET:
		case TM_LUN_RESET:
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
			break;
		case TM_TARGET_RESET:
		case TM_TARGET_COLD_RESET:
		case TM_TARGET_WARM_RESET:
			stmf_abort_target_reset(task);
			break;
		}
		return (STMF_ABORT_SUCCESS);
	}

	/*
	 * OK so its not a task mgmt. Make sure we free any xd sitting
	 * inside any dbuf.
	 */
	if ((map = itask->itask_allocated_buf_map) != 0) {
		/* One bit per dbuf slot; walk all four possible slots. */
		for (i = 0; i < 4; i++) {
			if ((map & 1) &&
			    ((itask->itask_dbufs[i])->db_lu_private)) {
				stmf_xfer_data_t *xd;
				stmf_data_buf_t *dbuf;

				dbuf = itask->itask_dbufs[i];
				xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
				dbuf->db_lu_private = NULL;
				kmem_free(xd, xd->alloc_size);
			}
			map >>= 1;
		}
	}
	return (STMF_ABORT_SUCCESS);
}
7001 
7002 void
7003 stmf_dlun0_task_poll(struct scsi_task *task)
7004 {
7005 	/* Right now we only do this for handling task management functions */
7006 	ASSERT(task->task_mgmt_function);
7007 
7008 	switch (task->task_mgmt_function) {
7009 	case TM_ABORT_TASK:
7010 	case TM_ABORT_TASK_SET:
7011 	case TM_CLEAR_TASK_SET:
7012 	case TM_LUN_RESET:
7013 		(void) stmf_lun_reset_poll(task->task_lu, task, 0);
7014 		return;
7015 	case TM_TARGET_RESET:
7016 	case TM_TARGET_COLD_RESET:
7017 	case TM_TARGET_WARM_RESET:
7018 		stmf_target_reset_poll(task);
7019 		return;
7020 	}
7021 }
7022 
/* ARGSUSED */
void
stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg)
{
	/* dlun0 is never onlined/offlined through stmf_ctl(). */
	/* This function will never be called */
	cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd);
}
7030 
/*
 * Allocate and wire up dlun0, the pseudo-LU that fields commands which
 * do not map to a real LU (e.g. REPORT LUNS for unmapped sessions).
 */
void
stmf_dlun_init()
{
	stmf_i_lu_t *ilu;

	dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0);
	dlun0->lu_task_alloc = stmf_dlun0_task_alloc;
	dlun0->lu_new_task = stmf_dlun0_new_task;
	dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done;
	dlun0->lu_send_status_done = stmf_dlun0_status_done;
	dlun0->lu_task_free = stmf_dlun0_task_free;
	dlun0->lu_abort = stmf_dlun0_abort;
	dlun0->lu_task_poll = stmf_dlun0_task_poll;
	dlun0->lu_ctl = stmf_dlun0_ctl;

	/* Point the task counter at the first of the two flip counters. */
	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
}
7049 
7050 stmf_status_t
7051 stmf_dlun_fini()
7052 {
7053 	stmf_i_lu_t *ilu;
7054 
7055 	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
7056 
7057 	ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
7058 	if (ilu->ilu_ntasks) {
7059 		stmf_i_scsi_task_t *itask, *nitask;
7060 
7061 		nitask = ilu->ilu_tasks;
7062 		do {
7063 			itask = nitask;
7064 			nitask = itask->itask_lu_next;
7065 			dlun0->lu_task_free(itask->itask_task);
7066 			stmf_free(itask->itask_task);
7067 		} while (nitask != NULL);
7068 
7069 	}
7070 	stmf_free(dlun0);
7071 	return (STMF_SUCCESS);
7072 }
7073 
7074 void
7075 stmf_abort_target_reset(scsi_task_t *task)
7076 {
7077 	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
7078 	    task->task_session->ss_stmf_private;
7079 	stmf_lun_map_t *lm;
7080 	stmf_lun_map_ent_t *lm_ent;
7081 	stmf_i_lu_t *ilu;
7082 	int i;
7083 
7084 	rw_enter(iss->iss_lockp, RW_READER);
7085 	lm = iss->iss_sm;
7086 	for (i = 0; i < lm->lm_nentries; i++) {
7087 		if (lm->lm_plus[i] == NULL)
7088 			continue;
7089 		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
7090 		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
7091 		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
7092 			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7093 		}
7094 	}
7095 	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
7096 	rw_exit(iss->iss_lockp);
7097 }
7098 
7099 /*
7100  * The return value is only used by function managing target reset.
7101  */
7102 stmf_status_t
7103 stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset)
7104 {
7105 	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7106 	int ntasks_pending;
7107 
7108 	ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free;
7109 	/*
7110 	 * This function is also used during Target reset. The idea is that
7111 	 * once all the commands are aborted, call the LU's reset entry
7112 	 * point (abort entry point with a reset flag). But if this Task
7113 	 * mgmt is running on this LU then all the tasks cannot be aborted.
7114 	 * one task (this task) will still be running which is OK.
7115 	 */
7116 	if ((ntasks_pending == 0) || ((task->task_lu == lu) &&
7117 	    (ntasks_pending == 1))) {
7118 		stmf_status_t ret;
7119 
7120 		if ((task->task_mgmt_function == TM_LUN_RESET) ||
7121 		    (task->task_mgmt_function == TM_TARGET_RESET) ||
7122 		    (task->task_mgmt_function == TM_TARGET_WARM_RESET) ||
7123 		    (task->task_mgmt_function == TM_TARGET_COLD_RESET)) {
7124 			ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0);
7125 		} else {
7126 			ret = STMF_SUCCESS;
7127 		}
7128 		if (ret == STMF_SUCCESS) {
7129 			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7130 		}
7131 		if (target_reset) {
7132 			return (ret);
7133 		}
7134 		if (ret == STMF_SUCCESS) {
7135 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7136 			return (ret);
7137 		}
7138 		if (ret != STMF_BUSY) {
7139 			stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL);
7140 			return (ret);
7141 		}
7142 	}
7143 
7144 	if (target_reset) {
7145 		/* Tell target reset polling code that we are not done */
7146 		return (STMF_BUSY);
7147 	}
7148 
7149 	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
7150 	    != STMF_SUCCESS) {
7151 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7152 		    STMF_ALLOC_FAILURE, NULL);
7153 		return (STMF_SUCCESS);
7154 	}
7155 
7156 	return (STMF_SUCCESS);
7157 }
7158 
/*
 * Poll every LU mapped into the session for completion of a target-scoped
 * reset.  Sends GOOD status once no LU reports a reset still active.
 */
void
stmf_target_reset_poll(struct scsi_task *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	stmf_status_t ret;
	int i;
	int not_done = 0;

	ASSERT(iss->iss_flags & ISS_RESET_ACTIVE);

	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/*
			 * Drop the session lock around the per-LU poll,
			 * which may call into the LU provider.
			 */
			rw_exit(iss->iss_lockp);
			ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1);
			rw_enter(iss->iss_lockp, RW_READER);
			if (ret == STMF_SUCCESS)
				continue;
			not_done = 1;
			/* Hard failure on any LU aborts the whole reset. */
			if (ret != STMF_BUSY) {
				rw_exit(iss->iss_lockp);
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ABORTED, NULL);
				return;
			}
		}
	}
	rw_exit(iss->iss_lockp);

	/* Some LU is still busy: schedule another polling pass. */
	if (not_done) {
		if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
		    != STMF_SUCCESS) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		return;
	}

	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);

	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
7211 
7212 stmf_status_t
7213 stmf_lu_add_event(stmf_lu_t *lu, int eventid)
7214 {
7215 	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7216 
7217 	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7218 		return (STMF_INVALID_ARG);
7219 	}
7220 
7221 	STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid);
7222 	return (STMF_SUCCESS);
7223 }
7224 
7225 stmf_status_t
7226 stmf_lu_remove_event(stmf_lu_t *lu, int eventid)
7227 {
7228 	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7229 
7230 	if (eventid == STMF_EVENT_ALL) {
7231 		STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
7232 		return (STMF_SUCCESS);
7233 	}
7234 
7235 	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7236 		return (STMF_INVALID_ARG);
7237 	}
7238 
7239 	STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
7240 	return (STMF_SUCCESS);
7241 }
7242 
7243 stmf_status_t
7244 stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
7245 {
7246 	stmf_i_local_port_t *ilport =
7247 	    (stmf_i_local_port_t *)lport->lport_stmf_private;
7248 
7249 	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7250 		return (STMF_INVALID_ARG);
7251 	}
7252 
7253 	STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
7254 	return (STMF_SUCCESS);
7255 }
7256 
7257 stmf_status_t
7258 stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
7259 {
7260 	stmf_i_local_port_t *ilport =
7261 	    (stmf_i_local_port_t *)lport->lport_stmf_private;
7262 
7263 	if (eventid == STMF_EVENT_ALL) {
7264 		STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
7265 		return (STMF_SUCCESS);
7266 	}
7267 
7268 	if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7269 		return (STMF_INVALID_ARG);
7270 	}
7271 
7272 	STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
7273 	return (STMF_SUCCESS);
7274 }
7275 
7276 void
7277 stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
7278 {
7279 	if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
7280 	    (ilu->ilu_lu->lu_event_handler != NULL)) {
7281 		ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
7282 	}
7283 }
7284 
7285 void
7286 stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
7287 				uint32_t flags)
7288 {
7289 	if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
7290 	    (ilport->ilport_lport->lport_event_handler != NULL)) {
7291 		ilport->ilport_lport->lport_event_handler(
7292 		    ilport->ilport_lport, eventid, arg, flags);
7293 	}
7294 }
7295 
7296 /*
7297  * With the possibility of having multiple itl sessions pointing to the
7298  * same itl_kstat_info, the ilu_kstat_lock mutex is used to synchronize
7299  * the kstat update of the ilu_kstat_io, itl_kstat_taskq and itl_kstat_lu_xfer
7300  * statistics.
7301  */
/*
 * Kstat hook: a task has entered the wait queue.  Stamps the start time
 * and bumps the ITL/LU waitq counters under the LU kstat lock.
 */
void
stmf_itl_task_start(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t	*itl = itask->itask_itl_datap;
	scsi_task_t	*task = itask->itask_task;
	stmf_i_lu_t	*ilu;

	/* dlun0 and itl-less tasks carry no per-ITL statistics. */
	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	itask->itask_start_timestamp = gethrtime();
	kstat_waitq_enter(KSTAT_IO_PTR(itl->itl_kstat_taskq));
	stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_enter);
	mutex_exit(ilu->ilu_kstat_io->ks_lock);

	/* lport queue stats are updated outside the LU kstat lock. */
	stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_enter);
}
7320 
/*
 * Kstat hook: a task has moved from the wait queue to the run queue
 * (i.e. the LU has accepted it).
 */
void
stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t	*itl = itask->itask_itl_datap;
	scsi_task_t	*task = itask->itask_task;
	stmf_i_lu_t	*ilu;

	/* dlun0 and itl-less tasks carry no per-ITL statistics. */
	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	kstat_waitq_to_runq(KSTAT_IO_PTR(itl->itl_kstat_taskq));
	stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_to_runq);
	mutex_exit(ilu->ilu_kstat_io->ks_lock);

	/* lport queue stats are updated outside the LU kstat lock. */
	stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_to_runq);
}
7338 
/*
 * Kstat hook: a task has completed.  Accumulates elapsed/waitq times and
 * byte counters into the ITL info kstat, then exits the task from either
 * the run queue or the wait queue, whichever it was on.
 */
void
stmf_itl_task_done(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t		*itl = itask->itask_itl_datap;
	scsi_task_t		*task = itask->itask_task;
	kstat_io_t		*kip;
	hrtime_t		elapsed_time;
	stmf_kstat_itl_info_t	*itli;
	stmf_i_lu_t	*ilu;

	/* dlun0 and itl-less tasks carry no per-ITL statistics. */
	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	itli = (stmf_kstat_itl_info_t *)KSTAT_NAMED_PTR(itl->itl_kstat_info);
	kip = KSTAT_IO_PTR(itl->itl_kstat_taskq);

	itli->i_task_waitq_elapsed.value.ui64 += itask->itask_waitq_time;

	/* Total wall-clock time this task spent in STMF. */
	itask->itask_done_timestamp = gethrtime();
	elapsed_time =
	    itask->itask_done_timestamp - itask->itask_start_timestamp;

	if (task->task_flags & TF_READ_DATA) {
		kip->reads++;
		kip->nread += itask->itask_read_xfer;
		itli->i_task_read_elapsed.value.ui64 += elapsed_time;
		itli->i_lu_read_elapsed.value.ui64 +=
		    itask->itask_lu_read_time;
		itli->i_lport_read_elapsed.value.ui64 +=
		    itask->itask_lport_read_time;
	}

	if (task->task_flags & TF_WRITE_DATA) {
		kip->writes++;
		kip->nwritten += itask->itask_write_xfer;
		itli->i_task_write_elapsed.value.ui64 += elapsed_time;
		itli->i_lu_write_elapsed.value.ui64 +=
		    itask->itask_lu_write_time;
		itli->i_lport_write_elapsed.value.ui64 +=
		    itask->itask_lport_write_time;
	}

	/*
	 * Exit from whichever queue the task is counted on; the lport
	 * update is deliberately done after dropping the LU kstat lock.
	 */
	if (itask->itask_flags & ITASK_KSTAT_IN_RUNQ) {
		kstat_runq_exit(kip);
		stmf_update_kstat_lu_q(task, kstat_runq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_runq_exit);
	} else {
		kstat_waitq_exit(kip);
		stmf_update_kstat_lu_q(task, kstat_waitq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_waitq_exit);
	}
}
7395 
7396 void
7397 stmf_lu_xfer_start(scsi_task_t *task)
7398 {
7399 	stmf_i_scsi_task_t *itask = task->task_stmf_private;
7400 	stmf_itl_data_t	*itl = itask->itask_itl_datap;
7401 	stmf_i_lu_t	*ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7402 	kstat_io_t		*kip;
7403 
7404 	if (itl == NULL || task->task_lu == dlun0)
7405 		return;
7406 
7407 	kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
7408 	mutex_enter(ilu->ilu_kstat_io->ks_lock);
7409 	kstat_runq_enter(kip);
7410 	mutex_exit(ilu->ilu_kstat_io->ks_lock);
7411 }
7412 
/*
 * Kstat hook: an LU data transfer has finished.  Accumulates the elapsed
 * time into the task (atomically, since transfers may complete on other
 * threads) and the byte counts into the ITL's LU-xfer kstat.
 */
void
stmf_lu_xfer_done(scsi_task_t *task, boolean_t read, uint64_t xfer_bytes,
    hrtime_t elapsed_time)
{
	stmf_i_scsi_task_t	*itask = task->task_stmf_private;
	stmf_itl_data_t		*itl = itask->itask_itl_datap;
	stmf_i_lu_t	*ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	kstat_io_t		*kip;

	/* dlun0 and itl-less tasks carry no per-ITL statistics. */
	if (itl == NULL || task->task_lu == dlun0)
		return;

	if (read) {
		atomic_add_64((uint64_t *)&itask->itask_lu_read_time,
		    elapsed_time);
	} else {
		atomic_add_64((uint64_t *)&itask->itask_lu_write_time,
		    elapsed_time);
	}

	kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	kstat_runq_exit(kip);
	if (read) {
		kip->reads++;
		kip->nread += xfer_bytes;
	} else {
		kip->writes++;
		kip->nwritten += xfer_bytes;
	}
	mutex_exit(ilu->ilu_kstat_io->ks_lock);
}
7445 
/*
 * Kstat/DTrace hook: an lport data transfer is starting for this dbuf.
 * Fires the scsi-xfer-start probe and stamps the dbuf's start time so
 * stmf_lport_xfer_done() can compute the elapsed time.
 */
static void
stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t		*itl = itask->itask_itl_datap;

	if (itl == NULL)
		return;

	DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf);

	dbuf->db_xfer_start_timestamp = gethrtime();
}
7459 
/*
 * Kstat/DTrace hook: an lport data transfer for this dbuf has completed.
 * Accumulates per-task elapsed time and byte counts (atomically, since
 * multiple dbufs may complete concurrently) and updates the ITL's
 * lport-xfer kstat under the lport kstat lock.
 */
static void
stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t		*itl = itask->itask_itl_datap;
	scsi_task_t		*task;
	stmf_i_local_port_t	*ilp;
	kstat_io_t		*kip;
	hrtime_t		elapsed_time;
	uint64_t		xfer_size;

	if (itl == NULL)
		return;

	task = (scsi_task_t *)itask->itask_task;
	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	/* Failed transfers count zero bytes but still count as an op. */
	xfer_size = (dbuf->db_xfer_status == STMF_SUCCESS) ?
	    dbuf->db_data_size : 0;

	elapsed_time = gethrtime() - dbuf->db_xfer_start_timestamp;
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		atomic_add_64((uint64_t *)&itask->itask_lport_read_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_read_xfer,
		    xfer_size);
	} else {
		atomic_add_64((uint64_t *)&itask->itask_lport_write_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_write_xfer,
		    xfer_size);
	}

	DTRACE_PROBE3(scsi__xfer__end, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf, hrtime_t, elapsed_time);

	kip = KSTAT_IO_PTR(itl->itl_kstat_lport_xfer);
	mutex_enter(ilp->ilport_kstat_io->ks_lock);
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		kip->reads++;
		kip->nread += xfer_size;
	} else {
		kip->writes++;
		kip->nwritten += xfer_size;
	}
	mutex_exit(ilp->ilport_kstat_io->ks_lock);

	/* Reset so a reused dbuf gets a fresh timestamp. */
	dbuf->db_xfer_start_timestamp = 0;
}
7507 
/*
 * Start the STMF service thread (a single-threaded taskq running
 * stmf_svc()).  Idempotent: a no-op if the thread is already running.
 */
void
stmf_svc_init()
{
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
		return;
	stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
	    TASKQ_DEFAULTPRI, 0);
	/* STMF_SVC_STARTED is set by stmf_svc() itself once it runs. */
	(void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
	    stmf_svc, 0, DDI_SLEEP);
}
7518 
/*
 * Ask the service thread to terminate and wait up to ~5 seconds for it to
 * acknowledge by clearing STMF_SVC_STARTED.  Returns STMF_BUSY if the
 * thread did not exit in time (taskq is then left alive).
 */
stmf_status_t
stmf_svc_fini()
{
	uint32_t i;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
		stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
		cv_signal(&stmf_state.stmf_cv);
	}
	mutex_exit(&stmf_state.stmf_lock);

	/* Wait for 5 seconds */
	for (i = 0; i < 500; i++) {
		if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
			delay(drv_usectohz(10000));
		else
			break;
	}
	if (i == 500)
		return (STMF_BUSY);

	ddi_taskq_destroy(stmf_state.stmf_svc_taskq);

	return (STMF_SUCCESS);
}
7545 
/*
 * The STMF service thread.  Loops forever (until STMF_SVC_TERMINATE):
 *  - drains the "active" request queue (online/offline requests queued by
 *    stmf_svc_queue()), moving each onto the "waiting" queue,
 *  - completes waiting requests by calling the LU/lport ctl entry points
 *    (LU offline waits until all its tasks are free),
 *  - when idle, runs periodic housekeeping: ilu timing checks, free-task
 *    draining, worker-thread management, and initial-LUN-mapped events,
 *    then naps ~20ms or until stmf_svc_queue() signals new work.
 * Runs with stmf_state.stmf_lock held except where noted.
 */
/* ARGSUSED */
void
stmf_svc(void *arg)
{
	stmf_svc_req_t *req, **preq;
	clock_t td;
	clock_t	drain_start, drain_next = 0;
	clock_t	timing_start, timing_next = 0;
	clock_t worker_delay = 0;
	int deq;
	stmf_lu_t *lu;
	stmf_i_lu_t *ilu;
	stmf_local_port_t *lport;
	stmf_i_local_port_t *ilport, *next_ilport;
	stmf_i_scsi_session_t *iss;

	/* Idle nap interval: 20ms. */
	td = drv_usectohz(20000);

	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE;

stmf_svc_loop:
	if (stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE) {
		stmf_state.stmf_svc_flags &=
		    ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE);
		mutex_exit(&stmf_state.stmf_lock);
		return;
	}

	/* Pop one request off the active queue, if any. */
	if (stmf_state.stmf_svc_active) {
		int waitq_add = 0;
		req = stmf_state.stmf_svc_active;
		stmf_state.stmf_svc_active = req->svc_next;

		switch (req->svc_cmd) {
		case STMF_CMD_LPORT_ONLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_OFFLINE:
			/* Fallthrough */
		case STMF_CMD_LU_ONLINE:
			/* Nothing to do */
			waitq_add = 1;
			break;

		case STMF_CMD_LU_OFFLINE:
			/* Remove all mappings of this LU */
			stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj);
			/* Kill all the pending I/Os for this LU */
			mutex_exit(&stmf_state.stmf_lock);
			stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL,
			    STMF_ABORTED);
			mutex_enter(&stmf_state.stmf_lock);
			waitq_add = 1;
			break;
		default:
			cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d",
			    req->svc_cmd);
		}

		if (waitq_add) {
			/* Put it in the wait queue */
			req->svc_next = stmf_state.stmf_svc_waiting;
			stmf_state.stmf_svc_waiting = req;
		}
	}

	/* The waiting list is not going to be modified by anybody else */
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * Try to complete each waiting request; a request is dequeued
	 * (deq) and freed once its ctl entry point has been invoked.
	 */
	for (preq = &stmf_state.stmf_svc_waiting; (*preq) != NULL; ) {
		req = *preq;
		deq = 0;

		switch (req->svc_cmd) {
		case STMF_CMD_LU_ONLINE:
			lu = (stmf_lu_t *)req->svc_obj;
			deq = 1;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;

		case STMF_CMD_LU_OFFLINE:
			lu = (stmf_lu_t *)req->svc_obj;
			ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
			/* Offline must wait until all tasks are free. */
			if (ilu->ilu_ntasks != ilu->ilu_ntasks_free)
				break;
			deq = 1;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;

		case STMF_CMD_LPORT_OFFLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_ONLINE:
			lport = (stmf_local_port_t *)req->svc_obj;
			deq = 1;
			lport->lport_ctl(lport, req->svc_cmd, &req->svc_info);
			break;
		}
		if (deq) {
			*preq = req->svc_next;
			kmem_free(req, req->svc_req_alloc_size);
		} else {
			preq = &req->svc_next;
		}
	}

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_svc_active == NULL) {
		/* Do timeouts */
		if (stmf_state.stmf_nlus &&
		    ((!timing_next) || (ddi_get_lbolt() >= timing_next))) {
			if (!stmf_state.stmf_svc_ilu_timing) {
				/* we are starting a new round */
				stmf_state.stmf_svc_ilu_timing =
				    stmf_state.stmf_ilulist;
				timing_start = ddi_get_lbolt();
			}
			stmf_check_ilu_timing();
			if (!stmf_state.stmf_svc_ilu_timing) {
				/* we finished a complete round */
				timing_next =
				    timing_start + drv_usectohz(5*1000*1000);
			} else {
				/* we still have some ilu items to check */
				timing_next =
				    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
			}
			/* New work takes precedence over housekeeping. */
			if (stmf_state.stmf_svc_active)
				goto stmf_svc_loop;
		}
		/* Check if there are free tasks to clear */
		if (stmf_state.stmf_nlus &&
		    ((!drain_next) || (ddi_get_lbolt() >= drain_next))) {
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we are starting a new round */
				stmf_state.stmf_svc_ilu_draining =
				    stmf_state.stmf_ilulist;
				drain_start = ddi_get_lbolt();
			}
			stmf_check_freetask();
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we finished a complete round */
				drain_next =
				    drain_start + drv_usectohz(10*1000*1000);
			} else {
				/* we still have some ilu items to check */
				drain_next =
				    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
			}
			/* New work takes precedence over housekeeping. */
			if (stmf_state.stmf_svc_active)
				goto stmf_svc_loop;
		}

		/* Check if we need to run worker_mgmt */
		if (ddi_get_lbolt() > worker_delay) {
			stmf_worker_mgmt();
			worker_delay = ddi_get_lbolt() +
			    stmf_worker_mgmt_delay;
		}

		/* Check if any active session got its 1st LUN */
		if (stmf_state.stmf_process_initial_luns) {
			int stmf_level = 0;
			int port_level;
			for (ilport = stmf_state.stmf_ilportlist; ilport;
			    ilport = next_ilport) {
				int ilport_lock_held;
				next_ilport = ilport->ilport_next;
				if ((ilport->ilport_flags &
				    ILPORT_SS_GOT_INITIAL_LUNS) == 0) {
					continue;
				}
				port_level = 0;
				rw_enter(&ilport->ilport_lock, RW_READER);
				ilport_lock_held = 1;
				for (iss = ilport->ilport_ss_list; iss;
				    iss = iss->iss_next) {
					if ((iss->iss_flags &
					    ISS_GOT_INITIAL_LUNS) == 0) {
						continue;
					}
					port_level++;
					stmf_level++;
					atomic_and_32(&iss->iss_flags,
					    ~ISS_GOT_INITIAL_LUNS);
					atomic_or_32(&iss->iss_flags,
					    ISS_EVENT_ACTIVE);
					/*
					 * Drop both locks to deliver the
					 * event; ISS_EVENT_ACTIVE keeps
					 * the session pinned meanwhile.
					 */
					rw_exit(&ilport->ilport_lock);
					ilport_lock_held = 0;
					mutex_exit(&stmf_state.stmf_lock);
					stmf_generate_lport_event(ilport,
					    LPORT_EVENT_INITIAL_LUN_MAPPED,
					    iss->iss_ss, 0);
					atomic_and_32(&iss->iss_flags,
					    ~ISS_EVENT_ACTIVE);
					mutex_enter(&stmf_state.stmf_lock);
					/*
					 * scan all the ilports again as the
					 * ilport list might have changed.
					 */
					next_ilport =
					    stmf_state.stmf_ilportlist;
					break;
				}
				if (port_level == 0) {
					atomic_and_32(&ilport->ilport_flags,
					    ~ILPORT_SS_GOT_INITIAL_LUNS);
				}
				/* drop the lock if we are holding it. */
				if (ilport_lock_held == 1)
					rw_exit(&ilport->ilport_lock);

				/* Max 4 session at a time */
				if (stmf_level >= 4) {
					break;
				}
			}
			if (stmf_level == 0) {
				stmf_state.stmf_process_initial_luns = 0;
			}
		}

		/* Nap until new work arrives or the interval expires. */
		stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
		(void) cv_reltimedwait(&stmf_state.stmf_cv,
		    &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
		stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
	}
	goto stmf_svc_loop;
}
7774 
7775 void
7776 stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
7777 {
7778 	stmf_svc_req_t *req;
7779 	int s;
7780 
7781 	ASSERT(!mutex_owned(&stmf_state.stmf_lock));
7782 	s = sizeof (stmf_svc_req_t);
7783 	if (info->st_additional_info) {
7784 		s += strlen(info->st_additional_info) + 1;
7785 	}
7786 	req = kmem_zalloc(s, KM_SLEEP);
7787 
7788 	req->svc_cmd = cmd;
7789 	req->svc_obj = obj;
7790 	req->svc_info.st_rflags = info->st_rflags;
7791 	if (info->st_additional_info) {
7792 		req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
7793 		    sizeof (stmf_svc_req_t)));
7794 		(void) strcpy(req->svc_info.st_additional_info,
7795 		    info->st_additional_info);
7796 	}
7797 	req->svc_req_alloc_size = s;
7798 
7799 	mutex_enter(&stmf_state.stmf_lock);
7800 	req->svc_next = stmf_state.stmf_svc_active;
7801 	stmf_state.stmf_svc_active = req;
7802 	if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
7803 		cv_signal(&stmf_state.stmf_cv);
7804 	}
7805 	mutex_exit(&stmf_state.stmf_lock);
7806 }
7807 
7808 void
7809 stmf_trace(caddr_t ident, const char *fmt, ...)
7810 {
7811 	va_list args;
7812 	char tbuf[160];
7813 	int len;
7814 
7815 	if (!stmf_trace_on)
7816 		return;
7817 	len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "",
7818 	    ddi_get_lbolt());
7819 	va_start(args, fmt);
7820 	len += vsnprintf(tbuf + len, 158 - len, fmt, args);
7821 	va_end(args);
7822 
7823 	if (len > 158) {
7824 		len = 158;
7825 	}
7826 	tbuf[len++] = '\n';
7827 	tbuf[len] = 0;
7828 
7829 	mutex_enter(&trace_buf_lock);
7830 	bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1);
7831 	trace_buf_curndx += len;
7832 	if (trace_buf_curndx > (trace_buf_size - 320))
7833 		trace_buf_curndx = 0;
7834 	mutex_exit(&trace_buf_lock);
7835 }
7836 
7837 void
7838 stmf_trace_clear()
7839 {
7840 	if (!stmf_trace_on)
7841 		return;
7842 	mutex_enter(&trace_buf_lock);
7843 	trace_buf_curndx = 0;
7844 	if (trace_buf_size > 0)
7845 		stmf_trace_buf[0] = 0;
7846 	mutex_exit(&trace_buf_lock);
7847 }
7848 
/*
 * A task abort has failed badly enough that the owning LU or local port
 * must be taken offline.  Builds the state-change info and queues the
 * offline via stmf_ctl().  offline_lu selects LU (nonzero) vs lport.
 * 'info' is an optional human-readable reason for tracing.
 */
static void
stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info)
{
	stmf_state_change_info_t	change_info;
	void				*ctl_private;
	uint32_t			ctl_cmd;
	int				msg = 0;

	stmf_trace("FROM STMF", "abort_task_offline called for %s: %s",
	    offline_lu ? "LU" : "LPORT", info ? info : "no additional info");
	change_info.st_additional_info = info;
	if (offline_lu) {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LU_ABORT;
		ctl_private = task->task_lu;
		/* Only log when we're transitioning an online LU. */
		if (((stmf_i_lu_t *)
		    task->task_lu->lu_stmf_private)->ilu_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LU_OFFLINE;
	} else {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LPORT_ABORT;
		ctl_private = task->task_lport;
		/* Only log when we're transitioning an online lport. */
		if (((stmf_i_local_port_t *)
		    task->task_lport->lport_stmf_private)->ilport_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LPORT_OFFLINE;
	}

	if (msg) {
		stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
		    offline_lu ? "LU" : "LPORT", info ? info :
		    "<no additional info>");
	}
	(void) stmf_ctl(ctl_cmd, ctl_private, &change_info);
}
7889