/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Multiplexed I/O SCSI vHCI implementation
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/sunmdi.h>
#include <sys/mdi_impldefs.h>
#include <sys/scsi/adapters/scsi_vhci.h>
#include <sys/disp.h>
#include <sys/byteorder.h>

extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;

#ifdef	DEBUG
int	vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
#endif

/* retry count for the vhci_do_prout command when NOT READY is returned */
int vhci_prout_not_ready_retry = 180;

/*
 * These values are defined to support the internal retry of
 * SCSI packets for better sense code handling.
 */
#define	VHCI_CMD_CMPLT	0
#define	VHCI_CMD_RETRY	1
#define	VHCI_CMD_ERROR	-1

#define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
#define	VHCI_SCSI_PERR		0x47
#define	VHCI_PGR_ILLEGALOP	-2
#define	VHCI_NUM_UPDATE_TASKQ	8
/* changed to 132 to accommodate HDS */
#define	VHCI_STD_INQ_SIZE	132

/*
 * Version Macros
 */
#define	VHCI_NAME_VERSION	"SCSI VHCI Driver %I%"
char		vhci_version_name[] = VHCI_NAME_VERSION;

int		vhci_first_time = 0;
clock_t		vhci_to_ticks = 0;
int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
kcondvar_t	vhci_cv;
kmutex_t	vhci_global_mutex;
void		*vhci_softstate = NULL; /* for soft state */

/*
 * Flag to delay the retry of the reserve command
 */
int		vhci_reserve_delay = 100000;
static int	vhci_path_quiesce_timeout = 60;
static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];

/* uscsi delay for a TRAN_BUSY */
static int vhci_uscsi_delay = 100000;
static int vhci_uscsi_retry_count = 180;
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static timeout_id_t vhci_restart_timeid = 0;

/*
 * Bidirectional map of 'target-port' to port id <pid> for support of
 * iostat(1M) '-Xx' and '-Yx' output.
 */
static kmutex_t		vhci_targetmap_mutex;
static uint_t		vhci_targetmap_pid = 1;
static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */
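
/*
 * Note for readers: the first time a given 'target-port' string (e.g. an
 * FC remote port WWN) is seen, it is assigned the next compact <pid> from
 * vhci_targetmap_pid; both directions of the mapping are kept so the pid
 * can later be presented to (and resolved for) iostat(1M).
 */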

/*
 * functions exported by scsi_vhci struct cb_ops
 */
static int vhci_open(dev_t *, int, int, cred_t *);
static int vhci_close(dev_t, int, int, cred_t *);
static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * functions exported by scsi_vhci struct dev_ops
 */
static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * functions exported by scsi_vhci scsi_hba_tran_t transport table
 */
static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
    struct scsi_device *);
static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_target(struct scsi_address *, int level,
    uint8_t select_path);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip);
static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
    caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
    void *, void *);
static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);

/*
 * functions registered with the mpxio framework via mdi_vhci_ops_t
 */
static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
    mdi_pathinfo_state_t, uint32_t, int);
static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_failover(dev_info_t *, dev_info_t *, int);
static void vhci_client_attached(dev_info_t *);

static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
    int, caddr_t);
static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
    uint_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
    sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
    struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
    scsi_vhci_lun_t *, char *, char *);

static char *vhci_devnm_to_guid(char *);
static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
    int, int (*func)(caddr_t));
static void vhci_intr(struct scsi_pkt *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt **);
static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
static void vhci_lun_free(dev_info_t *);
static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
    uint8_t, uint8_t);
void vhci_update_pathstates(void *);

#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);

/*
 * MP-API related functions
 */
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
    scsi_vhci_lun_t *, mdi_pathinfo_t *);
extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
    uint8_t, void*);
extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
    scsi_vhci_lun_t *);

/* Special export to MP-API of tpgs non-'fops' entry point */
int (*tpgs_set_target_groups)(struct scsi_address *, int, int);

#define	VHCI_DMA_MAX_XFER_CAP	0xffffffffULL

#define	VHCI_MAX_PGR_RETRIES	3

/*
 * Macros for the device-type mpxio options
 */
#define	LOAD_BALANCE_OPTIONS		"load-balance-options"
#define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
#define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
#define	DEVICE_TYPE_STR			"device-type"
#define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')

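/*
 * For reference, these properties come from scsi_vhci.conf; an entry has
 * roughly the following shape (illustrative only -- the exact device-type
 * string and region size depend on the array being configured):
 *
 *	device-type-mpxio-options-list =
 *	    "device-type=SUN     SE6920",
 *	    "load-balance-options=logical-block-options";
 *	logical-block-options = "load-balance=logical-block",
 *	    "region-size=18";
 */
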
static struct cb_ops vhci_cb_ops = {
	vhci_open,			/* open */
	vhci_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	vhci_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops vhci_ops = {
	DEVO_REV,
	0,
	vhci_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	vhci_attach,		/* attach and detach are mandatory */
	vhci_detach,
	nodev,			/* reset */
	&vhci_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	vhci_version_name,	/* module name */
	&vhci_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static mdi_vhci_ops_t vhci_opinfo = {
	MDI_VHCI_OPS_REV,
	vhci_pathinfo_init,		/* Pathinfo node init callback	*/
	vhci_pathinfo_uninit,		/* Pathinfo uninit callback	*/
	vhci_pathinfo_state_change,	/* Pathinfo node state change	*/
	vhci_failover,			/* failover callback		*/
	vhci_client_attached		/* client attached callback	*/
};

/*
 * The scsi_failover table defines an ordered set of 'fops' modules supported
 * by scsi_vhci.  Currently, initialize this table from the 'ddi-forceload'
 * property specified in scsi_vhci.conf.
 */
struct scsi_failover {
	ddi_modhandle_t			sf_mod;
	struct scsi_failover_ops	*sf_sfo;
} *scsi_failover_table;
uint_t	scsi_nfailover;

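/*
 * For reference, the 'ddi-forceload' property in scsi_vhci.conf looks
 * roughly like the following (illustrative; the exact module list varies
 * by release):
 *
 *	ddi-forceload =
 *	    "misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *	    ...
 *	    "misc/scsi_vhci/scsi_vhci_f_sym",
 *	    "misc/scsi_vhci/scsi_vhci_f_tpgs";
 */
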
int
_init(void)
{
	int	rval;

	/*
	 * Initialize the soft state first; we must be able to do
	 * ddi_soft_state_zalloc() before registering with the transport.
	 */
	if ((rval = ddi_soft_state_init(&vhci_softstate,
	    sizeof (struct scsi_vhci), 1)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: soft state init failed\n"));
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: scsi hba init failed\n"));
		ddi_soft_state_fini(&vhci_softstate);
		return (rval);
	}

	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
	vhci_targetmap_byport = mod_hash_create_strhash(
	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
	vhci_targetmap_bypid = mod_hash_create_idhash(
	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);

	if ((rval = mod_install(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
		if (vhci_targetmap_bypid)
			mod_hash_destroy_idhash(vhci_targetmap_bypid);
		if (vhci_targetmap_byport)
			mod_hash_destroy_strhash(vhci_targetmap_byport);
		mutex_destroy(&vhci_targetmap_mutex);
		cv_destroy(&vhci_cv);
		mutex_destroy(&vhci_global_mutex);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&vhci_softstate);
	}
	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
		return (rval);
	}

	if (vhci_targetmap_bypid)
		mod_hash_destroy_idhash(vhci_targetmap_bypid);
	if (vhci_targetmap_byport)
		mod_hash_destroy_strhash(vhci_targetmap_byport);
	mutex_destroy(&vhci_targetmap_mutex);
	cv_destroy(&vhci_cv);
	mutex_destroy(&vhci_global_mutex);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&vhci_softstate);

	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Lookup scsi_failover by "short name" of failover module.
 */
struct scsi_failover_ops *
vhci_failover_ops_by_name(char *name)
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
			return (sf->sf_sfo);
	}
	return (NULL);
}

/*
 * Load all scsi_failover_ops 'fops' modules.
 */
static void
vhci_failover_modopen(struct scsi_vhci *vhci)
{
	char			**module;
	int			i;
	struct scsi_failover	*sf;
	char			**dt;
	int			e;

	if (scsi_failover_table)
		return;

	/* Get the list of modules from scsi_vhci.conf */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf is missing 'ddi-forceload'");
		return;
	}
	if (scsi_nfailover == 0) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf has empty 'ddi-forceload'");
		ddi_prop_free(module);
		return;
	}

	/* allocate failover table based on number of modules */
	scsi_failover_table = (struct scsi_failover *)
	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
	    KM_SLEEP);

	/* loop over modules specified in scsi_vhci.conf and open each module */
	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
		if (module[i] == NULL)
			continue;

		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
		if (sf->sf_mod == NULL) {
			/*
			 * A module returns EEXIST if other software is
			 * supporting the intended function: for example
			 * the scsi_vhci_f_sum_emc module returns EEXIST
			 * from _init if EMC powerpath software is installed.
			 */
			if (e != EEXIST)
				cmn_err(CE_WARN, "scsi_vhci: unable to open "
				    "module '%s', error %d", module[i], e);
			continue;
		}
		sf->sf_sfo = ddi_modsym(sf->sf_mod,
		    "scsi_vhci_failover_ops", &e);
		if (sf->sf_sfo == NULL) {
			cmn_err(CE_WARN, "scsi_vhci: "
			    "unable to import 'scsi_vhci_failover_ops' from "
			    "'%s', error %d", module[i], e);
			(void) ddi_modclose(sf->sf_mod);
			sf->sf_mod = NULL;
			continue;
		}

		/* register vid/pid of devices supported with mpapi */
		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
			vhci_mpapi_add_dev_prod(vhci, *dt);

		/*
		 * Special processing for SFO_NAME_TPGS module, which contains
		 * the `tpgs_set_target_groups` implementation needed by the
		 * MP-API code.
		 */
		if (strcmp(sf->sf_sfo->sfo_name, SFO_NAME_TPGS) == 0) {
			tpgs_set_target_groups =
			    (int (*)(struct scsi_address *, int, int))
			    ddi_modsym(sf->sf_mod, "std_set_target_groups", &e);
			if (tpgs_set_target_groups == NULL) {
				cmn_err(CE_WARN, "scsi_vhci: "
				    "unable to import 'std_set_target_groups' "
				    "from '%s', error %d", module[i], e);
			}
		}

		sf++;
	}

	/* verify that at least the "well-known" modules were there */
	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");
	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");

	/* call sfo_init for modules that need it */
	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
			(*sf->sf_sfo->sfo_init)();
	}

	ddi_prop_free(module);
}
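
/*
 * Note on the fops module contract (summarizing the loader above): each
 * module listed in 'ddi-forceload' exports a 'scsi_vhci_failover_ops'
 * symbol (a struct scsi_failover_ops) with at least an sfo_name, a
 * NULL-terminated sfo_devices list, and an optional sfo_init routine;
 * the SFO_NAME_TPGS module additionally exports 'std_set_target_groups'
 * for direct use by the MP-API code.
 */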

/*
 * unload all loaded scsi_failover_ops modules
 */
static void
vhci_failover_modclose()
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
			continue;
		(void) ddi_modclose(sf->sf_mod);
		sf->sf_mod = NULL;
		sf->sf_sfo = NULL;
	}

	if (scsi_failover_table && scsi_nfailover)
		kmem_free(scsi_failover_table,
		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
	scsi_failover_table = NULL;
	scsi_nfailover = 0;
}

/* ARGSUSED */
static int
vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
		mutex_exit(&vhci->vhci_mutex);
		vhci_log(CE_NOTE, vhci->vhci_dip,
		    "!vhci%d: Already open\n", getminor(*devp));
		return (EBUSY);
	}

	vhci->vhci_state |= VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);
	return (0);
}


/* ARGSUSED */
static int
vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	vhci->vhci_state &= ~VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);

	return (0);
}

/* ARGSUSED */
static int
vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
	cred_t *credp, int *rval)
{
	if (IS_DEVCTL(cmd)) {
		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
	} else if (cmd == MP_CMD) {
		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
	} else {
		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
	}
}
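
/*
 * Dispatch summary for the ioctl entry point above: devctl-style ioctls
 * are handled by vhci_devctl(), MP_CMD ioctls (issued on behalf of the
 * MP-API library) by vhci_mpapi_ctl(), and all remaining SCSI_VHCI
 * ioctls (the sv_iocdata_t-based interfaces) by vhci_ctl().
 */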

/*
 * attach the module
 */
static int
vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			rval = DDI_FAILURE;
	int			scsi_hba_attached = 0;
	int			vhci_attached = 0;
	int			mutex_initted = 0;
	int			instance;
	struct scsi_vhci	*vhci;
	scsi_hba_tran_t		*tran;
	char			cache_name_buf[64];
	char			*data;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
		    "implemented\n"));
		return (rval);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_attach: unknown ddi command\n"));
		return (rval);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "soft state alloc failed\n"));
		return (DDI_FAILURE);
	}

	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "bad soft state\n"));
		ddi_soft_state_free(vhci_softstate, instance);
		return (DDI_FAILURE);
	}

	/* Allocate packet cache */
	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
	    "vhci%d_cache", instance);

	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	ASSERT(tran != NULL);

	vhci->vhci_tran		= tran;
	vhci->vhci_dip		= dip;
	vhci->vhci_instance	= instance;

	tran->tran_hba_private	= vhci;
	tran->tran_tgt_private	= NULL;
	tran->tran_tgt_init	= vhci_scsi_tgt_init;
	tran->tran_tgt_probe	= NULL;
	tran->tran_tgt_free	= vhci_scsi_tgt_free;

	tran->tran_start	= vhci_scsi_start;
	tran->tran_abort	= vhci_scsi_abort;
	tran->tran_reset	= vhci_scsi_reset;
	tran->tran_getcap	= vhci_scsi_getcap;
	tran->tran_setcap	= vhci_scsi_setcap;
	tran->tran_init_pkt	= vhci_scsi_init_pkt;
	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
	tran->tran_dmafree	= vhci_scsi_dmafree;
	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
	tran->tran_reset_notify = vhci_scsi_reset_notify;

	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
	tran->tran_get_name	= vhci_scsi_get_name;
	tran->tran_bus_reset	= NULL;
	tran->tran_quiesce	= NULL;
	tran->tran_unquiesce	= NULL;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_post_event = NULL;

	tran->tran_bus_power = vhci_scsi_bus_power;

	tran->tran_bus_config = vhci_scsi_bus_config;

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
	    != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "mdi_vhci_register failed\n"));
		goto attach_fail;
	}
	vhci_attached++;

	/*
	 * Attach this instance of the hba.
	 *
	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
	 * driver, it has nothing to do with DMA. However, when calling
	 * scsi_hba_attach_setup() we need to pass something valid in the
	 * dma attributes parameter, so we just use scsi_alloc_attr.
	 * SCSA itself seems to care only about the dma_attr_minxfer and
	 * dma_attr_burstsizes fields of the dma attributes structure, and
	 * it expects those fields to be non-zero.
	 */
	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "hba attach failed\n"));
		goto attach_fail;
	}
	scsi_hba_attached++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    " ddi_create_minor_node failed\n"));
		goto attach_fail;
	}

	/*
	 * Set pm-want-child-notification property for
	 * power management of the phci and client
	 */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d failed to create pm-want-child-notification? prop",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto attach_fail;
	}

	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
	vhci->vhci_update_pathstates_taskq =
	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
	    MINCLSYSPRI, 1, 4, 0);
	ASSERT(vhci->vhci_taskq);
	ASSERT(vhci->vhci_update_pathstates_taskq);
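
	/*
	 * Note: vhci_taskq (single-threaded) is used to redispatch SCSI-II
	 * RESERVE commands out of interrupt context (see
	 * vhci_dispatch_scsi_start()), while vhci_update_pathstates_taskq
	 * provides up to VHCI_NUM_UPDATE_TASKQ worker threads for
	 * path-state update work (vhci_update_pathstates()).
	 */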

	/*
	 * Set appropriate configuration flags based on options set in
	 * conf file.
	 */
	vhci->vhci_conf_flags = 0;
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
	    "auto-failback", &data) == DDI_SUCCESS) {
		if (strcmp(data, "enable") == 0)
			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
		ddi_prop_free(data);
	}

	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
		    "disabled through scsi_vhci.conf file.");

	/*
	 * Allocate an mpapi private structure
	 */
	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
	if (vhci_mpapi_init(vhci) != 0) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
		    "vhci_mpapi_init() failed"));
	}

	vhci_failover_modopen(vhci);		/* load failover modules */

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_attached)
		(void) mdi_vhci_unregister(dip, 0);

	if (scsi_hba_attached)
		(void) scsi_hba_detach(dip);

	if (vhci->vhci_tran)
		scsi_hba_tran_free(vhci->vhci_tran);

	if (mutex_initted) {
		mutex_destroy(&vhci->vhci_mutex);
	}

	ddi_soft_state_free(vhci_softstate, instance);
	return (DDI_FAILURE);
}


/*ARGSUSED*/
static int
vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance = ddi_get_instance(dip);
	scsi_hba_tran_t		*tran;
	struct scsi_vhci	*vhci;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	vhci = TRAN2HBAPRIVATE(tran);
	if (!vhci) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not "
		    "yet implemented\n"));
		return (DDI_FAILURE);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_detach: unknown ddi command\n"));
		return (DDI_FAILURE);
	}

	(void) mdi_vhci_unregister(dip, 0);
	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d unable to remove prop pm-want-child-notification?",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	if (vhci_restart_timeid != 0) {
		(void) untimeout(vhci_restart_timeid);
	}
	vhci_restart_timeid = 0;

	mutex_destroy(&vhci->vhci_mutex);
	vhci->vhci_dip = NULL;
	vhci->vhci_tran = NULL;
	taskq_destroy(vhci->vhci_taskq);
	taskq_destroy(vhci->vhci_update_pathstates_taskq);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(vhci_softstate, instance);

	vhci_failover_modclose();		/* unload failover modules */
	return (DDI_SUCCESS);
}

/*
 * vhci_getinfo()
 * Given the device number, return the devinfo pointer or the
 * instance number.
 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
 */

/*ARGSUSED*/
static int
vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct scsi_vhci	*vhcip;
	int			instance = MINOR2INST(getminor((dev_t)arg));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		vhcip = ddi_get_soft_state(vhci_softstate, instance);
		if (vhcip != NULL)
			*result = vhcip->vhci_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	char			*guid;
	scsi_vhci_lun_t		*vlun;
	struct scsi_vhci	*vhci;
	clock_t			from_ticks;
	mdi_pathinfo_t		*pip;
	int			rval;

	ASSERT(hba_dip != NULL);
	ASSERT(tgt_dip != NULL);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		/*
		 * This must be the .conf node without a GUID property.
		 * The node under fp already inserts a delay, so we
		 * just return from here. We rely on this delay to have
		 * all dips be posted to the ndi hotplug thread's newdev
		 * list. This is necessary for the deferred attach
		 * mechanism to work and opens() done soon after boot to
		 * succeed.
		 */
		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
		    "property failed"));
		return (DDI_NOT_WELL_FORMED);
	}

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		/*
		 * This must be a .conf node with the GUID property. We don't
		 * merge properties by ndi_merge_node() here because the
		 * devi_addr_buf of a .conf node is always "" according to the
		 * implementation of vhci_scsi_get_name_bus_addr().
		 */
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
	ASSERT(vhci != NULL);

	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
	    "!tgt_init: called for %s (instance %d)\n",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));

	vlun = vhci_lun_lookup(tgt_dip);

	mutex_enter(&vhci_global_mutex);

	from_ticks = ddi_get_lbolt();
	if (vhci_to_ticks == 0) {
		vhci_to_ticks = from_ticks +
		    drv_usectohz(vhci_init_wait_timeout);
	}

#if DEBUG
	if (vlun) {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
		    "from_ticks %lx to_ticks %lx",
		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
	} else {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : vlun not found "
		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
		    vhci_to_ticks));
	}
#endif

	rval = mdi_select_path(tgt_dip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_rele_path(pip);
	}

	/*
	 * Keep waiting (bounded by the init timer) while any of the
	 * following holds:
	 *	1. no vlun is available yet
	 *	2. no path is established yet
	 */
	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
	    (rval != MDI_SUCCESS)) {
		if (vlun && vlun->svl_not_supported) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
			    "vlun 0x%p lun guid %s not supported!",
			    (void *)vlun, guid));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
			vhci_first_time = 1;
		}
		if (vhci_first_time == 1) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
			    "no wait for %s. from_tick %lx, to_tick %lx",
			    guid, from_ticks, vhci_to_ticks));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}

		if (cv_timedwait(&vhci_cv,
		    &vhci_global_mutex, vhci_to_ticks) == -1) {
			/* Timed out */
#ifdef DEBUG
			if (vlun == NULL) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: no vlun for %s!", guid));
			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path count is "
				    "zero for %s!", guid));
			} else {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path not "
				    "available yet for %s!", guid));
			}
#endif /* DEBUG */
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		vlun = vhci_lun_lookup(tgt_dip);
		rval = mdi_select_path(tgt_dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if (rval == MDI_SUCCESS) {
			mdi_rele_path(pip);
		}
		from_ticks = ddi_get_lbolt();
	}
	mutex_exit(&vhci_global_mutex);

	ASSERT(vlun != NULL);
	ddi_prop_free(guid);
	hba_tran->tran_tgt_private = vlun;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
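	/*
	 * Nothing to do here: tran_tgt_private points at the vlun, whose
	 * lifetime is managed separately (see vhci_lun_free(), not shown
	 * in this section).
	 */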
}

/*
 * a PGR register command has started; copy the info we need
 */
int
vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
{
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	void			*addr;

	if (!vpkt->vpkt_tgt_init_bp)
		return (TRAN_BADPKT);

	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
	if (addr == NULL)
		return (TRAN_BUSY);

	mutex_enter(&vlun->svl_mutex);

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");

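	/*
	 * Copy the caller's PROUT parameter data into svl_prout.  The copy
	 * deliberately stops short of the last two key-sized fields of
	 * vhci_prout_t, which (presumably scratch fields maintained by
	 * scsi_vhci itself across retries) are not supplied by the caller
	 * -- hence the 2 * MHIOC_RESV_KEY_SIZE subtraction below.
	 */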
	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");

	vlun->svl_time = pkt->pkt_time;
	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
	vlun->svl_first_path = vpkt->vpkt_path;
	mutex_exit(&vlun->svl_mutex);
	return (0);
}

/*
 * Function name : vhci_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
 *					  or other fatal failure
 *					  preventing packet transportation
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to phci
 *					  (or is held in the waitQ)
 * Description	 : Implements SCSA's tran_start() entry point for
 *		   packet transport
 *
 */
static int
vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int			rval = TRAN_ACCEPT;
	int			instance, held;
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	int			flags = 0;
	scsi_vhci_priv_t	*svp;
	dev_info_t		*cdip;
	client_lb_t		lbp;
	int			restore_lbp = 0;
	/* set if pkt is SCSI-II RESERVE cmd */
	int			pkt_reserve_cmd = 0;
	int			reserve_failed = 0;

	ASSERT(vhci != NULL);
	ASSERT(vpkt != NULL);
	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
	cdip = ADDR2DIP(ap);

	/*
	 * Block IOs if LUN is held or QUIESCED for IOs.
	 */
	if ((VHCI_LUN_IS_HELD(vlun)) ||
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		return (TRAN_BUSY);
	}

	/*
	 * vhci_lun needs to be quiesced before SCSI-II RESERVE command
	 * can be issued.  This may require a cv_timedwait, which is
	 * dangerous to perform in an interrupt context.  So if this
	 * is a RESERVE command a taskq is dispatched to service it.
	 * This taskq shall again call vhci_scsi_start, but we shall be
	 * sure it's not in an interrupt context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
			if (taskq_dispatch(vhci->vhci_taskq,
			    vhci_dispatch_scsi_start, (void *) vpkt,
			    KM_NOSLEEP)) {
				return (TRAN_ACCEPT);
			} else {
				return (TRAN_BUSY);
			}
		}
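
		/*
		 * If we get here, VHCI_PKT_THRU_TASKQ is already set; the
		 * taskq callback (vhci_dispatch_scsi_start()) presumably
		 * marks the packet before re-invoking vhci_scsi_start(),
		 * so this second pass proceeds with the quiesce logic.
		 */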

		/*
		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
		 * get serviced for a lun.
		 */
		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
		if (!held) {
			return (TRAN_BUSY);
		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
		    VLUN_QUIESCED_FLG) {
			VHCI_RELEASE_LUN(vlun);
			return (TRAN_BUSY);
		}

		/*
		 * To ensure that no IOs occur for this LUN for the duration
		 * of this pkt set the VLUN_QUIESCED_FLG.
		 * In case this routine needs to exit on error make sure that
		 * this flag is cleared.
		 */
		vlun->svl_flags |= VLUN_QUIESCED_FLG;
		pkt_reserve_cmd = 1;

		/*
		 * if this is a SCSI-II RESERVE command, set load balancing
		 * policy to be ALTERNATE PATH to ensure that all subsequent
		 * IOs are routed on the same path.  This is because if commands
		 * are routed across multiple paths then IOs on paths other than
		 * the one on which the RESERVE was executed will get a
		 * RESERVATION CONFLICT
		 */
		lbp = mdi_get_lb_policy(cdip);
		if (lbp != LOAD_BALANCE_NONE) {
			if (vhci_quiesce_lun(vlun) != 1) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			vlun->svl_lb_policy_save = lbp;
			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
			    MDI_SUCCESS) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			restore_lbp = 1;
		}
		/*
		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
		 * To narrow this window where a reserve command may be sent
		 * down an inactive path the path states first need to be
		 * updated. Before calling vhci_update_pathstates reset
		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
		 * for this lun.  This shall prevent an unnecessary reset
		 * from being sent out.
		 */
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		vhci_update_pathstates((void *)vlun);
	}

	instance = ddi_get_instance(vhci->vhci_dip);

	/*
	 * If the command is PRIN with action of zero, then the cmd
	 * is reading PR keys which requires filtering on completion.
	 * Data cache sync must be guaranteed.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) &&
	    (pkt->pkt_cdbp[1] == 0) &&
	    (vpkt->vpkt_org_vpkt == NULL)) {
		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
	}

	/*
	 * Do not defer bind for PKT_DMA_PARTIAL
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {

		/* This is a non pkt_dma_partial case */
		if ((rval = vhci_bind_transport(
		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
		    != TRAN_ACCEPT) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci%d %x: failed to bind transport: "
			    "vlun 0x%p pkt_reserved %x restore_lbp %x, "
			    "lbp %x", instance, rval, (void *)vlun,
			    pkt_reserve_cmd, restore_lbp, lbp));
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
			if (pkt_reserve_cmd)
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			return (rval);
		}
		VHCI_DEBUG(8, (CE_NOTE, NULL,
		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
	}
	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	ASSERT(vpkt->vpkt_path != NULL);

	/*
	 * This is the chance to adjust the pHCI's pkt and other information
	 * from target driver's pkt.
	 */
	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
	    (void *)vpkt));
	vhci_update_pHCI_pkt(vpkt, pkt);

	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
			    (void *)vlun, (void *)vpkt->vpkt_path,
			    (void *)vlun->svl_resrv_pip,
			    mdi_get_lb_policy(cdip)));
			reserve_failed = 1;
		}
	}

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(
	    vpkt->vpkt_path);
	if (svp == NULL || reserve_failed) {
		if (pkt_reserve_cmd) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind returned null svp vlun 0x%p",
			    (void *)vlun));
			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
		}
pkt_cleanup:
		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
			vpkt->vpkt_hba_pkt = NULL;
			if (vpkt->vpkt_path) {
				mdi_rele_path(vpkt->vpkt_path);
				vpkt->vpkt_path = NULL;
			}
		}
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) ==
		    VHCI_PROUT_R_AND_IGNORE))) {
			sema_v(&vlun->svl_pgr_sema);
		}
		return (TRAN_BUSY);
	}

	VHCI_INCR_PATH_CMDCOUNT(svp);

	/*
	 * Ensure that no other IOs raced ahead, while a RESERVE cmd was
	 * QUIESCING the same lun.
	 */
	if ((!pkt_reserve_cmd) &&
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		VHCI_DECR_PATH_CMDCOUNT(svp);
		goto pkt_cleanup;
	}

	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
		/*
		 * currently this thread only handles running PGR
		 * commands, so don't bother creating it unless
		 * something interesting is going to happen (like
		 * either a PGR out, or a PGR in with enough space
		 * to hold the keys that are getting returned)
		 */
		mutex_enter(&vlun->svl_mutex);
		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
			    1, MINCLSYSPRI, 1, 4, 0);
			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
		}
		mutex_exit(&vlun->svl_mutex);
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) ==
		    VHCI_PROUT_R_AND_IGNORE))) {
			if ((rval = vhci_pgr_register_start(vlun, pkt)) != 0) {
				/* an error */
				sema_v(&vlun->svl_pgr_sema);
				return (rval);
			}
		}
	}

	/*
	 * SCSI-II RESERVE cmd is not expected in polled mode.
	 * If this changes it needs to be handled for the polled scenario.
	 */
	flags = vpkt->vpkt_hba_pkt->pkt_flags;

	/*
	 * Set the path_instance *before* sending the scsi_pkt down the path
	 * to mpxio's pHCI so that additional path abstractions at a pHCI
	 * level (like maybe iSCSI at some point in the future) can update
	 * the path_instance.
	 */
	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
		vpkt->vpkt_hba_pkt->pkt_path_instance =
		    mdi_pi_get_path_instance(vpkt->vpkt_path);

	rval = scsi_transport(vpkt->vpkt_hba_pkt);
	if (rval == TRAN_ACCEPT) {
		if (flags & FLAG_NOINTR) {
			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;

			ASSERT(tpkt != NULL);
			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
			tpkt->pkt_resid = pkt->pkt_resid;
			tpkt->pkt_state = pkt->pkt_state;
			tpkt->pkt_statistics = pkt->pkt_statistics;
			tpkt->pkt_reason = pkt->pkt_reason;

			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
				    vpkt->vpkt_tgt_init_scblen);
			}

			VHCI_DECR_PATH_CMDCOUNT(svp);
			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
				vpkt->vpkt_hba_pkt = NULL;
				if (vpkt->vpkt_path) {
					mdi_rele_path(vpkt->vpkt_path);
					vpkt->vpkt_path = NULL;
				}
			}
			/*
			 * This path will not automatically retry pkts
			 * internally, therefore, vpkt_org_vpkt should
			 * never be set.
			 */
			ASSERT(vpkt->vpkt_org_vpkt == NULL);
			if (tpkt->pkt_comp) {
				(*tpkt->pkt_comp)(tpkt);
			}
		}
		return (rval);
	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (pkt_reserve_cmd) {
		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
		    "!vhci_scsi_start: reserve failed vlun 0x%p",
		    (void *)vlun));
		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
		if (restore_lbp)
			(void) mdi_set_lb_policy(cdip, lbp);
	}

	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	VHCI_DECR_PATH_CMDCOUNT(svp);

	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
		vpkt->vpkt_hba_pkt = NULL;
		if (vpkt->vpkt_path) {
			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
			mdi_rele_path(vpkt->vpkt_path);
			vpkt->vpkt_path = NULL;
		}
	}
	return (TRAN_BUSY);
}

/*
 * Function name : vhci_scsi_reset()
 *
 * Return Values : 0 - reset failed
 *		   1 - reset succeeded
 */

/* ARGSUSED */
static int
vhci_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = 0;

	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
		return (vhci_scsi_reset_target(ap, level, TRUE));
	} else if (level == RESET_ALL) {
		return (vhci_scsi_reset_bus(ap));
	}

	return (rval);
}

/*
 * vhci_recovery_reset:
 *	Issues reset to the device
 * Input:
 *	vlun - vhci lun pointer of the device
 *	ap - address of the device
 *	select_path:
 *		If select_path is FALSE, then the address specified in ap is
 *		the path on which reset will be issued.
 *		If select_path is TRUE, then path is obtained by calling
 *		mdi_select_path.
 *
 *	recovery_depth:
 *		Caller can specify the level of reset.
 *		VHCI_DEPTH_LUN -
 *			Issues LUN RESET if device supports lun reset.
 *		VHCI_DEPTH_TARGET -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET
 *		VHCI_DEPTH_ALL -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET.
 *			If TARGET RESET does not succeed, issues Bus Reset.
 */

static int
vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
	uint8_t select_path, uint8_t recovery_depth)
{
	int	ret = 0;

	ASSERT(ap != NULL);

	if (vlun && vlun->svl_support_lun_reset == 1) {
		ret = vhci_scsi_reset_target(ap, RESET_LUN,
		    select_path);
	}

	recovery_depth--;

	if ((ret == 0) && recovery_depth) {
		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
		    select_path);
		recovery_depth--;
	}

	if ((ret == 0) && recovery_depth) {
		(void) scsi_reset(ap, RESET_ALL);
	}

	return (ret);
}
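
/*
 * Illustrative call for the escalation above (hypothetical values; the
 * actual callers are in the sense-handling code elsewhere in this file):
 *
 *	(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
 *	    TRUE, VHCI_DEPTH_TARGET);
 *
 * The recovery_depth argument bounds how far the escalation proceeds,
 * as documented in the block comment above.
 */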

/*
 * Note: The scsi_address passed to this routine could be the scsi_address
 * for the virtual device or the physical device. No assumptions should be
 * made in this routine about the ap structure, and the
 * a_hba_tran->tran_tgt_private field of ap cannot be assumed to be the
 * vhci structure.
 * Further note that the child dip would be the dip of the ssd node
 * irrespective of the scsi_address passed.
 */

static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
	dev_info_t		*vdip, *pdip, *cdip = ADDR2DIP(ap);
	mdi_pathinfo_t		*pip = NULL;
	mdi_pathinfo_t		*npip = NULL;
	int			rval = -1;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	struct scsi_vhci	*vhci = NULL;

	if (select_path != TRUE) {
		ASSERT(ap != NULL);
		if (level == RESET_LUN) {
			hba = ap->a_hba_tran;
			ASSERT(hba != NULL);
			return ((*hba->tran_reset)(ap, RESET_LUN));
		}
		return (scsi_reset(ap, level));
	}

	ASSERT(cdip != NULL);
	vdip = ddi_get_parent(cdip);
	ASSERT(vdip != NULL);
	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "Unable to get a path, dip 0x%p", (void *)cdip));
		return (0);
	}
again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (0);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (0);
	}

	pap = &svp->svp_psd->sd_address;
	hba = pap->a_hba_tran;

	ASSERT(pap != NULL);
	ASSERT(hba != NULL);

	if (hba->tran_reset != NULL) {
		if ((*hba->tran_reset)(pap, level) == 0) {
			pdip = mdi_pi_get_phci(pip);
			vhci_log(CE_WARN, vdip, "!(%s%d):"
			    " path (%s%d), reset %d failed",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    ddi_driver_name(pdip), ddi_get_instance(pdip),
			    level);

			/*
			 * Select next path and issue the reset, repeat
			 * until all paths are exhausted
			 */
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_ONLINE_PATH, pip, &npip);
			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
				mdi_rele_path(pip);
				return (0);
			}
			mdi_rele_path(pip);
			pip = npip;
			goto again;
		}
		mdi_rele_path(pip);
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
		    "reset %d sent down pip:%p for cdip:%p\n", level,
		    (void *)pip, (void *)cdip));
		return (1);
	}
	mdi_rele_path(pip);
	return (0);
}


/* ARGSUSED */
static int
vhci_scsi_reset_bus(struct scsi_address *ap)
{
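	/*
	 * There is no physical bus underneath the vHCI to reset, so
	 * simply report success.
	 */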
1629 	return (1);
1630 }
1631 
1632 
1633 /*
1634  * called by vhci_getcap and vhci_setcap to get and set (respectively)
1635  * SCSI capabilities
1636  */
1637 /* ARGSUSED */
1638 static int
1639 vhci_commoncap(struct scsi_address *ap, char *cap,
1640     int val, int tgtonly, int doset)
1641 {
1642 	struct scsi_vhci		*vhci = ADDR2VHCI(ap);
1643 	struct scsi_vhci_lun		*vlun = ADDR2VLUN(ap);
1644 	int			cidx;
1645 	int			rval = 0;
1646 
1647 	if (cap == (char *)0) {
1648 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1649 		    "!vhci_commoncap: invalid arg"));
1650 		return (rval);
1651 	}
1652 
1653 	if (vlun == NULL) {
1654 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1655 		    "!vhci_commoncap: vlun is null"));
1656 		return (rval);
1657 	}
1658 
1659 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
1660 		return (UNDEFINED);
1661 	}
1662 
1663 	/*
1664 	 * Process setcap request.
1665 	 */
1666 	if (doset) {
1667 		/*
1668 		 * At present, we can only set binary (0/1) values
1669 		 */
1670 		switch (cidx) {
1671 		case SCSI_CAP_ARQ:
1672 			if (val == 0) {
1673 				rval = 0;
1674 			} else {
1675 				rval = 1;
1676 			}
1677 			break;
1678 
1679 		case SCSI_CAP_LUN_RESET:
1680 			if (tgtonly == 0) {
1681 				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1682 				    "scsi_vhci_setcap: "
1683 				    "Returning error since whom = 0"));
1684 				rval = -1;
1685 				break;
1686 			}
1687 			/*
1688 			 * Set the capability accordingly.
1689 			 */
1690 			mutex_enter(&vlun->svl_mutex);
1691 			vlun->svl_support_lun_reset = val;
1692 			rval = val;
1693 			mutex_exit(&vlun->svl_mutex);
1694 			break;
1695 
1696 		case SCSI_CAP_SECTOR_SIZE:
1697 			mutex_enter(&vlun->svl_mutex);
1698 			vlun->svl_sector_size = val;
1699 			vlun->svl_setcap_done = 1;
1700 			mutex_exit(&vlun->svl_mutex);
1701 			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);
1702 
1703 			/* Always return success */
1704 			rval = 1;
1705 			break;
1706 
1707 		default:
1708 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1709 			    "!vhci_setcap: unsupported %d", cidx));
1710 			rval = UNDEFINED;
1711 			break;
1712 		}
1713 
1714 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1715 		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
1716 		    "0x%x/0x%x/0x%x/%d\n",
1717 		    cap, val, tgtonly, doset, rval));
1718 
1719 	} else {
1720 		/*
1721 		 * Process getcap request.
1722 		 */
1723 		switch (cidx) {
1724 		case SCSI_CAP_DMA_MAX:
1725 			rval = (int)VHCI_DMA_MAX_XFER_CAP;
1726 			break;
1727 
1728 		case SCSI_CAP_INITIATOR_ID:
1729 			rval = 0x00;
1730 			break;
1731 
1732 		case SCSI_CAP_ARQ:
1733 		case SCSI_CAP_RESET_NOTIFICATION:
1734 		case SCSI_CAP_TAGGED_QING:
1735 			rval = 1;
1736 			break;
1737 
1738 		case SCSI_CAP_SCSI_VERSION:
1739 			rval = 3;
1740 			break;
1741 
1742 		case SCSI_CAP_INTERCONNECT_TYPE:
1743 			rval = INTERCONNECT_FABRIC;
1744 			break;
1745 
1746 		case SCSI_CAP_LUN_RESET:
1747 			/*
1748 			 * scsi_vhci always reports success for LUN reset.
1749 			 * When a request to perform a LUN reset arrives
1750 			 * through the scsi_reset entry point, an attempt
1751 			 * is made to issue the reset through all possible
1752 			 * paths.
1753 			 */
1754 			mutex_enter(&vlun->svl_mutex);
1755 			rval = vlun->svl_support_lun_reset;
1756 			mutex_exit(&vlun->svl_mutex);
1757 			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
1758 			    "scsi_vhci_getcap:"
1759 			    "Getting the Lun reset capability %d", rval));
1760 			break;
1761 
1762 		case SCSI_CAP_SECTOR_SIZE:
1763 			mutex_enter(&vlun->svl_mutex);
1764 			rval = vlun->svl_sector_size;
1765 			mutex_exit(&vlun->svl_mutex);
1766 			break;
1767 
1768 		default:
1769 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1770 			    "!vhci_getcap: unsupported %d", cidx));
1771 			rval = UNDEFINED;
1772 			break;
1773 		}
1774 
1775 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1776 		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
1777 		    "0x%x/0x%x/0x%x/%d\n",
1778 		    cap, val, tgtonly, doset, rval));
1779 	}
1780 	return (rval);
1781 }
1782 
1783 
1784 /*
1785  * Function name : vhci_scsi_getcap()
1786  *
1787  */
1788 static int
1789 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
1790 {
1791 	return (vhci_commoncap(ap, cap, 0, whom, 0));
1792 }
1793 
1794 static int
1795 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1796 {
1797 	return (vhci_commoncap(ap, cap, value, whom, 1));
1798 }
1799 
1800 /*
1801  * Function name : vhci_scsi_abort() - unsupported, always returns 0 (failure)
1802  */
1803 /* ARGSUSED */
1804 static int
1805 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1806 {
1807 	return (0);
1808 }
1809 
1810 /*
1811  * Function name : vhci_scsi_init_pkt
1812  *
1813  * Return Values : pointer to scsi_pkt, or NULL
1814  */
1815 /* ARGSUSED */
1816 static struct scsi_pkt *
1817 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1818 	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1819 	int flags, int (*callback)(caddr_t), caddr_t arg)
1820 {
1821 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1822 	struct vhci_pkt		*vpkt;
1823 	int			rval;
1824 	int			newpkt = 0;
1825 	struct scsi_pkt		*pktp;
1826 
1827 
1828 	if (pkt == NULL) {
1829 		if (cmdlen > VHCI_SCSI_CDB_SIZE) {
1830 			VHCI_DEBUG(1, (CE_NOTE, NULL,
1831 			    "!init pkt: cdb size not supported\n"));
1832 			return (NULL);
1833 		}
1834 
1835 		pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
1836 		    ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
1837 		    arg);
1838 
1839 		if (pktp == NULL) {
1840 			return (NULL);
1841 		}
1842 
1843 		/* Get the vhci's private structure */
1844 		vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
1845 		ASSERT(vpkt);
1846 
1847 		/* Save the target driver's packet */
1848 		vpkt->vpkt_tgt_pkt = pktp;
1849 
1850 		/*
1851 		 * Save pkt_tgt_init_pkt fields if deferred binding
1852 		 * is needed or for other purposes.
1853 		 */
1854 		vpkt->vpkt_tgt_init_pkt_flags = flags;
1855 		vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
1856 		vpkt->vpkt_state = VHCI_PKT_IDLE;
1857 		vpkt->vpkt_tgt_init_cdblen = cmdlen;
1858 		vpkt->vpkt_tgt_init_scblen = statuslen;
1859 		newpkt = 1;
1860 	} else { /* pkt not NULL */
1861 		vpkt = pkt->pkt_ha_private;
1862 	}
1863 
1864 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
1865 	    "vpkt %p flags %x\n", (void *)vpkt, flags));
1866 
1867 	/* Clear any stale error flags */
1868 	if (bp) {
1869 		bioerror(bp, 0);
1870 	}
1871 
1872 	vpkt->vpkt_tgt_init_bp = bp;
1873 
1874 	if (flags & PKT_DMA_PARTIAL) {
1875 
1876 		/*
1877 		 * Immediate binding is needed.
1878 		 * The target driver may not set this flag in a later
1879 		 * invocation, so vhci has to remember that it was set on
1880 		 * the first invocation of vhci_scsi_init_pkt.
1881 		 */
1882 		vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
1883 	}
1884 
1885 	if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
1886 
1887 		/*
1888 		 * Re-initialize some of the target driver packet state
1889 		 * information.
1890 		 */
1891 		vpkt->vpkt_tgt_pkt->pkt_state = 0;
1892 		vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
1893 		vpkt->vpkt_tgt_pkt->pkt_reason = 0;
1894 
1895 		/*
1896 		 * Bind vpkt->vpkt_path for this I/O at init time.  If an I/O
1897 		 * error happens later, the target driver will clear this
1898 		 * vpkt->vpkt_path binding before re-initializing the I/O.
1899 		 */
1900 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1901 		    "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
1902 		    (void *)vpkt, newpkt));
1903 		if (pkt && vpkt->vpkt_hba_pkt) {
1904 			VHCI_DEBUG(4, (CE_NOTE, NULL,
1905 			    "v_s_i_p calling update_pHCI_pkt resid %ld\n",
1906 			    pkt->pkt_resid));
1907 			vhci_update_pHCI_pkt(vpkt, pkt);
1908 		}
1909 		if (callback == SLEEP_FUNC) {
1910 			rval = vhci_bind_transport(
1911 			    ap, vpkt, flags, callback);
1912 		} else {
1913 			rval = vhci_bind_transport(
1914 			    ap, vpkt, flags, NULL_FUNC);
1915 		}
1916 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1917 		    "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
1918 		    (void *)vpkt, rval));
1919 		if (bp) {
1920 			if (rval == TRAN_FATAL_ERROR) {
1921 				/*
1922 				 * No paths available. Could not bind
1923 				 * any pHCI. Setting EFAULT as a way
1924 				 * to indicate no DMA is mapped.
1925 				 */
1926 				bioerror(bp, EFAULT);
1927 			} else {
1928 				/*
1929 				 * Do not indicate any pHCI errors to
1930 				 * target driver otherwise.
1931 				 */
1932 				bioerror(bp, 0);
1933 			}
1934 		}
1935 		if (rval != TRAN_ACCEPT) {
1936 			VHCI_DEBUG(8, (CE_NOTE, NULL,
1937 			    "vhci_scsi_init_pkt: "
1938 			    "v_b_t failed 0x%p newpkt %x\n",
1939 			    (void *)vpkt, newpkt));
1940 			if (newpkt) {
1941 				scsi_hba_pkt_free(ap,
1942 				    vpkt->vpkt_tgt_pkt);
1943 			}
1944 			return (NULL);
1945 		}
1946 		ASSERT(vpkt->vpkt_hba_pkt != NULL);
1947 		ASSERT(vpkt->vpkt_path != NULL);
1948 
1949 		/* Update the resid for the target driver */
1950 		vpkt->vpkt_tgt_pkt->pkt_resid =
1951 		    vpkt->vpkt_hba_pkt->pkt_resid;
1952 	}
1953 
1954 	return (vpkt->vpkt_tgt_pkt);
1955 }
1956 
1957 /*
1958  * Function name : vhci_scsi_destroy_pkt
1959  *
1960  * Return Values : none
1961  */
1962 static void
1963 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1964 {
1965 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
1966 
1967 	VHCI_DEBUG(8, (CE_NOTE, NULL,
1968 	    "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
1969 
1970 	vpkt->vpkt_tgt_init_pkt_flags = 0;
1971 	if (vpkt->vpkt_hba_pkt) {
1972 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1973 		vpkt->vpkt_hba_pkt = NULL;
1974 	}
1975 	if (vpkt->vpkt_path) {
1976 		mdi_rele_path(vpkt->vpkt_path);
1977 		vpkt->vpkt_path = NULL;
1978 	}
1979 
1980 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
1981 	scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
1982 }
1983 
1984 /*
1985  * Function name : vhci_scsi_dmafree()
1986  *
1987  * Return Values : none
1988  */
1989 /*ARGSUSED*/
1990 static void
1991 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1992 {
1993 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
1994 
1995 	VHCI_DEBUG(6, (CE_NOTE, NULL,
1996 	    "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
1997 
1998 	ASSERT(vpkt != NULL);
1999 	if (vpkt->vpkt_hba_pkt) {
2000 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2001 		vpkt->vpkt_hba_pkt = NULL;
2002 	}
2003 	if (vpkt->vpkt_path) {
2004 		mdi_rele_path(vpkt->vpkt_path);
2005 		vpkt->vpkt_path = NULL;
2006 	}
2007 }
2008 
2009 /*
2010  * Function name : vhci_scsi_sync_pkt()
2011  *
2012  * Return Values : none
2013  */
2014 /*ARGSUSED*/
2015 static void
2016 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2017 {
2018 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2019 
2020 	ASSERT(vpkt != NULL);
2021 	if (vpkt->vpkt_hba_pkt) {
2022 		scsi_sync_pkt(vpkt->vpkt_hba_pkt);
2023 	}
2024 }
2025 
2026 /*
2027  * Routine for reset notification setup: register or cancel a callback.
2028  */
2029 static int
2030 vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
2031 	void (*callback)(caddr_t), caddr_t arg)
2032 {
2033 	struct scsi_vhci *vhci = ADDR2VHCI(ap);
2034 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
2035 	    &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
2036 }
2037 
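/*
 * Common helper behind vhci_scsi_get_bus_addr and vhci_scsi_get_name.
 * Reports the client's unit address as "g<guid>"; for the bus-addr form
 * the name of the bound failover module, when known, is appended.
 */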
2038 static int
2039 vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
2040     char *name, int len, int bus_addr)
2041 {
2042 	dev_info_t		*cdip;
2043 	char			*guid;
2044 	scsi_vhci_lun_t		*vlun;
2045 
2046 	ASSERT(sd != NULL);
2047 	ASSERT(name != NULL);
2048 
2049 	cdip = sd->sd_dev;
2050 
2051 	ASSERT(cdip != NULL);
2052 
2053 	if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS) {
2054 		name[0] = '\0';
2055 		return (1);
2056 	}
2057 
2058 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
2059 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
2060 		name[0] = '\0';
2061 		return (1);
2062 	}
2063 
2064 	vlun = ADDR2VLUN(&sd->sd_address);
2065 	if (bus_addr && vlun && vlun->svl_fops_name) {
2066 		/* report the guid and the name of the failover module */
2067 		(void) snprintf(name, len, "g%s %s", guid, vlun->svl_fops_name);
2068 	} else {
2069 		/* report the guid */
2070 		(void) snprintf(name, len, "g%s", guid);
2071 	}
2072 
2073 	ddi_prop_free(guid);
2074 	return (1);
2075 }
2076 
2077 static int
2078 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
2079 {
2080 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
2081 }
2082 
2083 static int
2084 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
2085 {
2086 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
2087 }
2088 
2089 /*
2090  * Return a pointer to the guid part of the devnm.
2091  * devnm format is "nodename@busaddr", busaddr format is "gGUID".
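 * e.g. given a devnm of "disk@g<guid>", a pointer to "<guid>" is returned.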
2092  */
2093 static char *
2094 vhci_devnm_to_guid(char *devnm)
2095 {
2096 	char *cp = devnm;
2097 
2098 	if (devnm == NULL)
2099 		return (NULL);
2100 
2101 	while (*cp != '\0' && *cp != '@')
2102 		cp++;
2103 	if (*cp == '@' && *(cp + 1) == 'g')
2104 		return (cp + 2);
2105 	return (NULL);
2106 }
2107 
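/*
 * Select a path for this command and bind the transport to it: pick a
 * pathinfo node via mdi_select_path() (honoring PGR in-flight state, an
 * active SCSI-II reservation, and any requested path_instance), then
 * allocate or reuse the pHCI scsi_pkt via scsi_init_pkt().  Returns
 * TRAN_ACCEPT on success, TRAN_BUSY or TRAN_FATAL_ERROR otherwise.
 */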
2108 static int
2109 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
2110     int (*func)(caddr_t))
2111 {
2112 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
2113 	dev_info_t		*cdip = ADDR2DIP(ap);
2114 	mdi_pathinfo_t		*pip = NULL;
2115 	mdi_pathinfo_t		*npip = NULL;
2116 	scsi_vhci_priv_t	*svp = NULL;
2117 	struct scsi_device	*psd = NULL;
2118 	struct scsi_address	*address = NULL;
2119 	struct scsi_pkt		*pkt = NULL;
2120 	int			rval = -1;
2121 	int			pgr_sema_held = 0;
2122 	int			held;
2123 	int			mps_flag = MDI_SELECT_ONLINE_PATH;
2124 	struct scsi_vhci_lun	*vlun;
2125 	time_t			tnow;
2126 	int			path_instance;
2127 
2128 	vlun = ADDR2VLUN(ap);
2129 	ASSERT(vlun != NULL);
2130 
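	/*
	 * A PROUT register (or register-and-ignore) takes the per-LUN PGR
	 * semaphore; it is released only once the registration has been
	 * replayed on the other paths (vhci_run_cmd) or the command has
	 * failed (vhci_intr).
	 */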
2131 	if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
2132 	    (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2133 	    VHCI_PROUT_REGISTER) ||
2134 	    ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2135 	    VHCI_PROUT_R_AND_IGNORE))) {
2136 		if (!sema_tryp(&vlun->svl_pgr_sema))
2137 			return (TRAN_BUSY);
2138 		pgr_sema_held = 1;
2139 		if (vlun->svl_first_path != NULL) {
2140 			rval = mdi_select_path(cdip, NULL,
2141 			    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2142 			    NULL, &pip);
2143 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2144 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2145 				    "vhci_bind_transport: path select fail\n"));
2146 			} else {
2147 				npip = pip;
2148 				do {
2149 					if (npip == vlun->svl_first_path) {
2150 						VHCI_DEBUG(4, (CE_NOTE, NULL,
2151 						    "vhci_bind_transport: "
2152 						    "valid first path 0x%p\n",
2153 						    (void *)
2154 						    vlun->svl_first_path));
2155 						pip = vlun->svl_first_path;
2156 						goto bind_path;
2157 					}
2158 					pip = npip;
2159 					rval = mdi_select_path(cdip, NULL,
2160 					    MDI_SELECT_ONLINE_PATH |
2161 					    MDI_SELECT_STANDBY_PATH,
2162 					    pip, &npip);
2163 					mdi_rele_path(pip);
2164 				} while ((rval == MDI_SUCCESS) &&
2165 				    (npip != NULL));
2166 			}
2167 		}
2168 
2169 		if (vlun->svl_first_path) {
2170 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2171 			    "vhci_bind_transport: invalid first path 0x%p\n",
2172 			    (void *)vlun->svl_first_path));
2173 			vlun->svl_first_path = NULL;
2174 		}
2175 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2176 		if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2177 			if (!sema_tryp(&vlun->svl_pgr_sema))
2178 				return (TRAN_BUSY);
2179 		}
2180 		pgr_sema_held = 1;
2181 	}
2182 
2183 	/*
2184 	 * If the path is already bound for PKT_PARTIAL_DMA case,
2185 	 * try to use the same path.
2186 	 */
2187 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2188 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2189 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2190 		    "vpkt 0x%p, path 0x%p\n",
2191 		    (void *)vpkt, (void *)vpkt->vpkt_path));
2192 		pip = vpkt->vpkt_path;
2193 		goto bind_path;
2194 	}
2195 
2196 	/*
2197 	 * Get path_instance. Non-zero indicates that mdi_select_path should
2198 	 * be called to select a specific instance.
2199 	 *
2200 	 * NB: Only reference pkt_path_instance if the pkt was allocated properly.
2201 	 */
2202 	if (scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt))
2203 		path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2204 	else
2205 		path_instance = 0;
2206 
2207 	/*
2208 	 * If a reservation is active, bind the transport directly to the
2209 	 * pip that holds the reservation.
2210 	 */
2211 	if (vpkt->vpkt_hba_pkt == NULL) {
2212 		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2213 			if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2214 				pip = vlun->svl_resrv_pip;
2215 				mdi_hold_path(pip);
2216 				vlun->svl_waiting_for_activepath = 0;
2217 				rval = MDI_SUCCESS;
2218 				goto bind_path;
2219 			} else {
2220 				if (pgr_sema_held) {
2221 					sema_v(&vlun->svl_pgr_sema);
2222 				}
2223 				return (TRAN_BUSY);
2224 			}
2225 		}
2226 try_again:
2227 		rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2228 		    path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2229 		    (void *)(intptr_t)path_instance, &pip);
2230 		if (rval == MDI_BUSY) {
2231 			if (pgr_sema_held) {
2232 				sema_v(&vlun->svl_pgr_sema);
2233 			}
2234 			return (TRAN_BUSY);
2235 		} else if (rval == MDI_DEVI_ONLINING) {
2236 			/*
2237 			 * If we are here then we are in the midst of
2238 			 * an attach/probe of the client device.
2239 			 * We attempt to bind to an ONLINE path if available;
2240 			 * otherwise it is OK to bind to a STANDBY path
2241 			 * (instead of triggering a failover) because I/O
2242 			 * associated with attach/probe (e.g. INQUIRY, block 0
2243 			 * read) is completed by targets even on passive paths.
2244 			 * If no ONLINE path is available, it is important
2245 			 * to set svl_waiting_for_activepath for two
2246 			 * reasons: (1) to avoid sense analysis in the
2247 			 * "external failure detection" codepath in
2248 			 * vhci_intr(); failure to do so will result in an
2249 			 * infinite loop (unless an ONLINE path becomes
2250 			 * available at some point), and (2) to avoid an
2251 			 * unnecessary failover (see the "---Waiting For
2252 			 * Active Path---" comment below).
2253 			 */
2254 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2255 			    "state\n", (void *)cdip));
2256 			pip = NULL;
2257 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2258 			    mps_flag, NULL, &pip);
2259 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2260 				if (vlun->svl_waiting_for_activepath == 0) {
2261 					vlun->svl_waiting_for_activepath = 1;
2262 					vlun->svl_wfa_time = ddi_get_time();
2263 				}
2264 				mps_flag |= MDI_SELECT_STANDBY_PATH;
2265 				rval = mdi_select_path(cdip,
2266 				    vpkt->vpkt_tgt_init_bp,
2267 				    mps_flag, NULL, &pip);
2268 				if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2269 					if (pgr_sema_held) {
2270 						sema_v(&vlun->svl_pgr_sema);
2271 					}
2272 					return (TRAN_FATAL_ERROR);
2273 				}
2274 				goto bind_path;
2275 			}
2276 		} else if (rval == MDI_FAILURE) {
2277 			if (pgr_sema_held) {
2278 				sema_v(&vlun->svl_pgr_sema);
2279 			}
2280 			return (TRAN_FATAL_ERROR);
2281 		}
2282 
2283 		if ((pip == NULL) || (rval == MDI_NOPATH)) {
2284 			while (vlun->svl_waiting_for_activepath) {
2285 				/*
2286 				 * ---Waiting For Active Path---
2287 				 * This device was discovered across a
2288 				 * passive path; let's wait a little
2289 				 * bit, hopefully an active path will
2290 				 * show up, obviating the need for a
2291 				 * failover.
2292 				 */
2293 				tnow = ddi_get_time();
2294 				if (tnow - vlun->svl_wfa_time >= 60) {
2295 					vlun->svl_waiting_for_activepath = 0;
2296 				} else {
2297 					drv_usecwait(1000);
2298 					if (vlun->svl_waiting_for_activepath
2299 					    == 0) {
2300 						/*
2301 						 * an active path has come
2302 						 * online!
2303 						 */
2304 						goto try_again;
2305 					}
2306 				}
2307 			}
2308 			VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2309 			if (!held) {
2310 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2311 				    "!Lun not held\n"));
2312 				if (pgr_sema_held) {
2313 					sema_v(&vlun->svl_pgr_sema);
2314 				}
2315 				return (TRAN_BUSY);
2316 			}
2317 			/*
2318 			 * now that the LUN is stable, one last check
2319 			 * to make sure no other changes sneaked in
2320 			 * (like a path coming online or a
2321 			 * failover initiated by another thread)
2322 			 */
2323 			pip = NULL;
2324 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2325 			    0, NULL, &pip);
2326 			if (pip != NULL) {
2327 				VHCI_RELEASE_LUN(vlun);
2328 				vlun->svl_waiting_for_activepath = 0;
2329 				goto bind_path;
2330 			}
2331 
2332 			/*
2333 			 * Check if there is an ONLINE path OR a STANDBY path
2334 			 * available. If none is available, do not attempt
2335 			 * to do a failover, just return a fatal error at this
2336 			 * point.
2337 			 */
2338 			npip = NULL;
2339 			rval = mdi_select_path(cdip, NULL,
2340 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2341 			    NULL, &npip);
2342 			if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2343 				/*
2344 				 * No paths available, just return FATAL error.
2345 				 */
2346 				VHCI_RELEASE_LUN(vlun);
2347 				if (pgr_sema_held) {
2348 					sema_v(&vlun->svl_pgr_sema);
2349 				}
2350 				return (TRAN_FATAL_ERROR);
2351 			}
2352 			mdi_rele_path(npip);
2353 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2354 			    "mdi_failover\n"));
2355 			rval = mdi_failover(vhci->vhci_dip, cdip,
2356 			    MDI_FAILOVER_ASYNC);
2357 			if (rval == MDI_FAILURE) {
2358 				VHCI_RELEASE_LUN(vlun);
2359 				if (pgr_sema_held) {
2360 					sema_v(&vlun->svl_pgr_sema);
2361 				}
2362 				return (TRAN_FATAL_ERROR);
2363 			} else if (rval == MDI_BUSY) {
2364 				VHCI_RELEASE_LUN(vlun);
2365 				if (pgr_sema_held) {
2366 					sema_v(&vlun->svl_pgr_sema);
2367 				}
2368 				return (TRAN_BUSY);
2369 			} else {
2370 				if (pgr_sema_held) {
2371 					sema_v(&vlun->svl_pgr_sema);
2372 				}
2373 				return (TRAN_BUSY);
2374 			}
2375 		}
2376 		vlun->svl_waiting_for_activepath = 0;
2377 bind_path:
2378 		vpkt->vpkt_path = pip;
2379 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2380 		ASSERT(svp != NULL);
2381 
2382 		psd = svp->svp_psd;
2383 		ASSERT(psd != NULL);
2384 		address = &psd->sd_address;
2385 	} else {
2386 		pkt = vpkt->vpkt_hba_pkt;
2387 		address = &pkt->pkt_address;
2388 	}
2389 
2390 	/* Verify match of specified path_instance and selected path_instance */
2391 	ASSERT((path_instance == 0) ||
2392 	    (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));
2393 
2394 	/*
2395 	 * For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever
2396 	 * target driver calls vhci_scsi_init_pkt.
2397 	 */
2398 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2399 	    vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2400 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2401 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2402 		    "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2403 		    (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2404 		pkt = vpkt->vpkt_hba_pkt;
2405 		address = &pkt->pkt_address;
2406 	}
2407 
2408 	if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2409 		pkt = scsi_init_pkt(address, pkt,
2410 		    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2411 		    vpkt->vpkt_tgt_init_scblen,
2412 		    0, flags, func, NULL);
2413 
2414 		if (pkt == NULL) {
2415 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2416 			    "!bind transport: 0x%p 0x%p 0x%p\n",
2417 			    (void *)vhci, (void *)psd, (void *)vpkt));
2418 			if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2419 				MDI_PI_ERRSTAT(vpkt->vpkt_path,
2420 				    MDI_PI_TRANSERR);
2421 				mdi_rele_path(vpkt->vpkt_path);
2422 				vpkt->vpkt_path = NULL;
2423 			}
2424 			if (pgr_sema_held) {
2425 				sema_v(&vlun->svl_pgr_sema);
2426 			}
2427 			/*
2428 			 * Looks like a fatal error; maybe the device
2429 			 * disappeared underneath us.  Give the target
2430 			 * driver another chance to retry and get
2431 			 * another path.
2432 			 */
2433 			return (TRAN_BUSY);
2434 		}
2435 	}
2436 
2437 	pkt->pkt_private = vpkt;
2438 	vpkt->vpkt_hba_pkt = pkt;
2439 	return (TRAN_ACCEPT);
2440 }
2441 
2442 
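/*
 * Log a formatted message through scsi_log() under the "scsi_vhci" label.
 */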
2443 /*PRINTFLIKE3*/
2444 void
2445 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2446 {
2447 	char		buf[256];
2448 	va_list		ap;
2449 
2450 	va_start(ap, fmt);
2451 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
2452 	va_end(ap);
2453 
2454 	scsi_log(dip, "scsi_vhci", level, buf);
2455 }
2456 
2457 /* do a PGR out with the information we've saved away */
2458 static int
2459 vhci_do_prout(scsi_vhci_priv_t *svp)
2460 {
2461 
2462 	struct scsi_pkt			*new_pkt;
2463 	struct buf			*bp;
2464 	scsi_vhci_lun_t			*vlun;
2465 	int				rval, retry, nr_retry, ua_retry;
2466 	struct scsi_extended_sense	*sns;
2467 
2468 	bp = getrbuf(KM_SLEEP);
2469 	bp->b_flags = B_WRITE;
2470 	bp->b_resid = 0;
2471 
2472 	VHCI_INCR_PATH_CMDCOUNT(svp);
2473 	vlun = svp->svp_svl;
2474 
2475 	new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
2476 	    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
2477 	    SLEEP_FUNC, NULL);
2478 	if (new_pkt == NULL) {
2479 		VHCI_DECR_PATH_CMDCOUNT(svp);
2480 		freerbuf(bp);
2481 		cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
2482 		return (0);
2483 	}
2484 	mutex_enter(&vlun->svl_mutex);
2485 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2486 	bp->b_bcount = vlun->svl_bcount;
2487 	bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
2488 	    sizeof (vlun->svl_cdb));
2489 	new_pkt->pkt_time = vlun->svl_time;
2490 	mutex_exit(&vlun->svl_mutex);
2491 	new_pkt->pkt_flags = FLAG_NOINTR;
2492 
2493 	ua_retry = nr_retry = retry = 0;
2494 again:
2495 	rval = vhci_do_scsi_cmd(new_pkt);
2496 	if (rval != 1) {
2497 		if ((new_pkt->pkt_reason == CMD_CMPLT) &&
2498 		    (SCBP_C(new_pkt) == STATUS_CHECK) &&
2499 		    (new_pkt->pkt_state & STATE_ARQ_DONE)) {
2500 			sns = &(((struct scsi_arq_status *)(uintptr_t)
2501 			    (new_pkt->pkt_scbp))->sts_sensedata);
2502 			if ((sns->es_key == KEY_UNIT_ATTENTION) ||
2503 			    (sns->es_key == KEY_NOT_READY)) {
2504 				int max_retry;
2505 				struct scsi_failover_ops *fops;
2506 				fops = vlun->svl_fops;
2507 				rval = (*fops->sfo_analyze_sense)
2508 				    (svp->svp_psd, sns,
2509 				    vlun->svl_fops_ctpriv);
2510 				if (rval == SCSI_SENSE_NOT_READY) {
2511 					max_retry = vhci_prout_not_ready_retry;
2512 					retry = nr_retry++;
2513 					delay(1*drv_usectohz(1000000));
2514 				} else {
2515 					/* chk for state change and update */
2516 					if (rval == SCSI_SENSE_STATE_CHANGED) {
2517 						int held;
2518 						VHCI_HOLD_LUN(vlun,
2519 						    VH_NOSLEEP, held);
2520 						if (!held) {
2521 							rval = TRAN_BUSY;
2522 						} else {
2523 							/* chk for alua first */
2524 							vhci_update_pathstates(
2525 							    (void *)vlun);
2526 						}
2527 					}
2528 					retry = ua_retry++;
2529 					max_retry = VHCI_MAX_PGR_RETRIES;
2530 				}
2531 				if (retry < max_retry) {
2532 					VHCI_DEBUG(4, (CE_WARN, NULL,
2533 					    "!vhci_do_prout retry 0x%x "
2534 					    "(0x%x 0x%x 0x%x)",
2535 					    SCBP_C(new_pkt),
2536 					    new_pkt->pkt_cdbp[0],
2537 					    new_pkt->pkt_cdbp[1],
2538 					    new_pkt->pkt_cdbp[2]));
2539 					goto again;
2540 				}
2541 				rval = 0;
2542 				VHCI_DEBUG(4, (CE_WARN, NULL,
2543 				    "!vhci_do_prout 0x%x "
2544 				    "(0x%x 0x%x 0x%x)",
2545 				    SCBP_C(new_pkt),
2546 				    new_pkt->pkt_cdbp[0],
2547 				    new_pkt->pkt_cdbp[1],
2548 				    new_pkt->pkt_cdbp[2]));
2549 			} else if (sns->es_key == KEY_ILLEGAL_REQUEST)
2550 				rval = VHCI_PGR_ILLEGALOP;
2551 		}
2552 	} else {
2553 		rval = 1;
2554 	}
2555 	scsi_destroy_pkt(new_pkt);
2556 	VHCI_DECR_PATH_CMDCOUNT(svp);
2557 	freerbuf(bp);
2558 	return (rval);
2559 }
2560 
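/*
 * Taskq callback, dispatched from vhci_intr() when a PROUT register
 * completes successfully on one path.  Replays the registration on every
 * other online/standby path (as a register-and-ignore-existing-key
 * service action) so the key registration is replicated on all paths,
 * then records the now-active keys and completes the target driver's
 * packet.
 */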
2561 static void
2562 vhci_run_cmd(void *arg)
2563 {
2564 	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
2565 	struct scsi_pkt		*tpkt;
2566 	scsi_vhci_priv_t	*svp;
2567 	mdi_pathinfo_t		*pip, *npip;
2568 	scsi_vhci_lun_t		*vlun;
2569 	dev_info_t		*cdip;
2570 	scsi_vhci_priv_t	*nsvp;
2571 	int			fail = 0;
2572 	int			rval;
2573 	struct vhci_pkt		*vpkt;
2574 	uchar_t			cdb_1;
2575 	vhci_prout_t		*prout;
2576 
2577 	vpkt = (struct vhci_pkt *)pkt->pkt_private;
2578 	tpkt = vpkt->vpkt_tgt_pkt;
2579 	pip = vpkt->vpkt_path;
2580 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2581 	if (svp == NULL) {
2582 		tpkt->pkt_reason = CMD_TRAN_ERR;
2583 		tpkt->pkt_statistics = STAT_ABORTED;
2584 		goto done;
2585 	}
2586 	vlun = svp->svp_svl;
2587 	prout = &vlun->svl_prout;
2588 	if (SCBP_C(pkt) != STATUS_GOOD)
2589 		fail++;
2590 	cdip = vlun->svl_dip;
2591 	pip = npip = NULL;
2592 	rval = mdi_select_path(cdip, NULL,
2593 	    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip);
2594 	if ((rval != MDI_SUCCESS) || (npip == NULL)) {
2595 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2596 		    "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
2597 		tpkt->pkt_reason = CMD_TRAN_ERR;
2598 		tpkt->pkt_statistics = STAT_ABORTED;
2599 		goto done;
2600 	}
2601 
2602 	cdb_1 = vlun->svl_cdb[1];
2603 	vlun->svl_cdb[1] &= 0xe0;
2604 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
2605 
2606 	do {
2607 		nsvp = (scsi_vhci_priv_t *)
2608 		    mdi_pi_get_vhci_private(npip);
2609 		if (nsvp == NULL) {
2610 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2611 			    "vhci_run_cmd: no "
2612 			    "client priv! 0x%p offlined?\n",
2613 			    (void *)npip));
2614 			goto next_path;
2615 		}
2616 		if (vlun->svl_first_path == npip) {
2617 			goto next_path;
2618 		} else {
2619 			if (vhci_do_prout(nsvp) != 1)
2620 				fail++;
2621 		}
2622 next_path:
2623 		pip = npip;
2624 		rval = mdi_select_path(cdip, NULL,
2625 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
2626 		    pip, &npip);
2627 		mdi_rele_path(pip);
2628 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
2629 
2630 	vlun->svl_cdb[1] = cdb_1;
2631 
2632 	if (fail) {
2633 		VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
2634 		    "couldn't be replicated on all paths",
2635 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
2636 		vhci_print_prout_keys(vlun, "vhci_run_cmd: ");
2637 
2638 		if (SCBP_C(pkt) != STATUS_GOOD) {
2639 			tpkt->pkt_reason = CMD_TRAN_ERR;
2640 			tpkt->pkt_statistics = STAT_ABORTED;
2641 		}
2642 	} else {
2643 		vlun->svl_pgr_active = 1;
2644 		vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");
2645 
2646 		bcopy((const void *)prout->service_key,
2647 		    (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
2648 		bcopy((const void *)prout->res_key,
2649 		    (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);
2650 
2651 		vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
2652 	}
2653 done:
2654 	if (SCBP_C(pkt) == STATUS_GOOD)
2655 		vlun->svl_first_path = NULL;
2656 
2657 	if (svp)
2658 		VHCI_DECR_PATH_CMDCOUNT(svp);
2659 
2660 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
2661 		scsi_destroy_pkt(pkt);
2662 		vpkt->vpkt_hba_pkt = NULL;
2663 		if (vpkt->vpkt_path) {
2664 			mdi_rele_path(vpkt->vpkt_path);
2665 			vpkt->vpkt_path = NULL;
2666 		}
2667 	}
2668 
2669 	sema_v(&vlun->svl_pgr_sema);
2670 	/*
2671 	 * The PROUT commands are not included in the automatic retry
2672 	 * mechanism; therefore, vpkt_org_vpkt should never be set here.
2673 	 */
2674 	ASSERT(vpkt->vpkt_org_vpkt == NULL);
2675 	if (tpkt->pkt_comp)
2676 		(*tpkt->pkt_comp)(tpkt);
2677 
2678 }
2679 
2680 /*
2681  * Get the keys registered with this target.  Since we will have
2682  * registered the same key with multiple initiators, strip out
2683  * any duplicate keys.
2684  *
2685  * The pointers which will be used to filter the registered keys from
2686  * the device will be stored in filter_prin and filter_pkt.  If the
2687  * allocation length of the buffer was sufficient for the number of
2688  * parameter data bytes available to be returned by the device then the
2689  * key filtering will use the keylist returned from the original
2690  * request.  If the allocation length of the buffer was not sufficient,
2691  * then the filtering will use the keylist returned from the request
2692  * that is resent below.
2693  *
2694  * If the device returns an additional length field that is greater than
2695  * the allocation length of the buffer, then allocate a new buffer which
2696  * can accommodate the number of parameter data bytes available to be
2697  * returned.  Resend the SCSI PRIN command, filter out the duplicate
2698  * keys, and return as many of the unique keys found as were originally
2699  * requested, setting the additional length field equal to the data bytes
2700  * of unique reservation keys available to be returned.
2701  *
2702  * If the device returns an additional length field that is less than or
2703  * equal to the allocation length of the buffer, then all the available
2704  * keys registered were returned by the device.  Filter out the
2705  * duplicate keys and return all of the unique keys found and set the
2706  * additional length field equal to the data bytes of the reservation
2707  * keys to be returned.
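 *
 * For example (with 8-byte keys and an 8-byte header): if the caller's
 * buffer has room for 4 keys (b_bcount = 40) but the device reports 10
 * registered keys (additional length = 80), the command is reissued with
 * an 88-byte buffer.  If only 3 of those keys are unique, all 3 are
 * copied back and the returned additional length is set to 24.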
2708  */
2709 static int
2710 vhci_do_prin(struct vhci_pkt **vpkt)
2711 {
2712 	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
2713 	    mdi_pi_get_vhci_private((*vpkt)->vpkt_path);
2714 	vhci_prin_readkeys_t *prin;
2715 	scsi_vhci_lun_t *vlun = svp->svp_svl;
2716 	struct scsi_vhci *vhci =
2717 	    ADDR2VHCI(&((*vpkt)->vpkt_tgt_pkt->pkt_address));
2718 
2719 	struct buf		*new_bp = NULL;
2720 	struct scsi_pkt		*new_pkt = NULL;
2721 	struct vhci_pkt		*new_vpkt = NULL;
2722 	int			hdr_len = 0;
2723 	int			rval = VHCI_CMD_CMPLT;
2724 	uint32_t		prin_length = 0;
2725 	uint32_t		svl_prin_length = 0;
2726 
2727 	prin = (vhci_prin_readkeys_t *)
2728 	    bp_mapin_common((*vpkt)->vpkt_tgt_init_bp, VM_NOSLEEP);
2729 
2730 	if (prin != NULL) {
2731 		prin_length = BE_32(prin->length);
2732 	}
2733 
2734 	if (prin == NULL) {
2735 		VHCI_DEBUG(5, (CE_WARN, NULL,
2736 		    "vhci_do_prin: bp_mapin_common failed."));
2737 		rval = VHCI_CMD_ERROR;
2738 	} else {
2739 		/*
2740 		 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2741 		 * information to be transferred exceeds the maximum value
2742 		 * that the ALLOCATION LENGTH field is capable of specifying,
2743 		 * the device server shall...terminate the command with CHECK
2744 		 * CONDITION status".  The ALLOCATION LENGTH field of the
2745 		 * PERSISTENT RESERVE IN command is 2 bytes. We should never
2746 		 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
2747 		 * so if we do, then it is an error!
2748 		 */
2749 
2750 		hdr_len = sizeof (prin->length) + sizeof (prin->generation);
2751 
2752 		if ((prin_length + hdr_len) > 0xFFFF) {
2753 			VHCI_DEBUG(5, (CE_NOTE, NULL,
2754 			    "vhci_do_prin: Device returned invalid "
2755 			    "length 0x%x\n", prin_length));
2756 			rval = VHCI_CMD_ERROR;
2757 		}
2758 	}
2759 
2760 	/*
2761 	 * If prin->length is greater than the byte count allocated in the
2762 	 * original buffer, then resend the request with enough buffer
2763 	 * allocated to get all of the available registered keys.
2764 	 */
2765 	if (rval != VHCI_CMD_ERROR) {
2766 		if (((*vpkt)->vpkt_tgt_init_bp->b_bcount - hdr_len) <
2767 		    prin_length) {
2768 			if ((*vpkt)->vpkt_org_vpkt == NULL) {
2769 				new_pkt = vhci_create_retry_pkt(*vpkt);
2770 				if (new_pkt != NULL) {
2771 					new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2772 
2773 					/*
2774 					 * This buf's buffer will receive
2775 					 * the prin readkeys returned from
2776 					 * the device
2777 					 */
2778 					new_bp = scsi_alloc_consistent_buf(
2779 					    &svp->svp_psd->sd_address,
2780 					    NULL, (prin_length + hdr_len),
2781 					    ((*vpkt)->vpkt_tgt_init_bp->
2782 					    b_flags & (B_READ | B_WRITE)),
2783 					    NULL_FUNC, NULL);
2784 					if (new_bp != NULL) {
2785 						if (new_bp->b_un.b_addr !=
2786 						    NULL) {
2787 
2788 							new_bp->b_bcount =
2789 							    prin_length +
2790 							    hdr_len;
2791 
2792 							new_pkt->pkt_cdbp[7] =
2793 							    (uchar_t)(new_bp->
2794 							    b_bcount >> 8);
2795 							new_pkt->pkt_cdbp[8] =
2796 							    (uchar_t)new_bp->
2797 							    b_bcount;
2798 
2799 							rval = VHCI_CMD_RETRY;
2800 						} else {
2801 							rval = VHCI_CMD_ERROR;
2802 						}
2803 					} else {
2804 						rval = VHCI_CMD_ERROR;
2805 					}
2806 				} else {
2807 					rval = VHCI_CMD_ERROR;
2808 				}
2809 			} else {
2810 				rval = VHCI_CMD_ERROR;
2811 			}
2812 		}
2813 	}
2814 
2815 	if (rval == VHCI_CMD_RETRY) {
2816 		new_vpkt->vpkt_tgt_init_bp = new_bp;
2817 
2818 		/*
2819 		 * Release the old path because it does not matter which path
2820 		 * this command is sent down.  This allows the normal bind
2821 		 * transport mechanism to be used.
2822 		 */
2823 		if ((*vpkt)->vpkt_path != NULL) {
2824 			mdi_rele_path((*vpkt)->vpkt_path);
2825 			(*vpkt)->vpkt_path = NULL;
2826 		}
2827 
2828 		/*
2829 		 * Dispatch the retry command
2830 		 */
2831 		if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2832 		    (void *) new_vpkt, KM_NOSLEEP) == NULL) {
2833 			rval = VHCI_CMD_ERROR;
2834 		} else {
2835 			/*
2836 			 * If we return VHCI_CMD_RETRY, that means the caller
2837 			 * is going to bail and wait for the reissued command
2838 			 * to complete.  In that case, we need to decrement
2839 			 * the path command count right now.  In any other
2840 			 * case, it'll be decremented by the caller.
2841 			 */
2842 			VHCI_DECR_PATH_CMDCOUNT(svp);
2843 		}
2844 	}
2845 
2846 	if ((rval != VHCI_CMD_ERROR) && (rval != VHCI_CMD_RETRY)) {
2847 		int new, old;
2848 		int data_len = 0;
2849 
2850 		data_len = prin_length / MHIOC_RESV_KEY_SIZE;
2851 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
2852 		    data_len));
2853 
2854 #ifdef DEBUG
2855 		VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
2856 		if (vhci_debug == 5)
2857 			vhci_print_prin_keys(prin, data_len);
2858 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2859 		    "vhci_do_prin: MPxIO old keys:\n"));
2860 		if (vhci_debug == 5)
2861 			vhci_print_prin_keys(&vlun->svl_prin, data_len);
2862 #endif
2863 
2864 		/*
2865 		 * Filter out all duplicate keys returned from the device
2866 		 * We know that we use a different key for every host, so we
2867 		 * can simply strip out duplicates. Otherwise we would need to
2868 		 * do more bookkeeping to figure out which keys to strip out.
2869 		 */
2870 
2871 		new = 0;
2872 
2873 		if (data_len > 0) {
2874 			vlun->svl_prin.keylist[0] = prin->keylist[0];
2875 			new++;
2876 		}
2877 
2878 		for (old = 1; old < data_len; old++) {
2879 			int j;
2880 			int match = 0;
2881 			for (j = 0; j < new; j++) {
2882 				if (bcmp(&prin->keylist[old],
2883 				    &vlun->svl_prin.keylist[j],
2884 				    sizeof (mhioc_resv_key_t)) == 0) {
2885 					match = 1;
2886 					break;
2887 				}
2888 			}
2889 			if (!match) {
2890 				vlun->svl_prin.keylist[new] =
2891 				    prin->keylist[old];
2892 				new++;
2893 			}
2894 		}
2895 
2896 		vlun->svl_prin.generation = prin->generation;
2897 		svl_prin_length = new * MHIOC_RESV_KEY_SIZE;
2898 		vlun->svl_prin.length = BE_32(svl_prin_length);
2899 
2900 		/*
2901 		 * If we arrived at this point after issuing a retry, make sure
2902 		 * that we put everything back the way it originally was so
2903 		 * that the target driver can complete the command correctly.
2904 		 */
2905 		if ((*vpkt)->vpkt_org_vpkt != NULL) {
2906 			new_bp = (*vpkt)->vpkt_tgt_init_bp;
2907 
2908 			scsi_free_consistent_buf(new_bp);
2909 
2910 			*vpkt = vhci_sync_retry_pkt(*vpkt);
2911 
2912 			/*
2913 			 * Make sure the original buffer is mapped into kernel
2914 			 * space before we try to copy the filtered keys into
2915 			 * it.
2916 			 */
2917 			prin = (vhci_prin_readkeys_t *)bp_mapin_common(
2918 			    (*vpkt)->vpkt_tgt_init_bp, VM_NOSLEEP);
2919 		}
2920 
2921 		/*
2922 		 * Now copy the desired number of prin keys into the original
2923 		 * target buffer.
2924 		 */
2925 		if (svl_prin_length <=
2926 		    ((*vpkt)->vpkt_tgt_init_bp->b_bcount - hdr_len)) {
2927 			/*
2928 			 * It is safe to return all of the available unique
2929 			 * keys
2930 			 */
2931 			bcopy(&vlun->svl_prin, prin, svl_prin_length + hdr_len);
2932 		} else {
2933 			/*
2934 			 * Not all of the available keys were requested by the
2935 			 * original command.
2936 			 */
2937 			bcopy(&vlun->svl_prin, prin,
2938 			    (*vpkt)->vpkt_tgt_init_bp->b_bcount);
2939 		}
2940 #ifdef DEBUG
2941 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2942 		    "vhci_do_prin: To Application:\n"));
2943 		if (vhci_debug == 5)
2944 			vhci_print_prin_keys(prin, new);
2945 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2946 		    "vhci_do_prin: MPxIO new keys:\n"));
2947 		if (vhci_debug == 5)
2948 			vhci_print_prin_keys(&vlun->svl_prin, new);
2949 #endif
2950 	}
2951 
2952 	if (rval == VHCI_CMD_ERROR) {
2953 		/*
2954 		 * If we arrived at this point after issuing a
2955 		 * retry, make sure that we put everything back
2956 		 * the way it originally was so that ssd can
2957 		 * complete the command correctly.
2958 		 */
2959 
2960 		if ((*vpkt)->vpkt_org_vpkt != NULL) {
2961 			new_bp = (*vpkt)->vpkt_tgt_init_bp;
2962 			if (new_bp != NULL) {
2963 				scsi_free_consistent_buf(new_bp);
2964 			}
2965 
2966 			new_vpkt = *vpkt;
2967 			*vpkt = (*vpkt)->vpkt_org_vpkt;
2968 
2969 			vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
2970 			    new_vpkt->vpkt_tgt_pkt);
2971 		}
2972 
2973 		/*
2974 		 * Mark this command completion as having an error so that
2975 		 * ssd will retry the command.
2976 		 */
2977 
2978 		(*vpkt)->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
2979 		(*vpkt)->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
2980 
2981 		rval = VHCI_CMD_CMPLT;
2982 	}
2983 
2984 	/*
2985 	 * Make sure that the semaphore is only released once.
2986 	 */
2987 	if (rval == VHCI_CMD_CMPLT) {
2988 		sema_v(&vlun->svl_pgr_sema);
2989 	}
2990 
2991 	return (rval);
2992 }
2993 
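/*
 * Command completion callback set on the pHCI packet.  Syncs status,
 * residue, state, and reason back to the target driver's packet, runs
 * sense analysis through the failover module to detect or track an
 * externally initiated failover, handles SCSI-II RESERVE/RELEASE and PGR
 * bookkeeping, logs path errors, then (unless partial DMA is in effect)
 * frees the transport resources and calls the target's completion routine.
 */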
2994 static void
2995 vhci_intr(struct scsi_pkt *pkt)
2996 {
2997 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
2998 	struct scsi_pkt		*tpkt;
2999 	scsi_vhci_priv_t	*svp;
3000 	scsi_vhci_lun_t		*vlun;
3001 	int			rval, held;
3002 	struct scsi_failover_ops	*fops;
3003 	struct scsi_extended_sense	*sns;
3004 	mdi_pathinfo_t		*lpath;
3005 	static char		*timeout_err = "Command Timeout";
3006 	static char		*parity_err = "Parity Error";
3007 	char			*err_str = NULL;
3008 	dev_info_t		*vdip, *cdip, *pdip;
3009 	char			*cpath, *dpath;
3010 
3011 	ASSERT(vpkt != NULL);
3012 	tpkt = vpkt->vpkt_tgt_pkt;
3013 	ASSERT(tpkt != NULL);
3014 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3015 	ASSERT(svp != NULL);
3016 	vlun = svp->svp_svl;
3017 	ASSERT(vlun != NULL);
3018 	lpath = vpkt->vpkt_path;
3019 
3020 	/*
3021 	 * sync up the target driver's pkt with the pkt that
3022 	 * we actually used
3023 	 */
3024 	*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
3025 	tpkt->pkt_resid = pkt->pkt_resid;
3026 	tpkt->pkt_state = pkt->pkt_state;
3027 	tpkt->pkt_statistics = pkt->pkt_statistics;
3028 	tpkt->pkt_reason = pkt->pkt_reason;
3029 
3030 	/* Return path_instance information back to the target driver. */
3031 	if (scsi_pkt_allocated_correctly(tpkt)) {
3032 		if (scsi_pkt_allocated_correctly(pkt)) {
3033 			/*
3034 			 * If both packets were correctly allocated,
3035 			 * return path returned by pHCI.
3036 			 */
3037 			tpkt->pkt_path_instance = pkt->pkt_path_instance;
3038 		} else {
3039 			/* Otherwise return path of pHCI we used */
3040 			tpkt->pkt_path_instance =
3041 			    mdi_pi_get_path_instance(lpath);
3042 		}
3043 	}
3044 
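	/*
	 * A PROUT register or PRIN that failed will not reach vhci_run_cmd
	 * or vhci_do_prin, so release the PGR semaphore (taken in
	 * vhci_bind_transport) here.
	 */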
3045 	if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3046 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3047 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3048 		if ((SCBP_C(pkt) != STATUS_GOOD) ||
3049 		    (pkt->pkt_reason != CMD_CMPLT)) {
3050 			sema_v(&vlun->svl_pgr_sema);
3051 		}
3052 	} else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3053 		if (pkt->pkt_reason != CMD_CMPLT ||
3054 		    (SCBP_C(pkt) != STATUS_GOOD)) {
3055 			sema_v(&vlun->svl_pgr_sema);
3056 		}
3057 	}
3058 
3059 	switch (pkt->pkt_reason) {
3060 	case CMD_CMPLT:
3061 		/*
3062 		 * cmd completed without transport error, check for scsi errors
3063 		 */
3064 		switch (*(pkt->pkt_scbp)) {
3065 		case STATUS_CHECK:
3066 			if (pkt->pkt_state & STATE_ARQ_DONE) {
3067 				sns = &(((struct scsi_arq_status *)(uintptr_t)
3068 				    (pkt->pkt_scbp))->sts_sensedata);
3069 				fops = vlun->svl_fops;
3070 				ASSERT(fops != NULL);
3071 				VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3072 				    "Received sns key %x  esc %x  escq %x\n",
3073 				    sns->es_key, sns->es_add_code,
3074 				    sns->es_qual_code));
3075 
3076 				if (vlun->svl_waiting_for_activepath == 1) {
3077 					/*
3078 					 * if we are here it means we are
3079 					 * in the midst of a probe/attach
3080 					 * through a passive path; this
3081 					 * case is exempt from sense analysis
3082 					 * for detection of ext. failover
3083 					 * because that would unnecessarily
3084 					 * increase attach time.
3085 					 */
3086 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3087 					    vpkt->vpkt_tgt_init_scblen);
3088 					break;
3089 				}
3090 				if (sns->es_add_code == VHCI_SCSI_PERR) {
3091 					/*
3092 					 * parity error
3093 					 */
3094 					err_str = parity_err;
3095 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3096 					    vpkt->vpkt_tgt_init_scblen);
3097 					break;
3098 				}
3099 				rval = (*fops->sfo_analyze_sense)
3100 				    (svp->svp_psd, sns, vlun->svl_fops_ctpriv);
3101 				if ((rval == SCSI_SENSE_NOFAILOVER) ||
3102 				    (rval == SCSI_SENSE_UNKNOWN) ||
3103 				    (rval == SCSI_SENSE_NOT_READY)) {
3104 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3105 					    vpkt->vpkt_tgt_init_scblen);
3106 					break;
3107 				} else if (rval == SCSI_SENSE_STATE_CHANGED) {
3108 					struct scsi_vhci	*vhci;
3109 					vhci = ADDR2VHCI(&tpkt->pkt_address);
3110 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3111 					if (!held) {
3112 						/*
3113 						 * looks like some other thread
3114 						 * has already detected this
3115 						 * condition
3116 						 */
3117 						tpkt->pkt_state &=
3118 						    ~STATE_ARQ_DONE;
3119 						*(tpkt->pkt_scbp) =
3120 						    STATUS_BUSY;
3121 						break;
3122 					}
3123 					(void) taskq_dispatch(
3124 					    vhci->vhci_update_pathstates_taskq,
3125 					    vhci_update_pathstates,
3126 					    (void *)vlun, KM_SLEEP);
3127 				} else {
3128 					/*
3129 					 * externally initiated failover
3130 					 * has occurred or is in progress
3131 					 */
3132 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3133 					if (!held) {
3134 						/*
3135 						 * looks like some other thread
3136 						 * has already detected this
3137 						 * condition
3138 						 */
3139 						tpkt->pkt_state &=
3140 						    ~STATE_ARQ_DONE;
3141 						*(tpkt->pkt_scbp) =
3142 						    STATUS_BUSY;
3143 						break;
3144 					} else {
3145 						rval = vhci_handle_ext_fo
3146 						    (pkt, rval);
3147 						if (rval == BUSY_RETURN) {
3148 							tpkt->pkt_state &=
3149 							    ~STATE_ARQ_DONE;
3150 							*(tpkt->pkt_scbp) =
3151 							    STATUS_BUSY;
3152 							break;
3153 						}
3154 						bcopy(pkt->pkt_scbp,
3155 						    tpkt->pkt_scbp,
3156 						    vpkt->vpkt_tgt_init_scblen);
3157 						break;
3158 					}
3159 				}
3160 			}
3161 			break;
3162 
3163 		/*
3164 		 * If this is a good SCSI-II RELEASE cmd completion then restore
3165 		 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3166 		 * If this is a good SCSI-II RESERVE cmd completion then set
3167 		 * VLUN_RESERVE_ACTIVE_FLG.
3168 		 */
3169 		case STATUS_GOOD:
3170 			if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3171 			    (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3172 				(void) mdi_set_lb_policy(vlun->svl_dip,
3173 				    vlun->svl_lb_policy_save);
3174 				vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3175 				VHCI_DEBUG(1, (CE_WARN, NULL,
3176 				    "!vhci_intr: vlun 0x%p release path 0x%p",
3177 				    (void *)vlun, (void *)vpkt->vpkt_path));
3178 			}
3179 
3180 			if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3181 			    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3182 				vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3183 				vlun->svl_resrv_pip = vpkt->vpkt_path;
3184 				VHCI_DEBUG(1, (CE_WARN, NULL,
3185 				    "!vhci_intr: vlun 0x%p reserved path 0x%p",
3186 				    (void *)vlun, (void *)vpkt->vpkt_path));
3187 			}
3188 			break;
3189 
3190 		case STATUS_RESERVATION_CONFLICT:
3191 			VHCI_DEBUG(1, (CE_WARN, NULL,
3192 			    "!vhci_intr: vlun 0x%p "
3193 			    "reserve conflict on path 0x%p",
3194 			    (void *)vlun, (void *)vpkt->vpkt_path));
3195 			/* FALLTHROUGH */
3196 		default:
3197 			break;
3198 		}
3199 
3200 		/*
3201 		 * Update I/O completion statistics for the path
3202 		 */
3203 		mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3204 
3205 		/*
3206 		 * Command completed successfully, release the dma binding and
3207 		 * destroy the transport side of the packet.
3208 		 */
3209 		if ((pkt->pkt_cdbp[0] ==  SCMD_PROUT) &&
3210 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3211 		    ((pkt->pkt_cdbp[1] & 0x1f) ==
3212 		    VHCI_PROUT_R_AND_IGNORE))) {
3213 			if (SCBP_C(pkt) == STATUS_GOOD) {
3214 				ASSERT(vlun->svl_taskq);
3215 				svp->svp_last_pkt_reason = pkt->pkt_reason;
3216 				(void) taskq_dispatch(vlun->svl_taskq,
3217 				    vhci_run_cmd, pkt, KM_SLEEP);
3218 				return;
3219 			}
3220 		}
3221 		if ((SCBP_C(pkt) == STATUS_GOOD) &&
3222 		    (pkt->pkt_cdbp[0] == SCMD_PRIN) &&
3223 		    vpkt->vpkt_tgt_init_bp) {
3224 			/*
3225 			 * If the action (value in byte 1 of the cdb) is zero,
3226 			 * we're reading keys, and that's the only condition
3227 			 * where we need to be concerned with filtering keys
3228 			 * and potential retries.  Otherwise, we simply signal
3229 			 * the semaphore and move on.
3230 			 */
3231 			if (pkt->pkt_cdbp[1] == 0) {
3232 				/*
3233 				 * If this is the completion of an internal
3234 				 * retry then we need to make sure that the
3235 				 * pkt and tpkt pointers are readjusted so
3236 				 * the calls to scsi_destroy_pkt and pkt_comp
3237 				 * below work correctly.
3238 				 */
3239 				if (vpkt->vpkt_org_vpkt != NULL) {
3240 					pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3241 					tpkt = vpkt->vpkt_org_vpkt->
3242 					    vpkt_tgt_pkt;
3243 
3244 					/*
3245 					 * If this command was issued through
3246 					 * the taskq then we need to clear
3247 					 * this flag for proper processing in
3248 					 * the case of a retry from the target
3249 					 * driver.
3250 					 */
3251 					vpkt->vpkt_state &=
3252 					    ~VHCI_PKT_THRU_TASKQ;
3253 				}
3254 
3255 				/*
3256 				 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3257 				 * vpkt will contain the address of the
3258 				 * original vpkt
3259 				 */
3260 				if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3261 					/*
3262 					 * The command has been resent to get
3263 					 * all the keys from the device.  Don't
3264 					 * complete the command with ssd until
3265 					 * the retry completes.
3266 					 */
3267 					return;
3268 				}
3269 			} else {
3270 				sema_v(&vlun->svl_pgr_sema);
3271 			}
3272 		}
3273 
3274 		break;
3275 
3276 	case CMD_TIMEOUT:
3277 		if ((pkt->pkt_statistics &
3278 		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
3279 
3280 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3281 			    "!scsi vhci timeout invoked\n"));
3282 
3283 			(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3284 			    FALSE, VHCI_DEPTH_ALL);
3285 		}
3286 		MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3287 		tpkt->pkt_statistics |= STAT_ABORTED;
3288 		err_str = timeout_err;
3289 		break;
3290 
3291 	case CMD_TRAN_ERR:
3292 		/*
3293 		 * This status is returned if the transport has sent the cmd
3294 		 * down the link to the target and then some error occurs.
3295 		 * In the case of a SCSI-II RESERVE cmd, we don't know whether
3296 		 * the reservation was accepted by the target, so we need
3297 		 * to clear the reservation.
3298 		 */
3299 		if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3300 		    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3301 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3302 			    " cmd_tran_err for scsi-2 reserve cmd\n"));
3303 			if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3304 			    TRUE, VHCI_DEPTH_TARGET)) {
3305 				VHCI_DEBUG(1, (CE_WARN, NULL,
3306 				    "!vhci_intr cmd_tran_err reset failed!"));
3307 			}
3308 		}
3309 		break;
3310 
3311 	case CMD_DEV_GONE:
3312 		tpkt->pkt_reason = CMD_CMPLT;
3313 		tpkt->pkt_state = STATE_GOT_BUS |
3314 		    STATE_GOT_TARGET | STATE_SENT_CMD |
3315 		    STATE_GOT_STATUS;
3316 		*(tpkt->pkt_scbp) = STATUS_BUSY;
3317 		break;
3318 
3319 	default:
3320 		break;
3321 	}
3322 
3323 	/*
3324 	 * The SCSI-II RESERVE cmd has been serviced by the lower layers;
3325 	 * clear the flag so the lun is no longer QUIESCED.
3326 	 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3327 	 * is retried, a taskq is again dispatched to service it.  Otherwise
3328 	 * it may lead to a system hang if the retry happens in interrupt
3329 	 * context.
3330 	 */
3331 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3332 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3333 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3334 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3335 	}
3336 
3337 	/*
3338 	 * vpkt_org_vpkt should always be NULL here if the retry command
3339 	 * has been successfully processed.  If vpkt_org_vpkt != NULL at
3340 	 * this point, it is an error so restore the original vpkt and
3341 	 * return an error to the target driver so it can retry the
3342 	 * command as appropriate.
3343 	 */
3344 	if (vpkt->vpkt_org_vpkt != NULL) {
3345 		struct vhci_pkt *new_vpkt = vpkt;
3346 		vpkt = vpkt->vpkt_org_vpkt;
3347 
3348 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3349 		    new_vpkt->vpkt_tgt_pkt);
3350 
3351 		/*
3352 		 * Mark this command completion as having an error so that
3353 		 * ssd will retry the command.
3354 		 */
3355 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3356 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3357 
3358 		pkt = vpkt->vpkt_hba_pkt;
3359 		tpkt = vpkt->vpkt_tgt_pkt;
3360 	}
3361 
3362 	if ((err_str != NULL) && (pkt->pkt_reason !=
3363 	    svp->svp_last_pkt_reason)) {
3364 		cdip = vlun->svl_dip;
3365 		pdip = mdi_pi_get_phci(vpkt->vpkt_path);
3366 		vdip = ddi_get_parent(cdip);
3367 		cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3368 		dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3369 		vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s (%s%d)",
3370 		    ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
3371 		    ddi_get_instance(cdip), err_str,
3372 		    ddi_pathname(pdip, dpath), ddi_driver_name(pdip),
3373 		    ddi_get_instance(pdip));
3374 		kmem_free(cpath, MAXPATHLEN);
3375 		kmem_free(dpath, MAXPATHLEN);
3376 	}
3377 	svp->svp_last_pkt_reason = pkt->pkt_reason;
3378 	VHCI_DECR_PATH_CMDCOUNT(svp);
3379 
3380 	/*
3381 	 * For PARTIAL_DMA, vhci should not free the path.
3382 	 * Target driver will call into vhci_scsi_dmafree or
3383 	 * destroy pkt to release this path.
3384 	 */
3385 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
3386 		scsi_destroy_pkt(pkt);
3387 		vpkt->vpkt_hba_pkt = NULL;
3388 		if (vpkt->vpkt_path) {
3389 			mdi_rele_path(vpkt->vpkt_path);
3390 			vpkt->vpkt_path = NULL;
3391 		}
3392 	}
3393 
3394 	if (tpkt->pkt_comp) {
3395 		(*tpkt->pkt_comp)(tpkt);
3396 	}
3397 }
3398 
3399 /*
3400  * Two possibilities: (1) failover has completed, or
3401  * (2) failover is in progress.  For the former,
3402  * update our path states; for the latter,
3403  * initiate a scsi_watch request to determine
3404  * when the failover completes - the vlun is HELD
3405  * until failover completes; BUSY is returned to the
3406  * upper layer in both cases.
3407  */
3408 static int
3409 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
3410 {
3411 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3412 	struct scsi_pkt		*tpkt;
3413 	scsi_vhci_priv_t	*svp;
3414 	scsi_vhci_lun_t		*vlun;
3415 	struct scsi_vhci	*vhci;
3416 	scsi_vhci_swarg_t	*swarg;
3417 	char			*path;
3418 
3419 	ASSERT(vpkt != NULL);
3420 	tpkt = vpkt->vpkt_tgt_pkt;
3421 	ASSERT(tpkt != NULL);
3422 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3423 	ASSERT(svp != NULL);
3424 	vlun = svp->svp_svl;
3425 	ASSERT(vlun != NULL);
3426 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3427 
3428 	vhci = ADDR2VHCI(&tpkt->pkt_address);
3429 
3430 	if (fostat == SCSI_SENSE_INACTIVE) {
3431 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
3432 		    "detected for %s; updating path states...\n",
3433 		    vlun->svl_lun_wwn));
3434 		/*
3435 		 * set the vlun flag to indicate to the task that the target
3436 		 * port group needs updating
3437 		 */
3438 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3439 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3440 		    vhci_update_pathstates, (void *)vlun, KM_SLEEP);
3441 	} else {
3442 		path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3443 		vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
3444 		    "!%s (%s%d): Waiting for externally initiated failover "
3445 		    "to complete", ddi_pathname(vlun->svl_dip, path),
3446 		    ddi_driver_name(vlun->svl_dip),
3447 		    ddi_get_instance(vlun->svl_dip));
3448 		kmem_free(path, MAXPATHLEN);
3449 		swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
3450 		if (swarg == NULL) {
3451 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
3452 			    "request packet allocation for %s failed....\n",
3453 			    vlun->svl_lun_wwn));
3454 			VHCI_RELEASE_LUN(vlun);
3455 			return (PKT_RETURN);
3456 		}
3457 		swarg->svs_svp = svp;
3458 		swarg->svs_tos = ddi_get_time();
3459 		swarg->svs_pi = vpkt->vpkt_path;
3460 		swarg->svs_release_lun = 0;
3461 		swarg->svs_done = 0;
3462 		/*
3463 		 * place a hold on the path...we don't want it to
3464 		 * vanish while scsi_watch is in progress
3465 		 */
3466 		mdi_hold_path(vpkt->vpkt_path);
3467 		svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3468 		    VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3469 		    (caddr_t)swarg);
3470 	}
3471 	return (BUSY_RETURN);
3472 }
3473 
3474 /*
3475  * vhci_efo_watch_cb:
3476  *	Callback from scsi_watch request to check the failover status.
3477  *	Completion is either due to successful failover or timeout.
3478  *	Upon successful completion, vhci_update_path_states is called.
3479  *	For timeout condition, vhci_efo_done is called.
3480  *	Always returns 0 to scsi_watch to keep retrying until vhci_efo_done
3481  *	terminates this request properly in a separate thread.
3482  */
3483 
3484 static int
3485 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3486 {
3487 	struct scsi_status		*statusp = resultp->statusp;
3488 	struct scsi_extended_sense	*sensep = resultp->sensep;
3489 	struct scsi_pkt			*pkt = resultp->pkt;
3490 	scsi_vhci_swarg_t		*swarg;
3491 	scsi_vhci_priv_t		*svp;
3492 	scsi_vhci_lun_t			*vlun;
3493 	struct scsi_vhci		*vhci;
3494 	dev_info_t			*vdip;
3495 	int				rval, updt_paths;
3496 
3497 	swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3498 	svp = swarg->svs_svp;
3499 	if (swarg->svs_done) {
3500 		/*
3501 		 * Failover has already completed or timed out.
3502 		 * Waiting for vhci_efo_done to terminate this scsi_watch.
3503 		 */
3504 		return (0);
3505 	}
3506 
3507 	ASSERT(svp != NULL);
3508 	vlun = svp->svp_svl;
3509 	ASSERT(vlun != NULL);
3510 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3511 	vlun->svl_efo_update_path = 0;
3512 	vdip = ddi_get_parent(vlun->svl_dip);
3513 	vhci = ddi_get_soft_state(vhci_softstate,
3514 	    ddi_get_instance(vdip));
3515 
3516 	updt_paths = 0;
3517 
3518 	if (pkt->pkt_reason != CMD_CMPLT) {
3519 		if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3520 			swarg->svs_release_lun = 1;
3521 			goto done;
3522 		}
3523 		return (0);
3524 	}
3525 	if (*((unsigned char *)statusp) == STATUS_CHECK) {
3526 		rval = (*(vlun->svl_fops->sfo_analyze_sense))
3527 		    (svp->svp_psd, sensep, vlun->svl_fops_ctpriv);
3528 		switch (rval) {
3529 			/*
3530 			 * Only update path states if the path is definitely
3531 			 * inactive, or if no failover occurred.  For all
3532 			 * other check conditions continue pinging.  An
3533 			 * unexpected check condition shouldn't cause pinging
3534 			 * to complete prematurely.
3535 			 */
3536 			case SCSI_SENSE_INACTIVE:
3537 			case SCSI_SENSE_NOFAILOVER:
3538 				updt_paths = 1;
3539 				break;
3540 			default:
3541 				if ((ddi_get_time() - swarg->svs_tos)
3542 				    >= VHCI_EXTFO_TIMEOUT) {
3543 					swarg->svs_release_lun = 1;
3544 					goto done;
3545 				}
3546 				return (0);
3547 		}
3548 	} else if (*((unsigned char *)statusp) ==
3549 	    STATUS_RESERVATION_CONFLICT) {
3550 		updt_paths = 1;
3551 	} else if ((*((unsigned char *)statusp)) &
3552 	    (STATUS_BUSY | STATUS_QFULL)) {
3553 		return (0);
3554 	}
3555 	if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3556 	    (updt_paths == 1)) {
3557 		/*
3558 		 * we got here because we had detected an
3559 		 * externally initiated failover; things
3560 		 * have settled down now, so let's
3561 		 * start up a task to update the
3562 		 * path states and target port group
3563 		 */
3564 		vlun->svl_efo_update_path = 1;
3565 		swarg->svs_done = 1;
3566 		vlun->svl_swarg = swarg;
3567 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3568 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3569 		    vhci_update_pathstates, (void *)vlun,
3570 		    KM_SLEEP);
3571 		return (0);
3572 	}
3573 	if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3574 		swarg->svs_release_lun = 1;
3575 		goto done;
3576 	}
3577 	return (0);
3578 done:
3579 	swarg->svs_done = 1;
3580 	(void) taskq_dispatch(vhci->vhci_taskq,
3581 	    vhci_efo_done, (void *)swarg, KM_SLEEP);
3582 	return (0);
3583 }
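/*
 * Editorial note: vhci_efo_watch_cb() above effectively implements the
 * following decision table (a summary; the code is authoritative):
 *
 *	pkt_reason != CMD_CMPLT		keep polling (until EFO timeout)
 *	CHECK, sense INACTIVE or
 *	    NOFAILOVER			settled; dispatch pathstate update
 *	CHECK, any other sense		keep polling (until EFO timeout)
 *	RESERVATION CONFLICT		settled; dispatch pathstate update
 *	BUSY/QFULL			keep polling
 *	GOOD				settled; dispatch pathstate update
 *	EFO timeout			dispatch vhci_efo_done for cleanup
 */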
3584 
3585 /*
3586  * vhci_efo_done:
3587  *	Cleanly terminates scsi_watch and frees up resources.
3588  *	Called as a taskq function from vhci_efo_watch_cb for the EFO
3589  *	timeout condition, or by vhci_update_pathstates invoked upon
3590  *	completion of an externally initiated failover.
3591  */
3592 static void
3593 vhci_efo_done(void *arg)
3594 {
3595 	scsi_vhci_lun_t			*vlun;
3596 	scsi_vhci_swarg_t		*swarg = (scsi_vhci_swarg_t *)arg;
3597 	scsi_vhci_priv_t		*svp = swarg->svs_svp;
3598 	ASSERT(svp);
3599 
3600 	vlun = svp->svp_svl;
3601 	ASSERT(vlun);
3602 
3603 	/* Wait for clean termination of scsi_watch */
3604 	(void) scsi_watch_request_terminate(svp->svp_sw_token,
3605 	    SCSI_WATCH_TERMINATE_WAIT);
3606 	svp->svp_sw_token = NULL;
3607 
3608 	/* release path and free up resources to indicate failover completion */
3609 	mdi_rele_path(swarg->svs_pi);
3610 	if (swarg->svs_release_lun) {
3611 		VHCI_RELEASE_LUN(vlun);
3612 	}
3613 	kmem_free((void *)swarg, sizeof (*swarg));
3614 }
3615 
3616 /*
3617  * Update the path states
3618  * vlun should be HELD when this is invoked.
3619  * Calls vhci_efo_done to clean up resources allocated for EFO.
3620  */
3621 void
3622 vhci_update_pathstates(void *arg)
3623 {
3624 	mdi_pathinfo_t			*pip, *npip;
3625 	dev_info_t			*dip, *pdip;
3626 	struct scsi_failover_ops	*fo;
3627 	struct scsi_vhci_priv		*svp;
3628 	struct scsi_device		*psd;
3629 	struct scsi_path_opinfo		opinfo;
3630 	char				*pclass, *tptr;
3631 	struct scsi_vhci_lun		*vlun = (struct scsi_vhci_lun *)arg;
3632 	int				sps; /* mdi_select_path() status */
3633 	char				*cpath, *dpath;
3634 	struct scsi_vhci		*vhci;
3635 	struct scsi_pkt			*pkt;
3636 	struct buf			*bp;
3637 	int				reserve_conflict = 0;
3638 
3639 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3640 	dip  = vlun->svl_dip;
3641 	pip = npip = NULL;
3642 
3643 	vhci = ddi_get_soft_state(vhci_softstate,
3644 	    ddi_get_instance(ddi_get_parent(dip)));
3645 
3646 	sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3647 	    MDI_SELECT_STANDBY_PATH), NULL, &npip);
3648 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3649 		goto done;
3650 	}
3651 
3652 	fo = vlun->svl_fops;
3653 	do {
3654 		pip = npip;
3655 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3656 		psd = svp->svp_psd;
3657 		if ((*fo->sfo_path_get_opinfo)(psd, &opinfo,
3658 		    vlun->svl_fops_ctpriv) != 0) {
3659 			sps = mdi_select_path(dip, NULL,
3660 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
3661 			    pip, &npip);
3662 			mdi_rele_path(pip);
3663 			continue;
3664 		}
3665 
3666 		if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3667 		    MDI_SUCCESS) {
3668 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3669 			    "!vhci_update_pathstates: prop lookup failed for "
3670 			    "path 0x%p\n", (void *)pip));
3671 			sps = mdi_select_path(dip, NULL,
3672 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
3673 			    pip, &npip);
3674 			mdi_rele_path(pip);
3675 			continue;
3676 		}
3677 
3678 		/*
3679 		 * Need to update the "path-class" property
3680 		 * value in the device tree if different
3681 		 * from the existing value.
3682 		 */
3683 		if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3684 			(void) mdi_prop_update_string(pip, "path-class",
3685 			    opinfo.opinfo_path_attr);
3686 		}
3687 
3688 		/*
3689 		 * Only change the state if needed, i.e. don't call
3690 		 * mdi_pi_set_state to ONLINE a path if it's already
3691 		 * ONLINE.  Same for STANDBY paths.
3692 		 */
3693 
3694 		if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
3695 		    opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
3696 			if (!(MDI_PI_IS_ONLINE(pip))) {
3697 				VHCI_DEBUG(1, (CE_NOTE, NULL,
3698 				    "!vhci_update_pathstates: marking path"
3699 				    " 0x%p as ONLINE\n", (void *)pip));
3700 				pdip = mdi_pi_get_phci(pip);
3701 				cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3702 				dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3703 				vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s"
3704 				    " (%s%d): path %s (%s%d) target address %s"
3705 				    " is now ONLINE because of"
3706 				    " an externally initiated failover",
3707 				    ddi_pathname(dip, cpath),
3708 				    ddi_driver_name(dip),
3709 				    ddi_get_instance(dip),
3710 				    ddi_pathname(pdip, dpath),
3711 				    ddi_driver_name(pdip),
3712 				    ddi_get_instance(pdip),
3713 				    mdi_pi_get_addr(pip));
3714 				kmem_free(cpath, MAXPATHLEN);
3715 				kmem_free(dpath, MAXPATHLEN);
3716 				mdi_pi_set_state(pip,
3717 				    MDI_PATHINFO_STATE_ONLINE);
3718 				mdi_pi_set_preferred(pip,
3719 				    opinfo.opinfo_preferred);
3720 				tptr = kmem_alloc(strlen
3721 				    (opinfo.opinfo_path_attr)+1, KM_SLEEP);
3722 				(void) strlcpy(tptr, opinfo.opinfo_path_attr,
3723 				    (strlen(opinfo.opinfo_path_attr)+1));
3724 				mutex_enter(&vlun->svl_mutex);
3725 				if (vlun->svl_active_pclass != NULL) {
3726 					kmem_free(vlun->svl_active_pclass,
3727 					    strlen(vlun->svl_active_pclass)+1);
3728 				}
3729 				vlun->svl_active_pclass = tptr;
3730 				if (vlun->svl_waiting_for_activepath) {
3731 					vlun->svl_waiting_for_activepath = 0;
3732 				}
3733 				mutex_exit(&vlun->svl_mutex);
3734 				/* Check for Reservation Conflict via a polled READ */
3735 				bp = scsi_alloc_consistent_buf(
3736 				    &svp->svp_psd->sd_address,
3737 				    (struct buf *)NULL, DEV_BSIZE, B_READ,
3738 				    NULL, NULL);
3739 				if (!bp) {
3740 					VHCI_DEBUG(1, (CE_NOTE, NULL,
3741 					    "vhci_update_pathstates: "
3742 					    "!No resources (buf)\n"));
3743 					mdi_rele_path(pip);
3744 					goto done;
3745 				}
3746 				pkt = scsi_init_pkt(&svp->svp_psd->sd_address,
3747 				    NULL, bp, CDB_GROUP1,
3748 				    sizeof (struct scsi_arq_status), 0,
3749 				    PKT_CONSISTENT, NULL, NULL);
3750 				if (pkt) {
3751 					(void) scsi_setup_cdb((union scsi_cdb *)
3752 					    (uintptr_t)pkt->pkt_cdbp,
3753 					    SCMD_READ, 1, 1, 0);
3754 					pkt->pkt_time = 3*30;
3755 					pkt->pkt_flags = FLAG_NOINTR;
3756 					pkt->pkt_path_instance =
3757 					    mdi_pi_get_path_instance(pip);
3758 
3759 					if ((scsi_transport(pkt) ==
3760 					    TRAN_ACCEPT) && (pkt->pkt_reason
3761 					    == CMD_CMPLT) && (SCBP_C(pkt) ==
3762 					    STATUS_RESERVATION_CONFLICT)) {
3763 						reserve_conflict = 1;
3764 					}
3765 					scsi_destroy_pkt(pkt);
3766 				}
3767 				scsi_free_consistent_buf(bp);
3768 			} else if (MDI_PI_IS_ONLINE(pip)) {
3769 				if (strcmp(pclass, opinfo.opinfo_path_attr)
3770 				    != 0) {
3771 					mdi_pi_set_preferred(pip,
3772 					    opinfo.opinfo_preferred);
3773 					mutex_enter(&vlun->svl_mutex);
3774 					if (vlun->svl_active_pclass == NULL ||
3775 					    strcmp(opinfo.opinfo_path_attr,
3776 					    vlun->svl_active_pclass) != 0) {
3777 						mutex_exit(&vlun->svl_mutex);
3778 						tptr = kmem_alloc(strlen
3779 						    (opinfo.opinfo_path_attr)+1,
3780 						    KM_SLEEP);
3781 						(void) strlcpy(tptr,
3782 						    opinfo.opinfo_path_attr,
3783 						    (strlen
3784 						    (opinfo.opinfo_path_attr)
3785 						    +1));
3786 						mutex_enter(&vlun->svl_mutex);
3787 					} else {
3788 						/*
3789 						 * No need to update
3790 						 * svl_active_pclass
3791 						 */
3792 						tptr = NULL;
3793 						mutex_exit(&vlun->svl_mutex);
3794 					}
3795 					if (tptr) {
3796 						if (vlun->svl_active_pclass
3797 						    != NULL) {
3798 							kmem_free(vlun->
3799 							    svl_active_pclass,
3800 							    strlen(vlun->
3801 							    svl_active_pclass)
3802 							    +1);
3803 						}
3804 						vlun->svl_active_pclass = tptr;
3805 						mutex_exit(&vlun->svl_mutex);
3806 					}
3807 				}
3808 			}
3809 		} else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
3810 		    !(MDI_PI_IS_STANDBY(pip))) {
3811 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3812 			    "!vhci_update_pathstates: marking path"
3813 			    " 0x%p as STANDBY\n", (void *)pip));
3814 			pdip = mdi_pi_get_phci(pip);
3815 			cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3816 			dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3817 			vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s"
3818 			    " (%s%d): path %s (%s%d) target address %s"
3819 			    " is now STANDBY because of"
3820 			    " an externally initiated failover",
3821 			    ddi_pathname(dip, cpath),
3822 			    ddi_driver_name(dip),
3823 			    ddi_get_instance(dip),
3824 			    ddi_pathname(pdip, dpath),
3825 			    ddi_driver_name(pdip),
3826 			    ddi_get_instance(pdip),
3827 			    mdi_pi_get_addr(pip));
3828 			kmem_free(cpath, MAXPATHLEN);
3829 			kmem_free(dpath, MAXPATHLEN);
3830 			mdi_pi_set_state(pip,
3831 			    MDI_PATHINFO_STATE_STANDBY);
3832 			mdi_pi_set_preferred(pip,
3833 			    opinfo.opinfo_preferred);
3834 			mutex_enter(&vlun->svl_mutex);
3835 			if (vlun->svl_active_pclass != NULL) {
3836 				if (strcmp(vlun->svl_active_pclass,
3837 				    opinfo.opinfo_path_attr) == 0) {
3838 					kmem_free(vlun->
3839 					    svl_active_pclass,
3840 					    strlen(vlun->
3841 					    svl_active_pclass)+1);
3842 					vlun->svl_active_pclass = NULL;
3843 				}
3844 			}
3845 			mutex_exit(&vlun->svl_mutex);
3846 		}
3847 		(void) mdi_prop_free(pclass);
3848 		sps = mdi_select_path(dip, NULL,
3849 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
3850 		    pip, &npip);
3851 		mdi_rele_path(pip);
3852 
3853 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
3854 
3855 	/*
3856 	 * Check to see if this vlun has an active SCSI-II RESERVE.  If so
3857 	 * clear the reservation by sending a reset, so the host doesn't
3858 	 * receive a reservation conflict.
3859 	 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun, and explicitly
3860 	 * notify ssd of the reset.
3861 	 */
3862 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
3863 		if (reserve_conflict && (vlun->svl_xlf_capable == 0)) {
3864 			(void) vhci_recovery_reset(vlun,
3865 			    &svp->svp_psd->sd_address, FALSE,
3866 			    VHCI_DEPTH_TARGET);
3867 		}
3868 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3869 		mutex_enter(&vhci->vhci_mutex);
3870 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
3871 		    &vhci->vhci_reset_notify_listf);
3872 		mutex_exit(&vhci->vhci_mutex);
3873 	}
3874 	if (vlun->svl_flags & VLUN_UPDATE_TPG) {
3875 		/*
3876 		 * Update the AccessState of related MP-API TPGs
3877 		 */
3878 		(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
3879 		vlun->svl_flags &= ~VLUN_UPDATE_TPG;
3880 	}
3881 done:
3882 	if (vlun->svl_efo_update_path) {
3883 		vlun->svl_efo_update_path = 0;
3884 		vhci_efo_done(vlun->svl_swarg);
3885 		vlun->svl_swarg = 0;
3886 	}
3887 	VHCI_RELEASE_LUN(vlun);
3888 }
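/*
 * Editorial sketch (not part of the driver): vhci_update_pathstates()
 * above uses the standard mdi_select_path() cursor idiom to visit every
 * ONLINE and STANDBY path of a client.  Reduced to its essentials, with
 * a hypothetical per-path action do_path(), the walk looks like this:
 */
#if 0
static void
walk_paths_example(dev_info_t *cdip)
{
	mdi_pathinfo_t	*pip, *npip;
	int		sps;

	sps = mdi_select_path(cdip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &npip);
	while ((npip != NULL) && (sps == MDI_SUCCESS)) {
		pip = npip;
		do_path(pip);			/* hypothetical work */

		/* advance the cursor, then drop the hold on this path */
		sps = mdi_select_path(cdip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    pip, &npip);
		mdi_rele_path(pip);
	}
}
#endif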
3889 
3890 /* ARGSUSED */
3891 static int
3892 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
3893 {
3894 	scsi_hba_tran_t		*hba = NULL;
3895 	struct scsi_device	*psd = NULL;
3896 	scsi_vhci_lun_t		*vlun = NULL;
3897 	dev_info_t		*pdip = NULL;
3898 	dev_info_t		*tgt_dip;
3899 	struct scsi_vhci	*vhci;
3900 	char			*guid;
3901 	scsi_vhci_priv_t	*svp = NULL;
3902 	int			rval = MDI_FAILURE;
3903 	int			vlun_alloced = 0;
3904 
3905 	ASSERT(vdip != NULL);
3906 	ASSERT(pip != NULL);
3907 
3908 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
3909 	ASSERT(vhci != NULL);
3910 
3911 	pdip = mdi_pi_get_phci(pip);
3912 	ASSERT(pdip != NULL);
3913 
3914 	hba = ddi_get_driver_private(pdip);
3915 	ASSERT(hba != NULL);
3916 
3917 	tgt_dip = mdi_pi_get_client(pip);
3918 	ASSERT(tgt_dip != NULL);
3919 
3920 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
3921 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
3922 		VHCI_DEBUG(1, (CE_WARN, NULL,
3923 		    "vhci_pathinfo_init: lun guid property failed"));
3924 		goto failure;
3925 	}
3926 
3927 	vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
3928 	ddi_prop_free(guid);
3929 
3930 	vlun->svl_dip = tgt_dip;
3931 
3932 	svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
3933 	svp->svp_svl = vlun;
3934 
3935 	vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
3936 	mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
3937 	cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);
3938 
3939 	psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
3940 	mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
3941 
3942 	/*
3943 	 * Clone the transport structure, if requested.  Note that
3944 	 * self-enumerating HBAs always need to use cloning.
3945 	 */
3946 
3947 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
3948 		scsi_hba_tran_t	*clone =
3949 		    kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
3950 		bcopy(hba, clone, sizeof (scsi_hba_tran_t));
3951 		hba = clone;
3952 		hba->tran_sd = psd;
3953 	} else {
3954 		ASSERT(hba->tran_sd == NULL);
3955 	}
3956 	psd->sd_dev = tgt_dip;
3957 	psd->sd_address.a_hba_tran = hba;
3958 	psd->sd_private = (caddr_t)pip;
3959 	svp->svp_psd = psd;
3960 	mdi_pi_set_vhci_private(pip, (caddr_t)svp);
3961 
3962 	/*
3963 	 * call hba's target init entry point if it exists
3964 	 */
3965 	if (hba->tran_tgt_init != NULL) {
3966 		if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
3967 		    hba, psd)) != DDI_SUCCESS) {
3968 			VHCI_DEBUG(1, (CE_WARN, pdip,
3969 			    "!vhci_pathinfo_init: tran_tgt_init failed for "
3970 			    "path=0x%p rval=%x", (void *)pip, rval));
3971 			goto failure;
3972 		}
3973 	}
3974 
3975 	svp->svp_new_path = 1;
3976 
3977 	psd->sd_inq = NULL;
3978 
3979 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
3980 	    (void *)pip));
3981 	return (MDI_SUCCESS);
3982 
3983 failure:
3984 	if (psd) {
3985 		mutex_destroy(&psd->sd_mutex);
3986 		kmem_free(psd, sizeof (*psd));
3987 	}
3988 	if (svp) {
3989 		mdi_pi_set_vhci_private(pip, NULL);
3990 		mutex_destroy(&svp->svp_mutex);
3991 		cv_destroy(&svp->svp_cv);
3992 		kmem_free(svp, sizeof (*svp));
3993 	}
3994 	if (hba && hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE)
3995 		kmem_free(hba, sizeof (scsi_hba_tran_t));
3996 
3997 	if (vlun_alloced)
3998 		vhci_lun_free(tgt_dip);
3999 
4000 	return (rval);
4001 }
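/*
 * Editorial note: with SCSI_HBA_TRAN_CLONE, vhci_pathinfo_init() above
 * gives each target its own private copy of the pHCI's scsi_hba_tran_t,
 * with tran_sd pointing back at the per-path scsi_device, so per-target
 * transport state can hang off the clone.  The clone is freed again in
 * the failure path above and in vhci_pathinfo_uninit() below.
 */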
4002 
4003 /* ARGSUSED */
4004 static int
4005 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4006 {
4007 	scsi_hba_tran_t		*hba = NULL;
4008 	struct scsi_device	*psd = NULL;
4009 	dev_info_t		*pdip = NULL;
4010 	dev_info_t		*cdip = NULL;
4011 	scsi_vhci_priv_t	*svp = NULL;
4012 
4013 	ASSERT(vdip != NULL);
4014 	ASSERT(pip != NULL);
4015 
4016 	pdip = mdi_pi_get_phci(pip);
4017 	ASSERT(pdip != NULL);
4018 
4019 	cdip = mdi_pi_get_client(pip);
4020 	ASSERT(cdip != NULL);
4021 
4022 	hba = ddi_get_driver_private(pdip);
4023 	ASSERT(hba != NULL);
4024 
4025 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
4026 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4027 	if (svp == NULL) {
4028 		/* path already freed. Nothing to do. */
4029 		return (MDI_SUCCESS);
4030 	}
4031 
4032 	psd = svp->svp_psd;
4033 	ASSERT(psd != NULL);
4034 
4035 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4036 		hba = psd->sd_address.a_hba_tran;
4037 		ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
4038 		ASSERT(hba->tran_sd == psd);
4039 	} else {
4040 		ASSERT(hba->tran_sd == NULL);
4041 	}
4042 
4043 	if (hba->tran_tgt_free != NULL) {
4044 		(*hba->tran_tgt_free) (pdip, cdip, hba, psd);
4045 	}
4046 	mutex_destroy(&psd->sd_mutex);
4047 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4048 		kmem_free(hba, sizeof (*hba));
4049 	}
4050 
4051 	mdi_pi_set_vhci_private(pip, NULL);
4052 	kmem_free((caddr_t)psd, sizeof (*psd));
4053 
4054 	mutex_destroy(&svp->svp_mutex);
4055 	cv_destroy(&svp->svp_cv);
4056 	kmem_free((caddr_t)svp, sizeof (*svp));
4057 
4058 	/*
4059 	 * If this is the last path to the client,
4060 	 * then free up the vlun as well.
4061 	 */
4062 	if (mdi_client_get_path_count(cdip) == 1) {
4063 		vhci_lun_free(cdip);
4064 	}
4065 
4066 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
4067 	    (void *)pip));
4068 	return (MDI_SUCCESS);
4069 }
4070 
4071 /* ARGSUSED */
4072 static int
4073 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
4074     mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
4075 {
4076 	int			rval = MDI_SUCCESS;
4077 	scsi_vhci_priv_t	*svp;
4078 	scsi_vhci_lun_t		*vlun;
4079 	int			held;
4080 	int			op = (flags & 0xf00) >> 8;
4081 	struct scsi_vhci	*vhci;
4082 
4083 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4084 
4085 	if (flags & MDI_EXT_STATE_CHANGE) {
4086 		/*
4087 		 * We do not want to issue any commands down the path if the
4088 		 * sync flag is set, since the lower layers might not be
4089 		 * ready to accept any I/O commands.
4090 		 */
4091 		if (op == DRIVER_DISABLE)
4092 			return (MDI_SUCCESS);
4093 
4094 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4095 		if (svp == NULL) {
4096 			return (MDI_FAILURE);
4097 		}
4098 		vlun = svp->svp_svl;
4099 
4100 		if (flags & MDI_BEFORE_STATE_CHANGE) {
4101 			/*
4102 			 * Hold the LUN.
4103 			 */
4104 			VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
4105 			if (flags & MDI_DISABLE_OP)  {
4106 				/*
4107 				 * Issue a scsi reset if this happens to
4108 				 * be the reserved path.
4109 				 */
4110 				if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4111 					/*
4112 					 * if a reservation is pending
4113 					 * on this path, don't mark
4114 					 * the path busy
4115 					 */
4116 					if (op == DRIVER_DISABLE_TRANSIENT) {
4117 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4118 						    "!vhci_pathinfo"
4119 						    "_state_change (pip:%p): "
4120 						    " reservation: fail busy\n",
4121 						    (void *)pip));
4122 						return (MDI_FAILURE);
4123 					}
4124 					if (pip == vlun->svl_resrv_pip) {
4125 						if (vhci_recovery_reset(
4126 						    svp->svp_svl,
4127 						    &svp->svp_psd->sd_address,
4128 						    TRUE,
4129 						    VHCI_DEPTH_TARGET) == 0) {
4130 							VHCI_DEBUG(1,
4131 							    (CE_NOTE, NULL,
4132 							    "!vhci_pathinfo"
4133 							    "_state_change "
4134 							    " (pip:%p): "
4135 							    "reset failed, "
4136 							    "give up!\n",
4137 							    (void *)pip));
4138 						}
4139 						vlun->svl_flags &=
4140 						    ~VLUN_RESERVE_ACTIVE_FLG;
4141 					}
4142 				}
4143 			} else if (flags & MDI_ENABLE_OP)  {
4144 				if (((vhci->vhci_conf_flags &
4145 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4146 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4147 				    MDI_PI_IS_USER_DISABLE(pip) &&
4148 				    MDI_PI_IS_STANDBY(pip)) {
4149 					struct scsi_failover_ops	*fo;
4150 					char *best_pclass, *pclass = NULL;
4151 					int  best_class, rv;
4152 					/*
4153 					 * Failback if enabling a standby path
4154 					 * and it is the primary class or
4155 					 * preferred class
4156 					 */
4157 					best_class = mdi_pi_get_preferred(pip);
4158 					if (best_class == 0) {
4159 						/*
4160 						 * if not preferred - compare
4161 						 * path-class with class
4162 						 */
4163 						fo = vlun->svl_fops;
4164 						(*fo->sfo_pathclass_next)(NULL,
4165 						    &best_pclass,
4166 						    vlun->svl_fops_ctpriv);
4167 						pclass = NULL;
4168 						rv = mdi_prop_lookup_string(pip,
4169 						    "path-class", &pclass);
4170 						if (rv != MDI_SUCCESS ||
4171 						    pclass == NULL) {
4172 							vhci_log(CE_NOTE, vdip,
4173 							    "!path-class "
4174 							    " lookup "
4175 							    "failed. rv: %d"
4176 							    "class: %p", rv,
4177 							    (void *)pclass);
4178 						} else if (strncmp(pclass,
4179 						    best_pclass,
4180 						    strlen(best_pclass)) == 0) {
4181 							best_class = 1;
4182 						}
4183 						if (rv == MDI_SUCCESS &&
4184 						    pclass != NULL) {
4185 							rv = mdi_prop_free(
4186 							    pclass);
4187 							if (rv !=
4188 							    DDI_PROP_SUCCESS) {
4189 								vhci_log(
4190 								    CE_NOTE,
4191 								    vdip,
4192 								    "!path-"
4193 								    "class"
4194 								    " free"
4195 								    " failed"
4196 								    " rv: %d"
4197 								    " class: "
4198 								    "%p",
4199 								    rv,
4200 								    (void *)
4201 								    pclass);
4202 							}
4203 						}
4204 					}
4205 					if (best_class == 1) {
4206 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4207 						    "preferred path: %p "
4208 						    "USER_DISABLE->USER_ENABLE "
4209 						    "transition for lun %s\n",
4210 						    (void *)pip,
4211 						    vlun->svl_lun_wwn));
4212 						(void) taskq_dispatch(
4213 						    vhci->vhci_taskq,
4214 						    vhci_initiate_auto_failback,
4215 						    (void *) vlun, KM_SLEEP);
4216 					}
4217 				}
4218 				/*
4219 				 * if PGR is active, revalidate key and
4220 				 * register on this path also, if key is
4221 				 * still valid
4222 				 */
4223 				sema_p(&vlun->svl_pgr_sema);
4224 				if (vlun->svl_pgr_active)
4225 					(void)
4226 					    vhci_pgr_validate_and_register(svp);
4227 				sema_v(&vlun->svl_pgr_sema);
4228 				/*
4229 				 * Inform target driver about any
4230 				 * reservations to be reinstated if target
4231 				 * has dropped reservation during the busy
4232 				 * period.
4233 				 */
4234 				mutex_enter(&vhci->vhci_mutex);
4235 				scsi_hba_reset_notify_callback(
4236 				    &vhci->vhci_mutex,
4237 				    &vhci->vhci_reset_notify_listf);
4238 				mutex_exit(&vhci->vhci_mutex);
4239 			}
4240 		}
4241 		if (flags & MDI_AFTER_STATE_CHANGE) {
4242 			if (flags & MDI_ENABLE_OP)  {
4243 				mutex_enter(&vhci_global_mutex);
4244 				cv_broadcast(&vhci_cv);
4245 				mutex_exit(&vhci_global_mutex);
4246 			}
4247 			if (vlun->svl_setcap_done) {
4248 				(void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
4249 				    "sector-size", vlun->svl_sector_size,
4250 				    1, pip);
4251 			}
4252 
4253 			/*
4254 			 * Release the LUN
4255 			 */
4256 			VHCI_RELEASE_LUN(vlun);
4257 
4258 			/*
4259 			 * Path transition is complete.
4260 			 * Run the callback to tell the target driver to
4261 			 * retry, in order to prevent IO starvation.
4262 			 */
4263 			if (scsi_callback_id != 0) {
4264 				ddi_run_callback(&scsi_callback_id);
4265 			}
4266 		}
4267 	} else {
4268 		switch (state) {
4269 		case MDI_PATHINFO_STATE_ONLINE:
4270 			rval = vhci_pathinfo_online(vdip, pip, flags);
4271 			break;
4272 
4273 		case MDI_PATHINFO_STATE_OFFLINE:
4274 			rval = vhci_pathinfo_offline(vdip, pip, flags);
4275 			break;
4276 
4277 		default:
4278 			break;
4279 		}
4280 		/*
4281 		 * Path transition is complete.
4282 		 * Run the callback to tell the target driver to
4283 		 * retry, in order to prevent IO starvation.
4284 		 */
4285 		if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
4286 			ddi_run_callback(&scsi_callback_id);
4287 		}
4288 		return (rval);
4289 	}
4290 
4291 	return (MDI_SUCCESS);
4292 }
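/*
 * Editorial note: in summary, vhci_pathinfo_state_change() above handles
 * two cases.  For MDI_EXT_STATE_CHANGE it brackets externally driven
 * enable/disable operations: before the change it holds the LUN (and,
 * for a disable of a reserved path, clears the SCSI-II reservation via
 * reset); after the change it releases the LUN and runs the target-driver
 * retry callback.  For plain state changes it dispatches to
 * vhci_pathinfo_online()/vhci_pathinfo_offline().
 */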
4293 
4294 /*
4295  * Parse the mpxio load balancing options. The datanameptr
4296  * will point to a string containing the load-balance-options value.
4297  * The load-balance-options value will be a property that
4298  * defines the load-balance algorithm and any arguments to that
4299  * algorithm.
4300  * For example:
4301  * device-type-mpxio-options-list=
4302  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4303  * "device-type=SUN     SE6920", "round-robin-options";
4304  * logical-block-options="load-balance=logical-block", "region-size=15";
4305  * round-robin-options="load-balance=round-robin";
4306  *
4307  * If load-balance is not defined, the load-balance algorithm will
4308  * default to the global setting. There will be default values assigned
4309  * to the arguments (region-size=18), and if an argument is one
4310  * that is not known, it will be ignored.
4311  */
4312 static void
4313 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
4314 	caddr_t datanameptr)
4315 {
4316 	char			*dataptr, *next_entry;
4317 	caddr_t			config_list	= NULL;
4318 	int			config_list_len = 0, list_len = 0;
4319 	int			region_size = -1;
4320 	client_lb_t		load_balance = LOAD_BALANCE_NONE;
4321 
4322 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
4323 	    (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
4324 		return;
4325 	}
4326 
4327 	list_len = config_list_len;
4328 	next_entry = config_list;
4329 	while (config_list_len > 0) {
4330 		dataptr = next_entry;
4331 
4332 		if (strncmp(mdi_load_balance, dataptr,
4333 		    strlen(mdi_load_balance)) == 0) {
4334 			/* get the load-balance scheme */
4335 			dataptr += strlen(mdi_load_balance) + 1;
4336 			if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
4337 				(void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
4338 				load_balance = LOAD_BALANCE_RR;
4339 			} else if (strcmp(dataptr,
4340 			    LOAD_BALANCE_PROP_LBA) == 0) {
4341 				(void) mdi_set_lb_policy(cdip,
4342 				    LOAD_BALANCE_LBA);
4343 				load_balance = LOAD_BALANCE_LBA;
4344 			} else if (strcmp(dataptr,
4345 			    LOAD_BALANCE_PROP_NONE) == 0) {
4346 				(void) mdi_set_lb_policy(cdip,
4347 				    LOAD_BALANCE_NONE);
4348 				load_balance = LOAD_BALANCE_NONE;
4349 			}
4350 		} else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
4351 		    strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
4352 			int	i = 0;
4353 			char	*ptr;
4354 			char	*tmp;
4355 
4356 			tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
4357 			/* check for numeric value */
4358 			for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
4359 				if (!isdigit(*ptr)) {
4360 					cmn_err(CE_WARN,
4361 					    "Illegal region size: %s."
4362 					    " Setting to default value: %d",
4363 					    tmp,
4364 					    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4365 					region_size =
4366 					    LOAD_BALANCE_DEFAULT_REGION_SIZE;
4367 					break;
4368 				}
4369 			}
4370 			if (i >= strlen(tmp)) {
4371 				region_size = stoi(&tmp);
4372 			}
4373 			(void) mdi_set_lb_region_size(cdip, region_size);
4374 		}
4375 		config_list_len -= (strlen(next_entry) + 1);
4376 		next_entry += strlen(next_entry) + 1;
4377 	}
4378 #ifdef DEBUG
4379 	if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
4380 		VHCI_DEBUG(1, (CE_NOTE, dip,
4381 		    "!vhci_parse_mpxio_lb_options: region-size: %d"
4382 		    "only valid for load-balance=logical-block\n",
4383 		    region_size));
4384 	}
4385 #endif
4386 	if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
4387 		VHCI_DEBUG(1, (CE_NOTE, dip,
4388 		    "!vhci_parse_mpxio_lb_options: No region-size"
4389 		    " defined load-balance=logical-block."
4390 		    " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
4391 		(void) mdi_set_lb_region_size(cdip,
4392 		    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4393 	}
4394 	if (list_len > 0) {
4395 		kmem_free(config_list, list_len);
4396 	}
4397 }
4398 
4399 /*
4400  * Parse the device-type-mpxio-options-list looking for the key of
4401  * "load-balance-options". If found, parse the load balancing options.
4402  * See the comment for vhci_get_device_type_mpxio_options() for the
4403  * format of the device-type-mpxio-options-list.
4404  */
4405 static void
4406 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4407 		caddr_t datanameptr, int list_len)
4408 {
4409 	char		*dataptr;
4410 	int		len;
4411 
4412 	/*
4413 	 * get the data list
4414 	 */
4415 	dataptr = datanameptr;
4416 	len = 0;
4417 	while (len < list_len &&
4418 	    strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
4419 	    != 0) {
4420 		if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
4421 		    strlen(LOAD_BALANCE_OPTIONS)) == 0) {
4422 			len += strlen(LOAD_BALANCE_OPTIONS) + 1;
4423 			dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
4424 			vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
4425 		}
4426 		len += strlen(dataptr) + 1;
4427 		dataptr += strlen(dataptr) + 1;
4428 	}
4429 }
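/*
 * Editorial sketch (not part of the driver): both parsers above walk a
 * property value that is a packed list of NUL-terminated strings
 * ("name=value\0name=value\0...").  With a hypothetical consumer
 * handle_entry(), the underlying idiom is simply:
 */
#if 0
static void
walk_string_list_example(caddr_t list, int list_len)
{
	caddr_t	entry = list;

	while (list_len > 0) {
		handle_entry(entry);		/* hypothetical consumer */
		list_len -= strlen(entry) + 1;
		entry += strlen(entry) + 1;
	}
}
#endif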
4430 
4431 /*
4432  * Check the inquiry string returned from the device against the device-type.
4433  * Check for the existence of the device-type-mpxio-options-list and
4434  * if found parse the list checking for a match with the device-type
4435  * value and the inquiry string returned from the device. If a match
4436  * is found, parse the mpxio options list. The format of the
4437  * device-type-mpxio-options-list is:
4438  * device-type-mpxio-options-list=
4439  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4440  * "device-type=SUN     SE6920", "round-robin-options";
4441  * logical-block-options="load-balance=logical-block", "region-size=15";
4442  * round-robin-options="load-balance=round-robin";
4443  */
4444 void
4445 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4446 	struct scsi_device *devp)
4447 {
4448 
4449 	caddr_t			config_list	= NULL;
4450 	caddr_t			vidptr, datanameptr;
4451 	int			vidlen, dupletlen = 0;
4452 	int			config_list_len = 0, len;
4453 	struct scsi_inquiry	*inq = devp->sd_inq;
4454 
4455 	/*
4456 	 * Look up the device-type-mpxio-options-list and walk through
4457 	 * the list, comparing the vendor id from the earlier inquiry
4458 	 * command with the vids in the list; if there is a match, look
4459 	 * up the mpxio-options value.
4460 	 */
4461 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
4462 	    MPXIO_OPTIONS_LIST,
4463 	    (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {
4464 
4465 		/*
4466 		 * Compare vids in each duplet - if it matches,
4467 		 * parse the mpxio options list.
4468 		 */
4469 		for (len = config_list_len, vidptr = config_list; len > 0;
4470 		    len -= dupletlen) {
4471 
4472 			dupletlen = 0;
4473 
4474 			if (strlen(vidptr) != 0 &&
4475 			    strncmp(vidptr, DEVICE_TYPE_STR,
4476 			    strlen(DEVICE_TYPE_STR)) == 0) {
4477 				/* point to next duplet */
4478 				datanameptr = vidptr + strlen(vidptr) + 1;
4479 				/* add len of this duplet */
4480 				dupletlen += strlen(vidptr) + 1;
4481 				/* get to device type */
4482 				vidptr += strlen(DEVICE_TYPE_STR) + 1;
4483 				vidlen = strlen(vidptr);
4484 				if ((vidlen != 0) &&
4485 				    bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
4486 					vhci_parse_mpxio_options(dip, cdip,
4487 					    datanameptr, len - dupletlen);
4488 					break;
4489 				}
4490 				/* get to next duplet */
4491 				vidptr += strlen(vidptr) + 1;
4492 			}
4493 			/* get to the next device-type */
4494 			while (len - dupletlen > 0 &&
4495 			    strlen(vidptr) != 0 &&
4496 			    strncmp(vidptr, DEVICE_TYPE_STR,
4497 			    strlen(DEVICE_TYPE_STR)) != 0) {
4498 				dupletlen += strlen(vidptr) + 1;
4499 				vidptr += strlen(vidptr) + 1;
4500 			}
4501 		}
4502 		if (config_list_len > 0) {
4503 			kmem_free(config_list, config_list_len);
4504 		}
4505 	}
4506 }
4507 
4508 static int
4509 vhci_update_pathinfo(struct scsi_device *psd,  mdi_pathinfo_t *pip,
4510 	struct scsi_failover_ops *fo,
4511 	scsi_vhci_lun_t		*vlun,
4512 	struct scsi_vhci	*vhci)
4513 {
4514 	struct scsi_path_opinfo		opinfo;
4515 	char				*pclass, *best_pclass;
4516 
4517 	if ((*fo->sfo_path_get_opinfo)(psd, &opinfo,
4518 	    vlun->svl_fops_ctpriv) != 0) {
4519 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
4520 		    "Failed to get operation info for path:%p\n", (void *)pip));
4521 		return (MDI_FAILURE);
4522 	}
4523 	/* set the xlf capable flag in the vlun for future use */
4524 	vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
4525 	(void) mdi_prop_update_string(pip, "path-class",
4526 	    opinfo.opinfo_path_attr);
4527 
4528 	pclass = opinfo.opinfo_path_attr;
4529 	if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
4530 		mutex_enter(&vlun->svl_mutex);
4531 		if (vlun->svl_active_pclass != NULL) {
4532 			if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
4533 				mutex_exit(&vlun->svl_mutex);
4534 				/*
4535 				 * Externally initiated failover has happened;
4536 				 * force the path state to be STANDBY/ONLINE,
4537 				 * next IO will trigger failover and thus
4538 				 * sync-up the pathstates.  Reason we don't
4539 				 * sync-up immediately by invoking
4540 				 * vhci_update_pathstates() is because it
4541 				 * needs a VHCI_HOLD_LUN() and we don't
4542 				 * want to block here.
4543 				 *
4544 				 * Further, if the device is an ALUA device,
4545 				 * then failure to exactly match 'pclass' and
4546 				 * 'svl_active_pclass'(as is the case here)
4547 				 * indicates that the currently active path
4548 				 * is a 'non-optimized' path - which means
4549 				 * that 'svl_active_pclass' needs to be
4550 				 * replaced with the opinfo.opinfo_path_attr
4551 				 * value.
4552 				 */
4553 
4554 				if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
4555 					char	*tptr;
4556 
4557 					/*
4558 					 * The device is ALUA compliant. The
4559 					 * state needs to be changed to ONLINE
4560 					 * rather than STANDBY, which is what
4561 					 * is typically done for an asymmetric
4562 					 * device that is not ALUA compliant.
4563 					 */
4564 					mdi_pi_set_state(pip,
4565 					    MDI_PATHINFO_STATE_ONLINE);
4566 					tptr = kmem_alloc(strlen
4567 					    (opinfo.opinfo_path_attr)+1,
4568 					    KM_SLEEP);
4569 					(void) strlcpy(tptr,
4570 					    opinfo.opinfo_path_attr,
4571 					    (strlen(opinfo.opinfo_path_attr)
4572 					    +1));
4573 					mutex_enter(&vlun->svl_mutex);
4574 					kmem_free(vlun->svl_active_pclass,
4575 					    strlen(vlun->svl_active_pclass)+1);
4576 					vlun->svl_active_pclass = tptr;
4577 					mutex_exit(&vlun->svl_mutex);
4578 				} else {
4579 					/*
4580 					 * Non ALUA device case.
4581 					 */
4582 					mdi_pi_set_state(pip,
4583 					    MDI_PATHINFO_STATE_STANDBY);
4584 				}
4585 				vlun->svl_fo_support = opinfo.opinfo_mode;
4586 				mdi_pi_set_preferred(pip,
4587 				    opinfo.opinfo_preferred);
4588 				return (MDI_SUCCESS);
4589 			}
4590 		} else {
4591 			char	*tptr;
4592 
4593 			/*
4594 			 * Let's release the mutex before we try to
4595 			 * allocate memory, since the allocation has
4596 			 * the potential to sleep.
4597 			 */
4598 			mutex_exit(&vlun->svl_mutex);
4599 			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4600 			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4601 			mutex_enter(&vlun->svl_mutex);
4602 			vlun->svl_active_pclass = tptr;
4603 		}
4604 		mutex_exit(&vlun->svl_mutex);
4605 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4606 		vlun->svl_waiting_for_activepath = 0;
4607 	} else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
4608 		mutex_enter(&vlun->svl_mutex);
4609 		if (vlun->svl_active_pclass == NULL) {
4610 			char	*tptr;
4611 
4612 			mutex_exit(&vlun->svl_mutex);
4613 			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4614 			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4615 			mutex_enter(&vlun->svl_mutex);
4616 			vlun->svl_active_pclass = tptr;
4617 		}
4618 		mutex_exit(&vlun->svl_mutex);
4619 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4620 		vlun->svl_waiting_for_activepath = 0;
4621 	} else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
4622 		mutex_enter(&vlun->svl_mutex);
4623 		if (vlun->svl_active_pclass != NULL) {
4624 			if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
4625 				mutex_exit(&vlun->svl_mutex);
4626 				/*
4627 				 * externally initiated failover has happened;
4628 				 * force state to ONLINE (see comment above)
4629 				 */
4630 				mdi_pi_set_state(pip,
4631 				    MDI_PATHINFO_STATE_ONLINE);
4632 				vlun->svl_fo_support = opinfo.opinfo_mode;
4633 				mdi_pi_set_preferred(pip,
4634 				    opinfo.opinfo_preferred);
4635 				return (MDI_SUCCESS);
4636 			}
4637 		}
4638 		mutex_exit(&vlun->svl_mutex);
4639 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);
4640 
4641 		/*
4642 		 * Initiate auto-failback, if enabled, for the path if the
4643 		 * path-state is transitioning from OFFLINE->STANDBY and the
4644 		 * pathclass is the preferred pathclass for this storage.
4645 		 * NOTE: In the case where opinfo_path_state is
4646 		 * SCSI_PATH_ACTIVE (above) and the pi state is set to
4647 		 * STANDBY, we don't initiate auto-failback, as the next IO
4648 		 * will take care of this.  See the comment above.
4649 		 */
4650 		(*fo->sfo_pathclass_next)(NULL, &best_pclass,
4651 		    vlun->svl_fops_ctpriv);
4652 		if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4653 		    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4654 		    ((strcmp(pclass, best_pclass) == 0) ||
4655 		    mdi_pi_get_preferred(pip) == 1) &&
4656 		    ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)||
4657 		    (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
4658 			VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
4659 			    " OFFLINE->STANDBY transition for lun %s\n",
4660 			    best_pclass, (void *)pip, vlun->svl_lun_wwn));
4661 			(void) taskq_dispatch(vhci->vhci_taskq,
4662 			    vhci_initiate_auto_failback, (void *) vlun,
4663 			    KM_SLEEP);
4664 		}
4665 	}
4666 	vlun->svl_fo_support = opinfo.opinfo_mode;
4667 	mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);
4668 
4669 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
4670 	    " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
4671 	    opinfo.opinfo_rev, opinfo.opinfo_path_state,
4672 	    opinfo.opinfo_preferred, opinfo.opinfo_mode));
4673 
4674 	return (MDI_SUCCESS);
4675 }
4676 
4677 /*
4678  * Form the kstat name and and call mdi_pi_kstat_create()
4679  */
4680 void
4681 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
4682 {
4683 	dev_info_t	*tgt_dip;
4684 	dev_info_t	*pdip;
4685 	char		*guid;
4686 	char		*target_port, *target_port_dup;
4687 	char		ks_name[KSTAT_STRLEN];
4688 	uint_t		pid;
4689 	int		by_id;
4690 	mod_hash_val_t	hv;
4691 
4692 
4693 	/* return if we have already allocated kstats */
4694 	if (mdi_pi_kstat_exists(pip))
4695 		return;
4696 
4697 	/*
4698 	 * We need instance numbers to create a kstat name, return if we don't
4699 	 * have instance numbers assigned yet.
4700 	 */
4701 	tgt_dip = mdi_pi_get_client(pip);
4702 	pdip = mdi_pi_get_phci(pip);
4703 	if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
4704 		return;
4705 
4706 	/*
4707 	 * A path oriented kstat has a ks_name of the form:
4708 	 *
4709 	 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
4710 	 *
4711 	 * We maintain a bidirectional 'target-port' to <pid> map,
4712 	 * called targetmap. All pathinfo nodes with the same
4713 	 * 'target-port' map to the same <pid>. The iostat(1M) code,
4714 	 * when parsing a path oriented kstat name, uses the <pid> as
4715 	 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
4716 	 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
4717 	 * this ioctl needs to translate a <pid> to a 'target-port'
4718 	 * even after all pathinfo nodes associated with the
4719 	 * 'target-port' have been destroyed. This is needed to support
4720 	 * consistent first-iteration activity-since-boot iostat(1M)
4721 	 * output. Because of this requirement, the mapping can't be
4722 	 * based on pathinfo information in a devinfo snapshot.
4723 	 */
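	/*
	 * Editorial example: with a (hypothetical) ssd client instance 1,
	 * a targetmap <pid> of 2, and an fp pHCI instance 0, the ks_name
	 * built below would be "ssd1.t2.fp0".
	 */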
4724 
4725 	/* determine 'target-port' */
4726 	if (mdi_prop_lookup_string(pip,
4727 	    "target-port", &target_port) == MDI_SUCCESS) {
4728 		target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
4729 		(void) mdi_prop_free(target_port);
4730 		by_id = 1;
4731 	} else {
4732 		/*
4733 		 * If the pHCI did not set up 'target-port' on this
4734 		 * pathinfo node, assume that our client is the only
4735 		 * one with paths to the device by using the guid
4736 		 * value as the 'target-port'. Since no other client
4737 		 * will have the same guid, no other client will use
4738 		 * the same <pid>.  NOTE: a client with an instance
4739 		 * number always has a guid.
4740 		 */
4741 		(void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
4742 		    PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
4743 		target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
4744 		ddi_prop_free(guid);
4745 
4746 		/*
4747 		 * For this type of mapping we don't want the
4748 		 * <id> -> 'target-port' mapping to be made.  This
4749 		 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
4750 		 * to fail, and the iostat(1M) long '-n' output will
4751 		 * still use the <pid>.  We do this because we just
4752 		 * made up the 'target-port' using the guid, and we
4753 		 * don't want to expose that fact in iostat output.
4754 		 */
4755 		by_id = 0;
4756 	}
4757 
4758 	/* find/establish <pid> given 'target-port' */
4759 	mutex_enter(&vhci_targetmap_mutex);
4760 	if (mod_hash_find(vhci_targetmap_byport,
4761 	    (mod_hash_key_t)target_port_dup, &hv) == 0) {
4762 		pid = (int)(intptr_t)hv;	/* mapping exists */
4763 	} else {
4764 		pid = vhci_targetmap_pid++;	/* new mapping */
4765 
4766 		(void) mod_hash_insert(vhci_targetmap_byport,
4767 		    (mod_hash_key_t)target_port_dup,
4768 		    (mod_hash_val_t)(intptr_t)pid);
4769 		if (by_id) {
4770 			(void) mod_hash_insert(vhci_targetmap_bypid,
4771 			    (mod_hash_key_t)(uintptr_t)pid,
4772 			    (mod_hash_val_t)(uintptr_t)target_port_dup);
4773 		}
4774 		target_port_dup = NULL;		/* owned by hash */
4775 	}
4776 	mutex_exit(&vhci_targetmap_mutex);
4777 
4778 	/* form kstat name */
4779 	(void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
4780 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
4781 	    pid, ddi_driver_name(pdip), ddi_get_instance(pdip));
4782 
4783 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
4784 	    "kstat %s: pid %x <-> port %s\n", (void *)pip,
4785 	    ks_name, pid, target_port_dup));
4786 	if (target_port_dup)
4787 		kmem_free(target_port_dup, strlen(target_port_dup) + 1);
4788 
4789 	/* call mdi to create kstats with the name we built */
4790 	(void) mdi_pi_kstat_create(pip, ks_name);
4791 }
4792 
4793 /* ARGSUSED */
4794 static int
4795 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4796 {
4797 	scsi_hba_tran_t			*hba = NULL;
4798 	struct scsi_device		*psd = NULL;
4799 	scsi_vhci_lun_t			*vlun = NULL;
4800 	dev_info_t			*pdip = NULL;
4801 	dev_info_t			*tgt_dip;
4802 	struct scsi_vhci		*vhci;
4803 	char				*guid;
4804 	struct scsi_failover		*sf;
4805 	struct scsi_failover_ops	*sfo;
4806 	char				*override;
4807 	scsi_vhci_priv_t		*svp = NULL;
4808 	struct buf			*bp;
4809 	struct scsi_address		*ap;
4810 	struct scsi_pkt			*pkt;
4811 	int				rval = MDI_FAILURE;
4812 	uint_t				inq_size = VHCI_STD_INQ_SIZE;
4813 	mpapi_item_list_t		*list_ptr;
4814 	mpapi_lu_data_t			*ld;
4815 
4816 	ASSERT(vdip != NULL);
4817 	ASSERT(pip != NULL);
4818 
4819 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4820 	ASSERT(vhci != NULL);
4821 
4822 	pdip = mdi_pi_get_phci(pip);
4823 	hba = ddi_get_driver_private(pdip);
4824 	ASSERT(hba != NULL);
4825 
4826 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4827 	ASSERT(svp != NULL);
4828 
4829 	tgt_dip = mdi_pi_get_client(pip);
4830 	ASSERT(tgt_dip != NULL);
4831 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
4832 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
4833 		VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
4834 		    "property failed"));
4835 		goto failure;
4836 	}
4837 
4838 	vlun = vhci_lun_lookup(tgt_dip);
4839 	ASSERT(vlun != NULL);
4840 
4841 	ddi_prop_free(guid);
4842 
4843 	vlun->svl_dip = mdi_pi_get_client(pip);
4844 	ASSERT(vlun->svl_dip != NULL);
4845 
4846 	psd = svp->svp_psd;
4847 	ASSERT(psd != NULL);
4848 
4849 	/*
4850 	 * For the INQUIRY response buffer size, we use VHCI_STD_INQ_SIZE
4851 	 * (132 bytes) instead of SUN_INQSIZE (48 bytes), which is used in the
4852 	 * sd layer. This is because we could get the vendor-specific
4853 	 * parameters (present from the 97th byte onwards), which are required
4854 	 * to process vendor-specific data based on the array type.
4855 	 * This INQUIRY buffer is freed in vhci_pathinfo_offline but NEVER
4856 	 * in a different layer like sd/phci transport. In other words, vhci
4857 	 * maintains its own copy of scsi_device and scsi_inquiry data on a
4858 	 * per-path basis.
4859 	 */
4860 	if (psd->sd_inq == NULL) {
4861 		psd->sd_inq = (struct scsi_inquiry *)
4862 		    kmem_zalloc(inq_size, KM_SLEEP);
4863 	}
4864 
4865 	tgt_dip = psd->sd_dev;
4866 	ASSERT(tgt_dip != NULL);
4867 
4868 	/*
4869 	 * Do an inquiry to pass into the probe routine; this
4870 	 * avoids each probe routine doing its own scsi inquiry.
4871 	 */
4872 	bp = getrbuf(KM_SLEEP);
4873 	bp->b_un.b_addr = (caddr_t)psd->sd_inq;
4874 	bp->b_flags = B_READ;
4875 	bp->b_bcount = inq_size;
4876 	bp->b_resid = 0;
4877 
4878 	ap = &psd->sd_address;
4879 	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP0,
4880 	    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
4881 	if (pkt == NULL) {
4882 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
4883 		    "Inquiry init_pkt failed :%p\n", (void *)pip));
4884 		rval = MDI_FAILURE;
4885 		goto failure;
4886 	}
4887 	pkt->pkt_cdbp[0] = SCMD_INQUIRY;
4888 	pkt->pkt_cdbp[4] = (uchar_t)inq_size;
4889 	pkt->pkt_time = 60;
4890 
4891 	rval = vhci_do_scsi_cmd(pkt);
4892 	scsi_destroy_pkt(pkt);
4893 	freerbuf(bp);
4894 	if (rval == 0) {
4895 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
4896 		    "Failover Inquiry failed path:%p rval:%x\n",
4897 		    (void *)pip, rval));
4898 		rval = MDI_FAILURE;
4899 		goto failure;
4900 	}
4901 
4902 	/*
4903 	 * Determine if device is supported under scsi_vhci, and select
4904 	 * failover module.
4905 	 *
4906 	 * See if there is a scsi_vhci.conf file override for this devices's
4907 	 * VID/PID. The following values can be returned:
4908 	 *
4909 	 * NULL		If the NULL is returned then there is no scsi_vhci.conf
4910 	 *		override.  For NULL, we determine the failover_ops for
4911 	 *		this device by checking the sfo_device_probe entry
4912 	 *		point for each 'fops' module, in order.
4913 	 *
4914 	 *		NOTE: Correct operation may depend on module ordering
4915 	 *		of 'specific' (failover modules that are completely
4916 	 *		VID/PID table based) to 'generic' (failover modules
4917 	 *		that based on T10 standards like TPGS).  Currently,
4918 	 *		the value of 'ddi-forceload' in scsi_vhci.conf is used
4919 	 *		to establish the module list and probe order.
4920 	 *
4921 	 * "NONE"	If value "NONE" is returned then there is a
4922 	 *		scsi_vhci.conf VID/PID override to indicate the device
4923 	 *		should not be supported under scsi_vhci (even if there
4924 	 *		is an 'fops' module supporting the device).
4925 	 *
4926 	 * "<other>"	If another value is returned then that value is the
4927 	 *		name of the 'fops' module that should be used.
4928 	 */
4929 	sfo = NULL;	/* "NONE" */
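	/*
	 * Editorial example: a scsi_vhci.conf override entry for the
	 * property looked up below might (with a made-up VID/PID and
	 * module name) read:
	 *
	 *	scsi-vhci-failover-override =
	 *	    "ACME    Array42", "f_acme",
	 *	    "NOMP    JustDisk", "NONE";
	 */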
4930 	override = scsi_get_device_type_string(
4931 	    "scsi-vhci-failover-override", vdip, psd);
4932 
4933 	if (override == NULL) {
4934 		/* NULL: default: select based on sfo_device_probe results */
4935 		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
4936 			if ((sf->sf_sfo == NULL) ||
4937 			    ((*sf->sf_sfo->sfo_device_probe) (psd,
4938 			    psd->sd_inq, &vlun->svl_fops_ctpriv) ==
4939 			    SFO_DEVICE_PROBE_PHCI))
4940 				continue;
4941 
4942 			/* found failover module, supported under scsi_vhci */
4943 			sfo = sf->sf_sfo;
4944 			vlun->svl_fops_name =
4945 			    i_ddi_strdup(sfo->sfo_name, KM_SLEEP);
4946 			break;
4947 		}
4948 	} else if (strcmp(override, "NONE") && strcmp(override, "none")) {
4949 		/* !"NONE": select based on driver.conf specified name */
4950 		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
4951 			if ((sf->sf_sfo == NULL) ||
4952 			    (sf->sf_sfo->sfo_name == NULL) ||
4953 			    strcmp(override, sf->sf_sfo->sfo_name))
4954 				continue;
4955 
4956 			/* found failover module, supported under scsi_vhci */
4957 			sfo = sf->sf_sfo;
4958 			vlun->svl_fops_name = kmem_alloc(strlen("conf ") +
4959 			    strlen(sfo->sfo_name) + 1, KM_SLEEP);
4960 			(void) sprintf(vlun->svl_fops_name, "conf %s",
4961 			    sfo->sfo_name);
4962 			break;
4963 		}
4964 	}
4965 	if (override)
4966 		kmem_free(override, strlen(override) + 1);
4967 
4968 	if (sfo == NULL) {
4969 		/* no failover module - device not supported */
4970 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
4971 		    "!vhci_pathinfo_online: dev (path 0x%p) not "
4972 		    "supported\n", (void *)pip));
4973 		vlun->svl_not_supported = 1;
4974 		rval = MDI_NOT_SUPPORTED;
4975 		goto done;
4976 	}
4977 
4978 	/* failover supported for device - save failover_ops in vlun */
4979 	vlun->svl_fops = sfo;
4980 
4981 	/*
4982 	 * Obtain the device-type based mpxio options as specified in
4983 	 * scsi_vhci.conf file.
4984 	 *
4985 	 * NOTE: currently, the end result is a call to
4986 	 * mdi_set_lb_region_size().
4987 	 */
4988 	vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);
4989 
4990 	/*
4991 	 * if PGR is active, revalidate key and register on this path also,
4992 	 * if key is still valid
4993 	 */
4994 	sema_p(&vlun->svl_pgr_sema);
4995 	if (vlun->svl_pgr_active) {
4996 		rval = vhci_pgr_validate_and_register(svp);
4997 		if (rval != 1) {
4998 			rval = MDI_FAILURE;
4999 			sema_v(&vlun->svl_pgr_sema);
5000 			goto failure;
5001 		}
5002 	}
5003 	sema_v(&vlun->svl_pgr_sema);
5004 
5005 	if (svp->svp_new_path) {
5006 		/*
5007 		 * Last chance to perform any cleanup operations on this
5008 		 * new path before making this path completely online.
5009 		 */
5010 		svp->svp_new_path = 0;
5011 
5012 		/*
5013 		 * If scsi_vhci knows the lun is already RESERVE'd,
5014 		 * then skip issuing the RELEASE on the new path.
5015 		 */
5016 		if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
5017 			/*
5018 			 * Issue SCSI-2 RELEASE only for the first time on
5019 			 * a new path just in case the host rebooted and
5020 			 * a reservation is still pending on this path.
5021 			 * IBM Shark storage does not clear RESERVE upon
5022 			 * host reboot.
5023 			 */
5024 			ap = &psd->sd_address;
5025 			pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
5026 			    sizeof (struct scsi_arq_status), 0, 0,
5027 			    SLEEP_FUNC, NULL);
5028 			if (pkt == NULL) {
5029 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5030 				    "!vhci_pathinfo_online: "
5031 				    "Release init_pkt failed :%p\n",
5032 				    (void *)pip));
5033 				rval = MDI_FAILURE;
5034 				goto failure;
5035 			}
5036 			pkt->pkt_cdbp[0] = SCMD_RELEASE;
5037 			pkt->pkt_time = 60;
5038 
5039 			VHCI_DEBUG(1, (CE_NOTE, NULL,
5040 			    "!vhci_path_online: path:%p "
5041 			    "Issued SCSI-2 RELEASE\n", (void *)pip));
5042 
5043 			/* Ignore the return value */
5044 			(void) vhci_do_scsi_cmd(pkt);
5045 			scsi_destroy_pkt(pkt);
5046 		}
5047 	}
5048 
5049 	rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
5050 	if (rval == MDI_FAILURE) {
5051 		goto failure;
5052 	}
5053 
5054 	/* Initialize MP-API data */
5055 	vhci_update_mpapi_data(vhci, vlun, pip);
5056 
5057 	/*
5058 	 * MP-API also needs the Inquiry data to be maintained in the
5059 	 * mp_vendor_prop_t structure, so find the lun and update its
5060 	 * structure with this data.
5061 	 */
5062 	list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
5063 	    MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
5064 	ld = (mpapi_lu_data_t *)list_ptr->item->idata;
5065 	if (ld != NULL) {
5066 		bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
5067 		bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
5068 		bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
5069 	} else {
5070 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
5071 		    "mpapi_lu_data_t is NULL"));
5072 	}
5073 
5074 	/* create kstats for path */
5075 	vhci_kstat_create_pathinfo(pip);
5076 
5077 done:
5078 	mutex_enter(&vhci_global_mutex);
5079 	cv_broadcast(&vhci_cv);
5080 	mutex_exit(&vhci_global_mutex);
5081 
5082 	if (vlun->svl_setcap_done) {
5083 		(void) vhci_pHCI_cap(ap, "sector-size",
5084 		    vlun->svl_sector_size, 1, pip);
5085 	}
5086 
5087 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
5088 	    (void *)pip));
5089 
5090 failure:
5091 	if ((rval != MDI_SUCCESS) && psd->sd_inq) {
5092 		kmem_free((caddr_t)psd->sd_inq, inq_size);
5093 		psd->sd_inq = (struct scsi_inquiry *)NULL;
5094 	}
5095 	return (rval);
5096 }
5097 
5098 /*
5099  * path offline handler.  Release all bindings that will not be
5100  * released by the normal packet transport/completion code path.
5101  * Since we don't (presently) keep any bindings alive outside of
5102  * the in-transport packets (which will be released on completion)
5103  * there is not much to do here.
5104  */
5105 /* ARGSUSED */
5106 static int
5107 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5108 {
5109 	scsi_hba_tran_t		*hba = NULL;
5110 	struct scsi_device	*psd = NULL;
5111 	dev_info_t		*pdip = NULL;
5112 	dev_info_t		*cdip = NULL;
5113 	scsi_vhci_priv_t	*svp = NULL;
5114 	uint_t			inq_size = VHCI_STD_INQ_SIZE;
5115 
5116 	ASSERT(vdip != NULL);
5117 	ASSERT(pip != NULL);
5118 
5119 	pdip = mdi_pi_get_phci(pip);
5120 	ASSERT(pdip != NULL);
5121 	if (pdip == NULL) {
5122 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5123 		    "phci dip", (void *)pip));
5124 		return (MDI_FAILURE);
5125 	}
5126 
5127 	cdip = mdi_pi_get_client(pip);
5128 	ASSERT(cdip != NULL);
5129 	if (cdip == NULL) {
5130 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5131 		    "client dip", (void *)pip));
5132 		return (MDI_FAILURE);
5133 	}
5134 
5135 	hba = ddi_get_driver_private(pdip);
5136 	ASSERT(hba != NULL);
5137 
5138 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5139 	if (svp == NULL) {
5140 		/*
5141 		 * mdi_pathinfo node in INIT state can have vHCI private
5142 		 * information set to null
5143 		 */
5144 		VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5145 		    "svp is NULL for pip 0x%p\n", (void *)pip));
5146 		return (MDI_SUCCESS);
5147 	}
5148 
5149 	psd = svp->svp_psd;
5150 	ASSERT(psd != NULL);
5151 
5152 	mutex_enter(&svp->svp_mutex);
5153 
5154 	VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5155 	    "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
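	/*
	 * Drain commands still in flight on this path.  cv_timedwait()
	 * can wake up without svp_cmds having reached zero, so loop
	 * until the count drains or the quiesce timeout expires.
	 */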
5156 	while (svp->svp_cmds != 0) {
5157 		if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex,
5158 		    ddi_get_lbolt() +
5159 		    drv_usectohz(vhci_path_quiesce_timeout * 1000000)) == -1) {
5160 			/*
5161 			 * The timeout time reached without the condition
5162 			 * being signaled.
5163 			 */
5164 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5165 			    "Timeout reached on path 0x%p without the cond\n",
5166 			    (void *)pip));
5167 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5168 			    "%d cmds still pending on path: 0x%p\n",
5169 			    svp->svp_cmds, (void *)pip));
5170 			break;
5171 		}
5172 	}
5173 	mutex_exit(&svp->svp_mutex);
5174 
5175 	/*
5176 	 * Check to see if this vlun has an active SCSI-II RESERVE and this
5177 	 * is the pip for the path that holds the reservation.
5178 	 * If so, clear the reservation by sending a reset, so the host will
5179 	 * not get a reservation conflict.  Clear the VLUN_RESERVE_ACTIVE_FLG
5180 	 * flag for this lun.  A reset notify is also sent to the target
5181 	 * driver, just in case the POR check condition is cleared by some
5182 	 * other layer in the stack.
5183 	 */
5184 	if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
5185 		if (pip == svp->svp_svl->svl_resrv_pip) {
5186 			if (vhci_recovery_reset(svp->svp_svl,
5187 			    &svp->svp_psd->sd_address, TRUE,
5188 			    VHCI_DEPTH_TARGET) == 0) {
5189 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5190 				    "!vhci_pathinfo_offline (pip:%p):"
5191 				    "reset failed, retrying\n", (void *)pip));
5192 				delay(1*drv_usectohz(1000000));
5193 				if (vhci_recovery_reset(svp->svp_svl,
5194 				    &svp->svp_psd->sd_address, TRUE,
5195 				    VHCI_DEPTH_TARGET) == 0) {
5196 					VHCI_DEBUG(1, (CE_NOTE, NULL,
5197 					    "!vhci_pathinfo_offline "
5198 					    "(pip:%p): reset failed, "
5199 					    "giving up!\n", (void *)pip));
5200 				}
5201 			}
5202 			svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
5203 		}
5204 	}
5205 
5206 	mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
5207 	if (psd->sd_inq) {
5208 		kmem_free((caddr_t)psd->sd_inq, inq_size);
5209 		psd->sd_inq = (struct scsi_inquiry *)NULL;
5210 	}
5211 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
5212 
5213 	VHCI_DEBUG(1, (CE_NOTE, NULL,
5214 	    "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
5215 	return (MDI_SUCCESS);
5216 }
5217 
5218 
5219 /*
5220  * routine for SCSI VHCI IOCTL implementation.
5221  */
5222 /* ARGSUSED */
5223 static int
5224 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
5225 {
5226 	struct scsi_vhci		*vhci;
5227 	dev_info_t			*vdip;
5228 	mdi_pathinfo_t			*pip;
5229 	int				instance, held;
5230 	int				retval = 0;
5231 	caddr_t				phci_path = NULL, client_path = NULL;
5232 	caddr_t				paddr = NULL;
5233 	sv_iocdata_t			ioc;
5234 	sv_iocdata_t			*pioc = &ioc;
5235 	sv_switch_to_cntlr_iocdata_t	iocsc;
5236 	sv_switch_to_cntlr_iocdata_t	*piocsc = &iocsc;
5237 	caddr_t				s;
5238 	scsi_vhci_lun_t			*vlun;
5239 	struct scsi_failover_ops	*fo;
5240 	char				*pclass;
5241 
5242 	/* Check for validity of vhci structure */
5243 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
5244 	if (vhci == NULL) {
5245 		return (ENXIO);
5246 	}
5247 
5248 	mutex_enter(&vhci->vhci_mutex);
5249 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5250 		mutex_exit(&vhci->vhci_mutex);
5251 		return (ENXIO);
5252 	}
5253 	mutex_exit(&vhci->vhci_mutex);
5254 
5255 	/* Get the vhci dip */
5256 	vdip = vhci->vhci_dip;
5257 	ASSERT(vdip != NULL);
5258 	instance = ddi_get_instance(vdip);
5259 
5260 	/* Allocate memory for getting parameters from userland */
5261 	phci_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5262 	client_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5263 	paddr		= kmem_zalloc(MAXNAMELEN, KM_SLEEP);
5264 
5265 	/*
5266 	 * Set a local variable indicating the ioctl name. Used for
5267 	 * printing debug strings.
5268 	 */
5269 	switch (cmd) {
5270 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5271 		s = "GET_CLIENT_MULTIPATH_INFO";
5272 		break;
5273 
5274 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5275 		s = "GET_PHCI_MULTIPATH_INFO";
5276 		break;
5277 
5278 	case SCSI_VHCI_GET_CLIENT_NAME:
5279 		s = "GET_CLIENT_NAME";
5280 		break;
5281 
5282 	case SCSI_VHCI_PATH_ONLINE:
5283 		s = "PATH_ONLINE";
5284 		break;
5285 
5286 	case SCSI_VHCI_PATH_OFFLINE:
5287 		s = "PATH_OFFLINE";
5288 		break;
5289 
5290 	case SCSI_VHCI_PATH_STANDBY:
5291 		s = "PATH_STANDBY";
5292 		break;
5293 
5294 	case SCSI_VHCI_PATH_TEST:
5295 		s = "PATH_TEST";
5296 		break;
5297 
5298 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5299 		s = "SWITCH_TO_CNTLR";
5300 		break;
5301 	case SCSI_VHCI_PATH_DISABLE:
5302 		s = "PATH_DISABLE";
5303 		break;
5304 	case SCSI_VHCI_PATH_ENABLE:
5305 		s = "PATH_ENABLE";
5306 		break;
5307 
5308 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5309 		s = "GET_TARGET_LONGNAME";
5310 		break;
5311 
5312 #ifdef	DEBUG
5313 	case SCSI_VHCI_CONFIGURE_PHCI:
5314 		s = "CONFIGURE_PHCI";
5315 		break;
5316 
5317 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5318 		s = "UNCONFIGURE_PHCI";
5319 		break;
5320 #endif
5321 
5322 	default:
5323 		s = "Unknown";
5324 		vhci_log(CE_NOTE, vdip,
5325 		    "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
5326 		retval = ENOTSUP;
5327 		break;
5328 	}
5329 	if (retval != 0) {
5330 		goto end;
5331 	}
5332 
5333 	VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));
5334 
5335 	/*
5336 	 * Get IOCTL parameters from userland
5337 	 */
5338 	switch (cmd) {
5339 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5340 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5341 	case SCSI_VHCI_GET_CLIENT_NAME:
5342 	case SCSI_VHCI_PATH_ONLINE:
5343 	case SCSI_VHCI_PATH_OFFLINE:
5344 	case SCSI_VHCI_PATH_STANDBY:
5345 	case SCSI_VHCI_PATH_TEST:
5346 	case SCSI_VHCI_PATH_DISABLE:
5347 	case SCSI_VHCI_PATH_ENABLE:
5348 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5349 #ifdef	DEBUG
5350 	case SCSI_VHCI_CONFIGURE_PHCI:
5351 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5352 #endif
5353 		retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
5354 		break;
5355 
5356 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5357 		retval = vhci_get_iocswitchdata((const void *)data, piocsc,
5358 		    mode, s);
5359 		break;
5360 	}
5361 	if (retval != 0) {
5362 		goto end;
5363 	}
5364 
5365 
5366 	/*
5367 	 * Process the IOCTL
5368 	 */
5369 	switch (cmd) {
5370 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5371 	{
5372 		uint_t		num_paths;	/* Num paths to client dev */
5373 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5374 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5375 		dev_info_t	*cdip;		/* Client device dip */
5376 
5377 		if (pioc->ret_elem == NULL) {
5378 			retval = EINVAL;
5379 			break;
5380 		}
5381 
5382 		/* Get client device path from user land */
5383 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5384 			retval = EFAULT;
5385 			break;
5386 		}
5387 
5388 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5389 		    "client <%s>", s, client_path));
5390 
5391 		/* Get number of paths to this client device */
5392 		if ((cdip = mdi_client_path2devinfo(vdip, client_path))
5393 		    == NULL) {
5394 			retval = ENXIO;
5395 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5396 			    "client dip doesn't exist. invalid path <%s>",
5397 			    s, client_path));
5398 			break;
5399 		}
5400 		num_paths = mdi_client_get_path_count(cdip);
5401 
5402 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5403 		    sizeof (num_paths), mode)) {
5404 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5405 			    "num_paths copyout failed", s));
5406 			retval = EFAULT;
5407 			break;
5408 		}
5409 
5410 		/* If the user just wanted num_paths, then return */
5411 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5412 		    num_paths == 0) {
5413 			break;
5414 		}
5415 
5416 		/* Clamp num_paths to the number of elements we can send to userland */
5417 		if (num_paths > pioc->buf_elem) {
5418 			num_paths = pioc->buf_elem;
5419 		}
5420 
5421 		/* Allocate memory and get userland pointers */
5422 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5423 		    pioc, mode, s) != 0) {
5424 			retval = EFAULT;
5425 			break;
5426 		}
5427 		ASSERT(upibuf != NULL);
5428 		ASSERT(kpibuf != NULL);
5429 
5430 		/*
5431 		 * Get the path information and send it to userland.
5432 		 */
5433 		if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
5434 		    != MDI_SUCCESS) {
5435 			retval = ENXIO;
5436 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5437 			break;
5438 		}
5439 
5440 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5441 		    pioc, mode, s)) {
5442 			retval = EFAULT;
5443 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5444 			break;
5445 		}
5446 
5447 		/* Free the memory allocated for path information */
5448 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5449 		break;
5450 	}
5451 
5452 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5453 	{
5454 		uint_t		num_paths;	/* Num paths to client dev */
5455 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5456 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5457 		dev_info_t	*pdip;		/* PHCI device dip */
5458 
5459 		if (pioc->ret_elem == NULL) {
5460 			retval = EINVAL;
5461 			break;
5462 		}
5463 
5464 		/* Get PHCI device path from user land */
5465 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5466 			retval = EFAULT;
5467 			break;
5468 		}
5469 
5470 		VHCI_DEBUG(6, (CE_WARN, vdip,
5471 		    "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
5472 
5473 		/* Get number of devices associated with this PHCI device */
5474 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5475 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5476 			    "phci dip doesn't exist. invalid path <%s>",
5477 			    s, phci_path));
5478 			retval = ENXIO;
5479 			break;
5480 		}
5481 
5482 		num_paths = mdi_phci_get_path_count(pdip);
5483 
5484 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5485 		    sizeof (num_paths), mode)) {
5486 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5487 			    "num_paths copyout failed", s));
5488 			retval = EFAULT;
5489 			break;
5490 		}
5491 
5492 		/* If the user just wanted num_paths, then return */
5493 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5494 		    num_paths == 0) {
5495 			break;
5496 		}
5497 
5498 		/* Clamp num_paths to the number of elements we can send to userland */
5499 		if (num_paths > pioc->buf_elem) {
5500 			num_paths = pioc->buf_elem;
5501 		}
5502 
5503 		/* Allocate memory and get userland pointers */
5504 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5505 		    pioc, mode, s) != 0) {
5506 			retval = EFAULT;
5507 			break;
5508 		}
5509 		ASSERT(upibuf != NULL);
5510 		ASSERT(kpibuf != NULL);
5511 
5512 		/*
5513 		 * Get the path information and send it to userland.
5514 		 */
5515 		if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
5516 		    != MDI_SUCCESS) {
5517 			retval = ENXIO;
5518 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5519 			break;
5520 		}
5521 
5522 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5523 		    pioc, mode, s)) {
5524 			retval = EFAULT;
5525 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5526 			break;
5527 		}
5528 
5529 		/* Free the memory allocated for path information */
5530 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5531 		break;
5532 	}
5533 
5534 	case SCSI_VHCI_GET_CLIENT_NAME:
5535 	{
5536 		dev_info_t		*cdip, *pdip;
5537 
5538 		/* Get PHCI path and device address from user land */
5539 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5540 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5541 			retval = EFAULT;
5542 			break;
5543 		}
5544 
5545 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5546 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5547 
5548 		/* Get the PHCI dip */
5549 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5550 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5551 			    "phci dip doesn't exist. invalid path <%s>",
5552 			    s, phci_path));
5553 			retval = ENXIO;
5554 			break;
5555 		}
5556 
5557 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5558 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5559 			    "pathinfo doesn't exist. invalid device addr", s));
5560 			retval = ENXIO;
5561 			break;
5562 		}
5563 
5564 		/* Get the client device pathname and send to userland */
5565 		cdip = mdi_pi_get_client(pip);
5566 		vhci_ioc_devi_to_path(cdip, client_path);
5567 
5568 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5569 		    "client <%s>", s, client_path));
5570 
5571 		if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
5572 			retval = EFAULT;
5573 			break;
5574 		}
5575 		break;
5576 	}
5577 
5578 	case SCSI_VHCI_PATH_ONLINE:
5579 	case SCSI_VHCI_PATH_OFFLINE:
5580 	case SCSI_VHCI_PATH_STANDBY:
5581 	case SCSI_VHCI_PATH_TEST:
5582 	{
5583 		dev_info_t		*pdip;	/* PHCI dip */
5584 
5585 		/* Get PHCI path and device address from user land */
5586 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5587 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5588 			retval = EFAULT;
5589 			break;
5590 		}
5591 
5592 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5593 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5594 
5595 		/* Get the PHCI dip */
5596 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5597 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5598 			    "phci dip doesn't exist. invalid path <%s>",
5599 			    s, phci_path));
5600 			retval = ENXIO;
5601 			break;
5602 		}
5603 
5604 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5605 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5606 			    "pathinfo doesn't exist. invalid device addr", s));
5607 			retval = ENXIO;
5608 			break;
5609 		}
5610 
5611 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5612 		    "Calling MDI function to change device state", s));
5613 
5614 		switch (cmd) {
5615 		case SCSI_VHCI_PATH_ONLINE:
5616 			retval = mdi_pi_online(pip, 0);
5617 			break;
5618 
5619 		case SCSI_VHCI_PATH_OFFLINE:
5620 			retval = mdi_pi_offline(pip, 0);
5621 			break;
5622 
5623 		case SCSI_VHCI_PATH_STANDBY:
5624 			retval = mdi_pi_standby(pip, 0);
5625 			break;
5626 
5627 		case SCSI_VHCI_PATH_TEST:
5628 			break;
5629 		}
5630 		break;
5631 	}
5632 
5633 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5634 	{
5635 		dev_info_t *cdip;
5636 		struct scsi_device *devp;
5637 
5638 		/* Get the client device pathname */
5639 		if (ddi_copyin(piocsc->client, client_path,
5640 		    MAXPATHLEN, mode)) {
5641 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5642 			    "client_path copyin failed", s));
5643 			retval = EFAULT;
5644 			break;
5645 		}
5646 
5647 		/* Get the path class to which user wants to switch */
5648 		if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
5649 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5650 			    "controller_class copyin failed", s));
5651 			retval = EFAULT;
5652 			break;
5653 		}
5654 
5655 		/* Perform validity checks */
5656 		if ((cdip = mdi_client_path2devinfo(vdip,
5657 		    client_path)) == NULL) {
5658 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5659 			    "client dip doesn't exist. invalid path <%s>",
5660 			    s, client_path));
5661 			retval = ENXIO;
5662 			break;
5663 		}
5664 
5665 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
5666 		    "to switch controller"));
5667 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
5668 		    "class <%s>", client_path, paddr));
5669 
5670 		if (strcmp(paddr, PCLASS_PRIMARY) &&
5671 		    strcmp(paddr, PCLASS_SECONDARY)) {
5672 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5673 			    "invalid path class <%s>", s, paddr));
5674 			retval = ENXIO;
5675 			break;
5676 		}
5677 
5678 		devp = ddi_get_driver_private(cdip);
5679 		if (devp == NULL) {
5680 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5681 			    "invalid scsi device <%s>", s, client_path));
5682 			retval = ENXIO;
5683 			break;
5684 		}
5685 		vlun = ADDR2VLUN(&devp->sd_address);
5686 		ASSERT(vlun);
5687 
5688 		/*
5689 		 * Check whether the device has only one pclass, PRIMARY.
5690 		 * If so, this device doesn't support failover.  It is assumed
5691 		 * that a device with one pclass has PRIMARY, as that's the
5692 		 * case today.  If this is not true and in the future other
5693 		 * symmetric devices are supported with another pclass, this
5694 		 * IOCTL will have to be overhauled anyway, as the only
5695 		 * arguments it currently accepts are PRIMARY and SECONDARY.
5696 		 */
5697 		fo = vlun->svl_fops;
5698 		if ((*fo->sfo_pathclass_next)(PCLASS_PRIMARY, &pclass,
5699 		    vlun->svl_fops_ctpriv)) {
5700 			retval = ENOTSUP;
5701 			break;
5702 		}
5703 
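		/*
		 * Hold the lun to serialize with any failover already in
		 * progress, then fail fast with EALREADY if the requested
		 * path class is already the active one.
		 */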
5704 		VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
5705 		mutex_enter(&vlun->svl_mutex);
5706 		if (vlun->svl_active_pclass != NULL) {
5707 			if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
5708 				mutex_exit(&vlun->svl_mutex);
5709 				retval = EALREADY;
5710 				VHCI_RELEASE_LUN(vlun);
5711 				break;
5712 			}
5713 		}
5714 		mutex_exit(&vlun->svl_mutex);
5715 		/* Call mdi function to cause a switchover */
5716 		retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
5717 		if (retval == MDI_SUCCESS) {
5718 			retval = 0;
5719 		} else if (retval == MDI_BUSY) {
5720 			retval = EBUSY;
5721 		} else {
5722 			retval = EIO;
5723 		}
5724 		VHCI_RELEASE_LUN(vlun);
5725 		break;
5726 	}
5727 
5728 	case SCSI_VHCI_PATH_ENABLE:
5729 	case SCSI_VHCI_PATH_DISABLE:
5730 	{
5731 		dev_info_t	*cdip, *pdip;
5732 
5733 		/*
5734 		 * Get client device path from user land
5735 		 */
5736 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5737 			retval = EFAULT;
5738 			break;
5739 		}
5740 
5741 		/*
5742 		 * Get Phci device path from user land
5743 		 */
5744 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5745 			retval = EFAULT;
5746 			break;
5747 		}
5748 
5749 		/*
5750 		 * Get the devinfo for the Phci.
5751 		 */
5752 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5753 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5754 			    "phci dip doesn't exist. invalid path <%s>",
5755 			    s, phci_path));
5756 			retval = ENXIO;
5757 			break;
5758 		}
5759 
5760 		/*
5761 		 * If the client path is set to /scsi_vhci, then we need to do
5762 		 * the operation on all the clients, so set cdip to NULL.
5763 		 * Otherwise, try to get the client dip.
5764 		 */
5765 		if (strcmp(client_path, "/scsi_vhci") == 0) {
5766 			cdip = NULL;
5767 		} else {
5768 			if ((cdip = mdi_client_path2devinfo(vdip,
5769 			    client_path)) == NULL) {
5770 				retval = ENXIO;
5771 				VHCI_DEBUG(1, (CE_WARN, NULL,
5772 				    "!vhci_ioctl: ioctl <%s> client dip "
5773 				    "doesn't exist. invalid path <%s>",
5774 				    s, client_path));
5775 				break;
5776 			}
5777 		}
5778 
5779 		if (cmd == SCSI_VHCI_PATH_ENABLE)
5780 			retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
5781 		else
5782 			retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);
5783 
5784 		break;
5785 	}
5786 
5787 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5788 	{
5789 		uint_t		pid = pioc->buf_elem;
5790 		char		*target_port;
5791 		mod_hash_val_t	hv;
5792 
5793 		/* targetmap lookup of 'target-port' by <pid> */
5794 		if (mod_hash_find(vhci_targetmap_bypid,
5795 		    (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
5796 			/*
5797 			 * NOTE: failure to find the mapping is OK for guid
5798 			 * based 'target-port' values.
5799 			 */
5800 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5801 			    "targetport mapping doesn't exist: pid %d",
5802 			    s, pid));
5803 			retval = ENXIO;
5804 			break;
5805 		}
5806 
5807 		/* copyout 'target-port' result */
5808 		target_port = (char *)hv;
5809 		if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
5810 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5811 			    "targetport copyout failed: len: %d",
5812 			    s, (int)strlen(target_port)));
5813 			retval = EFAULT;
5814 		}
5815 		break;
5816 	}
5817 
5818 #ifdef	DEBUG
5819 	case SCSI_VHCI_CONFIGURE_PHCI:
5820 	{
5821 		dev_info_t		*pdip;
5822 
5823 		/* Get PHCI path and device address from user land */
5824 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5825 			retval = EFAULT;
5826 			break;
5827 		}
5828 
5829 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5830 		    "phci <%s>", s, phci_path));
5831 
5832 		/* Get the PHCI dip */
5833 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
5834 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5835 			    "phci dip doesn't exist. invalid path <%s>",
5836 			    s, phci_path));
5837 			retval = ENXIO;
5838 			break;
5839 		}
5840 
5841 		if (ndi_devi_config(pdip,
5842 		    NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) {
5843 			retval = EIO;
5844 		}
5845 
5846 		ddi_release_devi(pdip);
5847 		break;
5848 	}
5849 
5850 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5851 	{
5852 		dev_info_t		*pdip;
5853 
5854 		/* Get PHCI path and device address from user land */
5855 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5856 			retval = EFAULT;
5857 			break;
5858 		}
5859 
5860 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5861 		    "phci <%s>", s, phci_path));
5862 
5863 		/* Get the PHCI dip */
5864 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
5865 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5866 			    "phci dip doesn't exist. invalid path <%s>",
5867 			    s, phci_path));
5868 			retval = ENXIO;
5869 			break;
5870 		}
5871 
5872 		if (ndi_devi_unconfig(pdip,
5873 		    NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
5874 			retval = EBUSY;
5875 		}
5876 
5877 		ddi_release_devi(pdip);
5878 		break;
5879 	}
5880 #endif
5881 	}
5882 
5883 end:
5884 	/* Free the memory allocated above */
5885 	if (phci_path != NULL) {
5886 		kmem_free(phci_path, MAXPATHLEN);
5887 	}
5888 	if (client_path != NULL) {
5889 		kmem_free(client_path, MAXPATHLEN);
5890 	}
5891 	if (paddr != NULL) {
5892 		kmem_free(paddr, MAXNAMELEN);
5893 	}
5894 	return (retval);
5895 }
5896 
5897 /*
5898  * devctl IOCTL support for client device DR
5899  */
5900 /* ARGSUSED */
5901 int
5902 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
5903     int *rvalp)
5904 {
5905 	dev_info_t *self;
5906 	dev_info_t *child;
5907 	scsi_hba_tran_t *hba;
5908 	struct devctl_iocdata *dcp;
5909 	struct scsi_vhci *vhci;
5910 	int rv = 0;
5911 	int retval = 0;
5912 	scsi_vhci_priv_t *svp;
5913 	mdi_pathinfo_t  *pip;
5914 
5915 	if ((vhci = ddi_get_soft_state(vhci_softstate,
5916 	    MINOR2INST(getminor(dev)))) == NULL)
5917 		return (ENXIO);
5918 
5919 	/*
5920 	 * check if :devctl minor device has been opened
5921 	 */
5922 	mutex_enter(&vhci->vhci_mutex);
5923 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5924 		mutex_exit(&vhci->vhci_mutex);
5925 		return (ENXIO);
5926 	}
5927 	mutex_exit(&vhci->vhci_mutex);
5928 
5929 	self = vhci->vhci_dip;
5930 	hba = ddi_get_driver_private(self);
5931 	if (hba == NULL)
5932 		return (ENXIO);
5933 
5934 	/*
5935 	 * We can use the generic implementation for these ioctls
5936 	 */
5937 	switch (cmd) {
5938 	case DEVCTL_DEVICE_GETSTATE:
5939 	case DEVCTL_DEVICE_ONLINE:
5940 	case DEVCTL_DEVICE_OFFLINE:
5941 	case DEVCTL_DEVICE_REMOVE:
5942 	case DEVCTL_BUS_GETSTATE:
5943 		return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
5944 	}
5945 
5946 	/*
5947 	 * read devctl ioctl data
5948 	 */
5949 	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
5950 		return (EFAULT);
5951 
5952 	switch (cmd) {
5953 
5954 	case DEVCTL_DEVICE_RESET:
5955 		/*
5956 		 * lookup and hold child device
5957 		 */
5958 		if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
5959 		    ndi_dc_getaddr(dcp))) == NULL) {
5960 			rv = ENXIO;
5961 			break;
5962 		}
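		/*
		 * Any online or standby path will do: the recovery reset
		 * below targets the whole device through the selected
		 * path's address, not the path itself.
		 */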
5963 		retval = mdi_select_path(child, NULL,
5964 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
5965 		    NULL, &pip);
5966 		if ((retval != MDI_SUCCESS) || (pip == NULL)) {
5967 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: "
5968 			    "Unable to get a path, dip 0x%p", (void *)child));
5969 			rv = ENXIO;
5970 			break;
5971 		}
5972 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5973 		if (vhci_recovery_reset(svp->svp_svl,
5974 		    &svp->svp_psd->sd_address, TRUE,
5975 		    VHCI_DEPTH_TARGET) == 0) {
5976 			VHCI_DEBUG(1, (CE_NOTE, NULL,
5977 			    "!vhci_ioctl(pip:%p): "
5978 			    "reset failed\n", (void *)pip));
5979 			rv = ENXIO;
5980 		}
5981 		mdi_rele_path(pip);
5982 		break;
5983 
5984 	case DEVCTL_BUS_QUIESCE:
5985 	case DEVCTL_BUS_UNQUIESCE:
5986 	case DEVCTL_BUS_RESET:
5987 	case DEVCTL_BUS_RESETALL:
5988 #ifdef	DEBUG
5989 	case DEVCTL_BUS_CONFIGURE:
5990 	case DEVCTL_BUS_UNCONFIGURE:
5991 #endif
5992 		rv = ENOTSUP;
5993 		break;
5994 
5995 	default:
5996 		rv = ENOTTY;
5997 	} /* end of outer switch */
5998 
5999 	ndi_dc_freehdl(dcp);
6000 	return (rv);
6001 }
6002 
6003 /*
6004  * Routine to get the PHCI pathname from ioctl structures in userland
6005  */
6006 /* ARGSUSED */
6007 static int
6008 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
6009 	int mode, caddr_t s)
6010 {
6011 	int retval = 0;
6012 
6013 	if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
6014 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
6015 		    "phci_path copyin failed", s));
6016 		retval = EFAULT;
6017 	}
6018 	return (retval);
6019 
6020 }
6021 
6022 
6023 /*
6024  * Routine to get the Client device pathname from ioctl structures in userland
6025  */
6026 /* ARGSUSED */
6027 static int
6028 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
6029 	int mode, caddr_t s)
6030 {
6031 	int retval = 0;
6032 
6033 	if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
6034 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
6035 		    "ioctl <%s> client_path copyin failed", s));
6036 		retval = EFAULT;
6037 	}
6038 	return (retval);
6039 }
6040 
6041 
6042 /*
6043  * Routine to get physical device address from ioctl structure in userland
6044  */
6045 /* ARGSUSED */
6046 static int
6047 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
6048 {
6049 	int retval = 0;
6050 
6051 	if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
6052 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
6053 		    "ioctl <%s> device addr copyin failed", s));
6054 		retval = EFAULT;
6055 	}
6056 	return (retval);
6057 }
6058 
6059 
6060 /*
6061  * Routine to send client device pathname to userland.
6062  */
6063 /* ARGSUSED */
6064 static int
6065 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
6066 	int mode, caddr_t s)
6067 {
6068 	int retval = 0;
6069 
6070 	if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
6071 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
6072 		    "ioctl <%s> client_path copyout failed", s));
6073 		retval = EFAULT;
6074 	}
6075 	return (retval);
6076 }
6077 
6078 
6079 /*
6080  * Routine to translate a dev_info pointer (dip) to a device pathname.
6081  */
6082 static void
6083 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
6084 {
6085 	(void) ddi_pathname(dip, path);
6086 }
6087 
6088 
6089 /*
6090  * vhci_get_phci_path_list:
6091  *		get information about devices associated with a
6092  *		given PHCI device.
6093  *
6094  * Return Values:
6095  *		path information elements
6096  */
6097 int
6098 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
6099 	uint_t num_elems)
6100 {
6101 	uint_t			count, done;
6102 	mdi_pathinfo_t		*pip;
6103 	sv_path_info_t		*ret_pip;
6104 	int			status;
6105 	size_t			prop_size;
6106 	int			circular;
6107 
6108 	/*
6109 	 * Get the PHCI structure and retrieve the path information
6110 	 * from the GUID hash table.
6111 	 */
6112 
6113 	ret_pip = pibuf;
6114 	count = 0;
6115 
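	/*
	 * Walk the pathinfo nodes of this pHCI under ndi_devi_enter()
	 * protection, filling at most num_elems entries with each
	 * path's address, state, and packed mdi properties.
	 */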
6116 	ndi_devi_enter(pdip, &circular);
6117 
6118 	done = (count >= num_elems);
6119 	pip = mdi_get_next_client_path(pdip, NULL);
6120 	while (pip && !done) {
6121 		mdi_pi_lock(pip);
6122 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6123 		    ret_pip->device.ret_phci);
6124 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6125 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6126 		    &ret_pip->ret_ext_state);
6127 
6128 		status = mdi_prop_size(pip, &prop_size);
6129 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6130 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6131 		}
6132 
6133 #ifdef DEBUG
6134 		if (status != MDI_SUCCESS) {
6135 			VHCI_DEBUG(2, (CE_WARN, NULL,
6136 			    "!vhci_get_phci_path_list: "
6137 			    "phci <%s>, prop size failure 0x%x",
6138 			    ret_pip->device.ret_phci, status));
6139 		}
6140 #endif /* DEBUG */
6141 
6142 
6143 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6144 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6145 			status = mdi_prop_pack(pip,
6146 			    &ret_pip->ret_prop.buf,
6147 			    ret_pip->ret_prop.buf_size);
6148 
6149 #ifdef DEBUG
6150 			if (status != MDI_SUCCESS) {
6151 				VHCI_DEBUG(2, (CE_WARN, NULL,
6152 				    "!vhci_get_phci_path_list: "
6153 				    "phci <%s>, prop pack failure 0x%x",
6154 				    ret_pip->device.ret_phci, status));
6155 			}
6156 #endif /* DEBUG */
6157 		}
6158 
6159 		mdi_pi_unlock(pip);
6160 		pip = mdi_get_next_client_path(pdip, pip);
6161 		ret_pip++;
6162 		count++;
6163 		done = (count >= num_elems);
6164 	}
6165 
6166 	ndi_devi_exit(pdip, circular);
6167 
6168 	return (MDI_SUCCESS);
6169 }
6170 
6171 
6172 /*
6173  * vhci_get_client_path_list:
6174  *		get information about various paths associated with a
6175  *		given client device.
6176  *
6177  * Return Values:
6178  *		path information elements
6179  */
6180 int
6181 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
6182 	uint_t num_elems)
6183 {
6184 	uint_t			count, done;
6185 	mdi_pathinfo_t		*pip;
6186 	sv_path_info_t		*ret_pip;
6187 	int			status;
6188 	size_t			prop_size;
6189 	int			circular;
6190 
6191 	ret_pip = pibuf;
6192 	count = 0;
6193 
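	/*
	 * Walk the pHCI paths of this client under ndi_devi_enter()
	 * protection, filling at most num_elems entries with each
	 * path's address, state, and packed mdi properties.
	 */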
6194 	ndi_devi_enter(cdip, &circular);
6195 
6196 	done = (count >= num_elems);
6197 	pip = mdi_get_next_phci_path(cdip, NULL);
6198 	while (pip && !done) {
6199 		mdi_pi_lock(pip);
6200 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6201 		    ret_pip->device.ret_phci);
6202 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6203 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6204 		    &ret_pip->ret_ext_state);
6205 
6206 		status = mdi_prop_size(pip, &prop_size);
6207 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6208 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6209 		}
6210 
6211 #ifdef DEBUG
6212 		if (status != MDI_SUCCESS) {
6213 			VHCI_DEBUG(2, (CE_WARN, NULL,
6214 			    "!vhci_get_client_path_list: "
6215 			    "phci <%s>, prop size failure 0x%x",
6216 			    ret_pip->device.ret_phci, status));
6217 		}
6218 #endif /* DEBUG */
6219 
6220 
6221 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6222 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6223 			status = mdi_prop_pack(pip,
6224 			    &ret_pip->ret_prop.buf,
6225 			    ret_pip->ret_prop.buf_size);
6226 
6227 #ifdef DEBUG
6228 			if (status != MDI_SUCCESS) {
6229 				VHCI_DEBUG(2, (CE_WARN, NULL,
6230 				    "!vhci_get_client_path_list: "
6231 				    "phci <%s>, prop pack failure 0x%x",
6232 				    ret_pip->device.ret_phci, status));
6233 			}
6234 #endif /* DEBUG */
6235 		}
6236 
6237 		mdi_pi_unlock(pip);
6238 		pip = mdi_get_next_phci_path(cdip, pip);
6239 		ret_pip++;
6240 		count++;
6241 		done = (count >= num_elems);
6242 	}
6243 
6244 	ndi_devi_exit(cdip, circular);
6245 
6246 	return (MDI_SUCCESS);
6247 }
6248 
6249 
6250 /*
6251  * Routine to get ioctl argument structure from userland.
6252  */
6253 /* ARGSUSED */
6254 static int
6255 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
6256 {
6257 	int	retval = 0;
6258 
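	/*
	 * A 32-bit application on a 64-bit kernel passes the ILP32
	 * layout of sv_iocdata_t; copy that in and widen the pointer
	 * fields.  Otherwise the native layout is copied directly.
	 */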
6259 #ifdef  _MULTI_DATAMODEL
6260 	switch (ddi_model_convert_from(mode & FMODELS)) {
6261 	case DDI_MODEL_ILP32:
6262 	{
6263 		sv_iocdata32_t	ioc32;
6264 
6265 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6266 			retval = EFAULT;
6267 			break;
6268 		}
6269 		pioc->client	= (caddr_t)(uintptr_t)ioc32.client;
6270 		pioc->phci	= (caddr_t)(uintptr_t)ioc32.phci;
6271 		pioc->addr	= (caddr_t)(uintptr_t)ioc32.addr;
6272 		pioc->buf_elem	= (uint_t)ioc32.buf_elem;
6273 		pioc->ret_buf	= (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
6274 		pioc->ret_elem	= (uint_t *)(uintptr_t)ioc32.ret_elem;
6275 		break;
6276 	}
6277 
6278 	case DDI_MODEL_NONE:
6279 		if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6280 			retval = EFAULT;
6281 			break;
6282 		}
6283 		break;
6284 	}
6285 #else   /* _MULTI_DATAMODEL */
6286 	if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6287 		retval = EFAULT;
6288 	}
6289 #endif  /* _MULTI_DATAMODEL */
6290 
6291 #ifdef DEBUG
6292 	if (retval) {
6293 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6294 		    "iocdata copyin failed", s));
6295 	}
6296 #endif
6297 
6298 	return (retval);
6299 }
6300 
6301 
6302 /*
6303  * Routine to get the ioctl argument for ioctl causing controller switchover.
6304  */
6305 /* ARGSUSED */
6306 static int
6307 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
6308     int mode, caddr_t s)
6309 {
6310 	int	retval = 0;
6311 
6312 #ifdef  _MULTI_DATAMODEL
6313 	switch (ddi_model_convert_from(mode & FMODELS)) {
6314 	case DDI_MODEL_ILP32:
6315 	{
6316 		sv_switch_to_cntlr_iocdata32_t	ioc32;
6317 
6318 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6319 			retval = EFAULT;
6320 			break;
6321 		}
6322 		piocsc->client	= (caddr_t)(uintptr_t)ioc32.client;
6323 		piocsc->class	= (caddr_t)(uintptr_t)ioc32.class;
6324 		break;
6325 	}
6326 
6327 	case DDI_MODEL_NONE:
6328 		if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6329 			retval = EFAULT;
6330 		}
6331 		break;
6332 	}
6333 #else   /* _MULTI_DATAMODEL */
6334 	if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6335 		retval = EFAULT;
6336 	}
6337 #endif  /* _MULTI_DATAMODEL */
6338 
6339 #ifdef DEBUG
6340 	if (retval) {
6341 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6342 		    "switch_to_cntlr_iocdata copyin failed", s));
6343 	}
6344 #endif
6345 
6346 	return (retval);
6347 }
6348 
6349 
6350 /*
6351  * Routine to allocate memory for the path information structures.
6352  * It allocates two chunks of memory - one for keeping the userland
6353  * pointers/values for path information and path properties, and a
6354  * second for the kernel-allocated buffers that hold the path
6355  * properties.  These path properties are finally copied to userland.
6356  */
6357 /* ARGSUSED */
6358 static int
6359 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
6360     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6361 {
6362 	sv_path_info_t	*pi;
6363 	uint_t		bufsize;
6364 	int		retval = 0;
6365 	int		index;
6366 
6367 	/* Allocate memory */
6368 	*upibuf = (sv_path_info_t *)
6369 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6370 	ASSERT(*upibuf != NULL);
6371 	*kpibuf = (sv_path_info_t *)
6372 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6373 	ASSERT(*kpibuf != NULL);
6374 
6375 	/*
6376 	 * Get the path info structure from the user space.
6377 	 * We are interested in the following fields:
6378 	 *	- user size of buffer for per path properties.
6379 	 *	- user address of buffer for path info properties.
6380 	 *	- user pointer for returning actual buffer size
6381 	 * Keep these fields in the 'upibuf' structures.
6382 	 * Allocate buffer for per path info properties in kernel
6383 	 * structure ('kpibuf').
6384 	 * Size of these buffers will be equal to the size of buffers
6385 	 * in the user space.
6386 	 */
6387 #ifdef  _MULTI_DATAMODEL
6388 	switch (ddi_model_convert_from(mode & FMODELS)) {
6389 	case DDI_MODEL_ILP32:
6390 	{
6391 		sv_path_info32_t	*src;
6392 		sv_path_info32_t	pi32;
6393 
6394 		src  = (sv_path_info32_t *)pioc->ret_buf;
6395 		pi = (sv_path_info_t *)*upibuf;
6396 		for (index = 0; index < num_paths; index++, src++, pi++) {
6397 			if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
6398 				retval = EFAULT;
6399 				break;
6400 			}
6401 
6402 			pi->ret_prop.buf_size	=
6403 			    (uint_t)pi32.ret_prop.buf_size;
6404 			pi->ret_prop.ret_buf_size =
6405 			    (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
6406 			pi->ret_prop.buf	=
6407 			    (caddr_t)(uintptr_t)pi32.ret_prop.buf;
6408 		}
6409 		break;
6410 	}
6411 
6412 	case DDI_MODEL_NONE:
6413 		if (ddi_copyin(pioc->ret_buf, *upibuf,
6414 		    sizeof (sv_path_info_t) * num_paths, mode)) {
6415 			retval = EFAULT;
6416 		}
6417 		break;
6418 	}
6419 #else   /* _MULTI_DATAMODEL */
6420 	if (ddi_copyin(pioc->ret_buf, *upibuf,
6421 	    sizeof (sv_path_info_t) * num_paths, mode)) {
6422 		retval = EFAULT;
6423 	}
6424 #endif  /* _MULTI_DATAMODEL */
6425 
6426 	if (retval != 0) {
6427 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
6428 		    "ioctl <%s> normal: path_info copyin failed", s));
6429 		kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
6430 		kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
6431 		*upibuf = NULL;
6432 		*kpibuf = NULL;
6433 		return (retval);
6434 	}
6435 
6436 	/*
6437 	 * Allocate memory for per path properties.
6438 	 */
6439 	for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
6440 		bufsize = (*upibuf)[index].ret_prop.buf_size;
6441 
6442 		if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
6443 			pi->ret_prop.buf_size = bufsize;
6444 			pi->ret_prop.buf = (caddr_t)
6445 			    kmem_zalloc(bufsize, KM_SLEEP);
6446 			ASSERT(pi->ret_prop.buf != NULL);
6447 		} else {
6448 			pi->ret_prop.buf_size = 0;
6449 			pi->ret_prop.buf = NULL;
6450 		}
6451 
6452 		if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
6453 			pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
6454 			    sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
6455 			ASSERT(pi->ret_prop.ret_buf_size != NULL);
6456 		} else {
6457 			pi->ret_prop.ret_buf_size = NULL;
6458 		}
6459 	}
6460 
6461 	return (0);
6462 }
6463 
6464 
6465 /*
6466  * Routine to free memory for the path information structures.
6467  * This is the memory which was allocated earlier.
6468  */
6469 /* ARGSUSED */
6470 static void
6471 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6472     uint_t num_paths)
6473 {
6474 	sv_path_info_t	*pi;
6475 	int		index;
6476 
6477 	/* Free memory for per path properties */
6478 	for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
6479 		if (pi->ret_prop.ret_buf_size != NULL) {
6480 			kmem_free(pi->ret_prop.ret_buf_size,
6481 			    sizeof (*pi->ret_prop.ret_buf_size));
6482 		}
6483 
6484 		if (pi->ret_prop.buf != NULL) {
6485 			kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
6486 		}
6487 	}
6488 
6489 	/* Free memory for path info structures */
6490 	kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
6491 	kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
6492 }
6493 
6494 
6495 /*
6496  * Routine to copy path information and path properties to userland.
6497  */
6498 /* ARGSUSED */
6499 static int
6500 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6501     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6502 {
6503 	int			retval = 0, index;
6504 	sv_path_info_t		*upi_ptr;
6505 	sv_path_info32_t	*upi32_ptr;
6506 
6507 #ifdef  _MULTI_DATAMODEL
6508 	switch (ddi_model_convert_from(mode & FMODELS)) {
6509 	case DDI_MODEL_ILP32:
6510 		goto copy_32bit;
6511 
6512 	case DDI_MODEL_NONE:
6513 		goto copy_normal;
6514 	}
6515 #else   /* _MULTI_DATAMODEL */
6516 
6517 	goto copy_normal;
6518 
6519 #endif  /* _MULTI_DATAMODEL */
6520 
6521 copy_normal:
6522 
6523 	/*
6524 	 * Copy path information and path properties to user land.
6525 	 * Pointer fields inside the path property structure were
6526 	 * saved in the 'upibuf' structure earlier.
6527 	 */
6528 	upi_ptr = pioc->ret_buf;
6529 	for (index = 0; index < num_paths; index++) {
6530 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6531 		    upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6532 			retval = EFAULT;
6533 			break;
6534 		}
6535 
6536 		if (ddi_copyout(kpibuf[index].ret_addr,
6537 		    upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6538 			retval = EFAULT;
6539 			break;
6540 		}
6541 
6542 		if (ddi_copyout(&kpibuf[index].ret_state,
6543 		    &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state),
6544 		    mode)) {
6545 			retval = EFAULT;
6546 			break;
6547 		}
6548 
6549 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6550 		    &upi_ptr[index].ret_ext_state,
6551 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6552 			retval = EFAULT;
6553 			break;
6554 		}
6555 
6556 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6557 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6558 		    upibuf[index].ret_prop.ret_buf_size,
6559 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6560 			retval = EFAULT;
6561 			break;
6562 		}
6563 
6564 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6565 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6566 		    upibuf[index].ret_prop.buf,
6567 		    upibuf[index].ret_prop.buf_size, mode)) {
6568 			retval = EFAULT;
6569 			break;
6570 		}
6571 	}
6572 
6573 #ifdef DEBUG
6574 	if (retval) {
6575 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6576 		    "normal: path_info copyout failed", s));
6577 	}
6578 #endif
6579 
6580 	return (retval);
6581 
6582 copy_32bit:
6583 	/*
6584 	 * Copy path information and path properties to user land.
6585 	 * Pointer fields inside the path property structure were
6586 	 * saved in the 'upibuf' structure earlier.
6587 	 */
6588 	upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
6589 	for (index = 0; index < num_paths; index++) {
6590 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6591 		    upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6592 			retval = EFAULT;
6593 			break;
6594 		}
6595 
6596 		if (ddi_copyout(kpibuf[index].ret_addr,
6597 		    upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6598 			retval = EFAULT;
6599 			break;
6600 		}
6601 
6602 		if (ddi_copyout(&kpibuf[index].ret_state,
6603 		    &upi32_ptr[index].ret_state,
6604 		    sizeof (kpibuf[index].ret_state), mode)) {
6605 			retval = EFAULT;
6606 			break;
6607 		}
6608 
6609 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6610 		    &upi32_ptr[index].ret_ext_state,
6611 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6612 			retval = EFAULT;
6613 			break;
6614 		}
6615 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6616 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6617 		    upibuf[index].ret_prop.ret_buf_size,
6618 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6619 			retval = EFAULT;
6620 			break;
6621 		}
6622 
6623 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6624 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6625 		    upibuf[index].ret_prop.buf,
6626 		    upibuf[index].ret_prop.buf_size, mode)) {
6627 			retval = EFAULT;
6628 			break;
6629 		}
6630 	}
6631 
6632 #ifdef DEBUG
6633 	if (retval) {
6634 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6635 		    "32bit: path_info copyout failed", s));
6636 	}
6637 #endif
6638 
6639 	return (retval);
6640 }
6641 
6642 
6643 /*
6644  * vhci_failover()
6645  * This routine expects VHCI_HOLD_LUN before being invoked.  It can be invoked
6646  * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC.  For asynchronous failovers
6647  * this routine does VHCI_RELEASE_LUN on exit.  For synchronous failovers
6648  * it is the caller's responsibility to release the lun.
6649  */
6650 
6651 /* ARGSUSED */
6652 static int
6653 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
6654 {
6655 	char			*guid;
6656 	scsi_vhci_lun_t		*vlun = NULL;
6657 	struct scsi_vhci	*vhci;
6658 	mdi_pathinfo_t		*pip, *npip;
6659 	char			*s_pclass, *pclass1, *pclass2, *pclass;
6660 	char			active_pclass_copy[255], *active_pclass_ptr;
6661 	char			*ptr1, *ptr2;
6662 	mdi_pathinfo_state_t	pi_state;
6663 	uint32_t		pi_ext_state;
6664 	scsi_vhci_priv_t	*svp;
6665 	struct scsi_device	*sd;
6666 	struct scsi_failover_ops	*sfo;
6667 	int			sps; /* mdi_select_path() status */
6668 	int			activation_done = 0;
6669 	int			rval, retval = MDI_FAILURE;
6670 	int			reserve_pending, check_condition, UA_condition;
6671 	struct scsi_pkt		*pkt;
6672 	struct buf		*bp;
6673 
6674 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
6675 	sd = ddi_get_driver_private(cdip);
6676 	vlun = ADDR2VLUN(&sd->sd_address);
6677 	ASSERT(vlun != 0);
6678 	ASSERT(VHCI_LUN_IS_HELD(vlun));
6679 	guid = vlun->svl_lun_wwn;
6680 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
6681 	vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
6682 	    "(GUID %s)", ddi_node_name(cdip), guid);
6683 
6684 	/*
6685 	 * Let's maintain a local copy of vlun->svl_active_pclass
6686 	 * for the rest of the processing.  Accessing the field
6687 	 * directly in the loop below breaks the loop logic,
6688 	 * especially when the field gets updated by other threads
6689 	 * (updating path status, etc.), causing the 'paths are not
6690 	 * currently available' condition to be declared prematurely.
6691 	 */
6692 	mutex_enter(&vlun->svl_mutex);
6693 	if (vlun->svl_active_pclass != NULL) {
6694 		(void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6695 		    sizeof (active_pclass_copy));
6696 		active_pclass_ptr = &active_pclass_copy[0];
6697 		mutex_exit(&vlun->svl_mutex);
6698 		if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6699 		    active_pclass_ptr) != 0) {
6700 			retval = MDI_FAILURE;
6701 		}
6702 	} else {
6703 		/*
6704 		 * This can happen only when the first path discovered
6705 		 * to the device is a STANDBY path.
6706 		 */
6707 		mutex_exit(&vlun->svl_mutex);
6708 		active_pclass_copy[0] = '\0';
6709 		active_pclass_ptr = NULL;
6710 	}
6711 
6712 	sfo = vlun->svl_fops;
6713 	ASSERT(sfo != NULL);
6714 	pclass1 = s_pclass = active_pclass_ptr;
6715 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6716 	    (s_pclass == NULL ? "<none>" : s_pclass)));
6717 
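	/*
	 * Walk the failover module's pathclass ordering: ask
	 * sfo_pathclass_next() for the class following pclass1,
	 * wrapping back to the first class at the end of the list.
	 * Arriving back at the starting class means no failover
	 * destination is available.
	 */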
6718 next_pathclass:
6719 
6720 	rval = (*sfo->sfo_pathclass_next)(pclass1, &pclass2,
6721 	    vlun->svl_fops_ctpriv);
6722 	if (rval == ENOENT) {
6723 		if (s_pclass == NULL) {
6724 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6725 			    "failed, no more pathclasses\n", guid));
6726 			goto done;
6727 		} else {
6728 			(*sfo->sfo_pathclass_next)(NULL, &pclass2,
6729 			    vlun->svl_fops_ctpriv);
6730 		}
6731 	} else if (rval == EINVAL) {
6732 		vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6733 		    "device %s (GUID %s): Invalid path-class %s",
6734 		    ddi_node_name(cdip), guid,
6735 		    ((pclass1 == NULL) ? "<none>" : pclass1));
6736 		goto done;
6737 	}
6738 	if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6739 		/*
6740 		 * paths are not currently available
6741 		 */
6742 		vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6743 		    " for device %s (GUID %s)",
6744 		    ddi_node_name(cdip), guid);
6745 		goto done;
6746 	}
6747 	pip = npip = NULL;
6748 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6749 	    "%s as failover destination\n", guid, pclass2));
6750 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6751 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6752 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6753 		    "STANDBY paths found (status:%x)!\n", guid, sps));
6754 		pclass1 = pclass2;
6755 		goto next_pathclass;
6756 	}
6757 	do {
6758 		pclass = NULL;
6759 		if ((mdi_prop_lookup_string(npip, "path-class",
6760 		    &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6761 		    pclass) != 0)) {
6762 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6763 			    "!vhci_failover(5.5)(%s): skipping path "
6764 			    "%p(%s)...\n", guid, (void *)npip, pclass));
6765 			pip = npip;
6766 			sps = mdi_select_path(cdip, NULL,
6767 			    MDI_SELECT_STANDBY_PATH, pip, &npip);
6768 			mdi_rele_path(pip);
6769 			(void) mdi_prop_free(pclass);
6770 			continue;
6771 		}
6772 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6773 
6774 		/*
6775 		 * Issue a READ at a non-zero block on this STANDBY path.
6776 		 * Purple returns
6777 		 * 1. RESERVATION_CONFLICT if a reservation is pending
6778 		 * 2. POR check condition if a reset happened.
6779 		 * 3. failover check conditions if one is already in progress.
6780 		 */
6781 		reserve_pending = 0;
6782 		check_condition = 0;
6783 		UA_condition = 0;
6784 
6785 		bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
6786 		    (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL);
6787 		if (!bp) {
6788 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6789 			    "!vhci_failover: No resources (buf)\n"));
6790 			mdi_rele_path(npip);
6791 			goto done;
6792 		}
6793 		pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
6794 		    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
6795 		    PKT_CONSISTENT, NULL, NULL);
6796 		if (pkt) {
6797 			(void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
6798 			    pkt->pkt_cdbp, SCMD_READ, 1, 1, 0);
6799 			pkt->pkt_flags = FLAG_NOINTR;
6800 check_path_again:
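			/*
			 * Pin the probe to the chosen standby path via
			 * pkt_path_instance and allow 90 seconds before
			 * the command is timed out.
			 */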
6801 			pkt->pkt_path_instance = mdi_pi_get_path_instance(npip);
6802 			pkt->pkt_time = 3*30;
6803 
6804 			if (scsi_transport(pkt) == TRAN_ACCEPT) {
6805 				switch (pkt->pkt_reason) {
6806 				case CMD_CMPLT:
6807 					switch (SCBP_C(pkt)) {
6808 					case STATUS_GOOD:
6809 						/* Already failed over */
6810 						activation_done = 1;
6811 						break;
6812 					case STATUS_RESERVATION_CONFLICT:
6813 						reserve_pending = 1;
6814 						break;
6815 					case STATUS_CHECK:
6816 						check_condition = 1;
6817 						break;
6818 					}
6819 				}
6820 			}
6821 			if (check_condition &&
6822 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
6823 				struct scsi_extended_sense *sns =
6824 				    &(((struct scsi_arq_status *)(uintptr_t)
6825 				    (pkt->pkt_scbp))->sts_sensedata);
6826 				if (sns->es_key == KEY_UNIT_ATTENTION &&
6827 				    sns->es_add_code == 0x29) {
6828 					/* Already failed over */
6829 					VHCI_DEBUG(1, (CE_NOTE, NULL,
6830 					    "!vhci_failover(7)(%s): "
6831 					    "path 0x%p POR UA condition\n",
6832 					    guid, (void *)npip));
6833 					if (UA_condition == 0) {
6834 						UA_condition = 1;
6835 						goto check_path_again;
6836 					}
6837 				} else {
6838 					activation_done = 0;
6839 					VHCI_DEBUG(1, (CE_NOTE, NULL,
6840 					    "!vhci_failover(%s): path 0x%p "
6841 					    "unhandled chkcond %x %x %x\n",
6842 					    guid, (void *)npip, sns->es_key,
6843 					    sns->es_add_code,
6844 					    sns->es_qual_code));
6845 				}
6846 			}
6847 			scsi_destroy_pkt(pkt);
6848 		}
6849 		scsi_free_consistent_buf(bp);
6850 
6851 		if (activation_done) {
6852 			mdi_rele_path(npip);
6853 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
6854 			    "path 0x%p already failedover\n", guid,
6855 			    (void *)npip));
6856 			break;
6857 		}
6858 		if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
6859 			(void) vhci_recovery_reset(vlun,
6860 			    &svp->svp_psd->sd_address,
6861 			    FALSE, VHCI_DEPTH_ALL);
6862 		}
6863 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
6864 		    "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
6865 		    (void *)svp->svp_psd));
6866 		if ((*sfo->sfo_path_activate)(svp->svp_psd, pclass2,
6867 		    vlun->svl_fops_ctpriv) == 0) {
6868 			activation_done = 1;
6869 			mdi_rele_path(npip);
6870 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
6871 			    "path 0x%p successfully activated\n", guid,
6872 			    (void *)npip));
6873 			break;
6874 		}
6875 		pip = npip;
6876 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
6877 		    pip, &npip);
6878 		mdi_rele_path(pip);
6879 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
6880 	if (activation_done == 0) {
6881 		pclass1 = pclass2;
6882 		goto next_pathclass;
6883 	}
6884 
6885 	/*
6886 	 * If we are here, we have succeeded in activating path npip of
6887 	 * pathclass pclass2; let us validate all paths of pclass2 by
6888 	 * "ping"-ing each one and marking the good ones ONLINE.
6889 	 * Also, set the state of the paths belonging to the previously
6890 	 * active pathclass to STANDBY.
6891 	 */
6892 	pip = npip = NULL;
6893 	sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
6894 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
6895 	    NULL, &npip);
6896 	if (npip == NULL || sps != MDI_SUCCESS) {
6897 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
6898 		    "device %s (GUID %s): paths may be busy\n",
6899 		    ddi_node_name(cdip), guid));
6900 		goto done;
6901 	}
6902 	do {
6903 		(void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
6904 		if (mdi_prop_lookup_string(npip, "path-class", &pclass)
6905 		    != MDI_SUCCESS) {
6906 			pip = npip;
6907 			sps = mdi_select_path(cdip, NULL,
6908 			    (MDI_SELECT_ONLINE_PATH |
6909 			    MDI_SELECT_STANDBY_PATH |
6910 			    MDI_SELECT_USER_DISABLE_PATH),
6911 			    pip, &npip);
6912 			mdi_rele_path(pip);
6913 			continue;
6914 		}
6915 		if (strcmp(pclass, pclass2) == 0) {
6916 			if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
6917 				svp = (scsi_vhci_priv_t *)
6918 				    mdi_pi_get_vhci_private(npip);
6919 				VHCI_DEBUG(1, (CE_NOTE, NULL,
6920 				    "!vhci_failover(8)(%s): "
6921 				    "pinging path 0x%p\n",
6922 				    guid, (void *)npip));
6923 				if ((*sfo->sfo_path_ping)(svp->svp_psd,
6924 				    vlun->svl_fops_ctpriv) == 1) {
6925 					mdi_pi_set_state(npip,
6926 					    MDI_PATHINFO_STATE_ONLINE);
6927 					VHCI_DEBUG(1, (CE_NOTE, NULL,
6928 					    "!vhci_failover(9)(%s): "
6929 					    "path 0x%p ping successful, "
6930 					    "marked online\n", guid,
6931 					    (void *)npip));
6932 					MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
6933 				}
6934 			}
6935 		} else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
6936 		    == 0)) {
6937 			if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
6938 				mdi_pi_set_state(npip,
6939 				    MDI_PATHINFO_STATE_STANDBY);
6940 				VHCI_DEBUG(1, (CE_NOTE, NULL,
6941 				    "!vhci_failover(10)(%s): path 0x%p marked "
6942 				    "STANDBY\n", guid, (void *)npip));
6943 				MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
6944 			}
6945 		}
6946 		(void) mdi_prop_free(pclass);
6947 		pip = npip;
6948 		sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
6949 		    MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH),
6950 		    pip, &npip);
6951 		mdi_rele_path(pip);
6952 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
6953 
6954 	/*
6955 	 * Update the AccessState of related MP-API TPGs
6956 	 */
6957 	(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
6958 
6959 	vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
6960 	    "for device %s (GUID %s): failed over from %s to %s",
6961 	    ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
6962 	    s_pclass), pclass2);
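	/*
	 * Publish the new active pathclass: swap the string in under
	 * svl_mutex and free the old copy outside the critical section.
	 */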
6963 	ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP);
6964 	(void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1));
6965 	mutex_enter(&vlun->svl_mutex);
6966 	ptr2 = vlun->svl_active_pclass;
6967 	vlun->svl_active_pclass = ptr1;
6968 	mutex_exit(&vlun->svl_mutex);
6969 	if (ptr2) {
6970 		kmem_free(ptr2, strlen(ptr2)+1);
6971 	}
6972 	mutex_enter(&vhci->vhci_mutex);
6973 	scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
6974 	    &vhci->vhci_reset_notify_listf);
6975 	/* All reservations are cleared upon these resets. */
6976 	vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
6977 	mutex_exit(&vhci->vhci_mutex);
6978 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
6979 	    "pathclass for %s is now %s\n", guid, pclass2));
6980 	retval = MDI_SUCCESS;
6981 
6982 done:
6983 	if (flags == MDI_FAILOVER_ASYNC) {
6984 		VHCI_RELEASE_LUN(vlun);
6985 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
6986 		    "releasing lun, as failover was ASYNC\n"));
6987 	} else {
6988 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
6989 		    "NOT releasing lun, as failover was SYNC\n"));
6990 	}
6991 	return (retval);
6992 }
6993 
6994 /*
6995  * vhci_client_attached is called after the successful attach of a
6996  * client devinfo node.
6997  */
6998 static void
6999 vhci_client_attached(dev_info_t *cdip)
7000 {
7001 	mdi_pathinfo_t	*pip;
7002 	int		circular;
7003 
7004 	/*
7005 	 * At this point the client has attached and its instance number is
7006 	 * valid, so we can set up kstats.  We need to do this here because it
7007 	 * is possible for paths to go online prior to client attach, in which
7008 	 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
7009 	 * was a noop.
7010 	 */
7011 	ndi_devi_enter(cdip, &circular);
7012 	for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
7013 	    pip = mdi_get_next_phci_path(cdip, pip))
7014 		vhci_kstat_create_pathinfo(pip);
7015 	ndi_devi_exit(cdip, circular);
7016 }
7017 
7018 /*
7019  * Quiesce all online paths belonging to the currently active path class.
7020  */
7021 static int
7022 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
7023 	char *guid, char *active_pclass_ptr)
7024 {
7025 	scsi_vhci_priv_t	*svp;
7026 	char			*s_pclass = NULL;
7027 	mdi_pathinfo_t		*npip, *pip;
7028 	int			sps;
7029 
7030 	/* quiesce currently active paths */
7031 	s_pclass = NULL;
7032 	pip = npip = NULL;
7033 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
7034 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
7035 		return (1);
7036 	}
7037 	do {
7038 		if (mdi_prop_lookup_string(npip, "path-class",
7039 		    &s_pclass) != MDI_SUCCESS) {
7040 			mdi_rele_path(npip);
7041 			vhci_log(CE_NOTE, vdip, "!Failover operation failed "
7042 			    "for device %s (GUID %s) due to an internal "
7043 			    "error", ddi_node_name(cdip), guid);
7044 			return (1);
7045 		}
7046 		if (strcmp(s_pclass, active_pclass_ptr) == 0) {
7047 			/*
7048 			 * quiesce path. Free s_pclass since
7049 			 * we don't need it anymore
7050 			 */
7051 			VHCI_DEBUG(1, (CE_NOTE, NULL,
7052 			    "!vhci_failover(2)(%s): failing over "
7053 			    "from %s; quiescing path %p\n",
7054 			    guid, s_pclass, (void *)npip));
7055 			(void) mdi_prop_free(s_pclass);
7056 			svp = (scsi_vhci_priv_t *)
7057 			    mdi_pi_get_vhci_private(npip);
7058 			if (svp == NULL) {
7059 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7060 				    "!vhci_failover(2.5)(%s): no "
7061 				    "client priv! %p offlined?\n",
7062 				    guid, (void *)npip));
7063 				pip = npip;
7064 				sps = mdi_select_path(cdip, NULL,
7065 				    MDI_SELECT_ONLINE_PATH, pip, &npip);
7066 				mdi_rele_path(pip);
7067 				continue;
7068 			}
7069 			if (scsi_abort(&svp->svp_psd->sd_address, NULL)
7070 			    == 0) {
7071 				(void) vhci_recovery_reset(vlun,
7072 				    &svp->svp_psd->sd_address, FALSE,
7073 				    VHCI_DEPTH_TARGET);
7074 			}
7075 			mutex_enter(&svp->svp_mutex);
7076 			if (svp->svp_cmds == 0) {
7077 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7078 				    "!vhci_failover(3)(%s):"
7079 				    "quiesced path %p\n", guid, (void *)npip));
7080 			} else {
7081 				while (svp->svp_cmds != 0) {
7082 					cv_wait(&svp->svp_cv, &svp->svp_mutex);
7083 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7084 					    "!vhci_failover(3.cv)(%s):"
7085 					    "quiesced path %p\n", guid,
7086 					    (void *)npip));
7087 				}
7088 			}
7089 			mutex_exit(&svp->svp_mutex);
7090 		} else {
7091 			/*
7092 			 * make sure we free up the memory
7093 			 */
7094 			(void) mdi_prop_free(s_pclass);
7095 		}
7096 		pip = npip;
7097 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
7098 		    pip, &npip);
7099 		mdi_rele_path(pip);
7100 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7101 	return (0);
7102 }
7103 
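/*
 * Return the scsi_vhci_lun hanging off the client node's vhci-private
 * field, or NULL if no LUN structure has been allocated yet.
 */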
7104 static struct scsi_vhci_lun *
7105 vhci_lun_lookup(dev_info_t *tgt_dip)
7106 {
7107 	return ((struct scsi_vhci_lun *)
7108 	    mdi_client_get_vhci_private(tgt_dip));
7109 }
7110 
7111 static struct scsi_vhci_lun *
7112 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
7113 {
7114 	struct scsi_vhci_lun *svl;
7115 
7116 	if ((svl = vhci_lun_lookup(tgt_dip)) != NULL) {
7117 		return (svl);
7118 	}
7119 
7120 	svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
7121 	svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP);
7122 	(void) strcpy(svl->svl_lun_wwn, guid);
7123 	mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
7124 	cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
7125 	sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
7126 	svl->svl_waiting_for_activepath = 1;
7127 	svl->svl_sector_size = 1;
7128 	mdi_client_set_vhci_private(tgt_dip, svl);
7129 	*didalloc = 1;
7130 	VHCI_DEBUG(1, (CE_NOTE, NULL,
7131 	    "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
7132 	    guid, (void *)svl));
7133 	return (svl);
7134 }
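/*
 * Illustrative (hypothetical) caller pattern for the lookup/alloc pair;
 * the real callers track "didalloc" so that an error path can undo the
 * allocation:
 *
 *	int didalloc = 0;
 *	svl = vhci_lun_lookup_alloc(tgt_dip, guid, &didalloc);
 *	...
 *	if (error && didalloc)
 *		vhci_lun_free(tgt_dip);
 */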
7135 
7136 static void
7137 vhci_lun_free(dev_info_t *tgt_dip)
7138 {
7139 	struct scsi_vhci_lun *dvlp;
7140 	char *guid;
7141 	struct scsi_device *sd;
7142 
7143 	/*
7144 	 * The scsi_device was set to driver private during child node
7145 	 * initialization in scsi_hba_bus_ctl().
7146 	 */
7147 	sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip);
7148 
7149 	dvlp = (struct scsi_vhci_lun *)
7150 	    mdi_client_get_vhci_private(tgt_dip);
7151 	ASSERT(dvlp != NULL);
7152 
7153 	mdi_client_set_vhci_private(tgt_dip, NULL);
7154 
7155 	guid = dvlp->svl_lun_wwn;
7156 	ASSERT(guid != NULL);
7157 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));
7158 
7159 	mutex_enter(&dvlp->svl_mutex);
7160 	if (dvlp->svl_active_pclass != NULL) {
7161 		kmem_free(dvlp->svl_active_pclass,
7162 		    strlen(dvlp->svl_active_pclass)+1);
7163 	}
7164 	dvlp->svl_active_pclass = NULL;
7165 	mutex_exit(&dvlp->svl_mutex);
7166 
7167 	if (dvlp->svl_lun_wwn != NULL) {
7168 		kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1);
7169 	}
7170 	dvlp->svl_lun_wwn = NULL;
7171 
7172 	if (dvlp->svl_fops_name) {
7173 		kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1);
7174 	}
7175 	dvlp->svl_fops_name = NULL;
7176 
7177 	if (dvlp->svl_fops_ctpriv != NULL) {
7178 		dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
7179 	}
7180 
7181 	if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
7182 		taskq_destroy(dvlp->svl_taskq);
7183 
7184 	mutex_destroy(&dvlp->svl_mutex);
7185 	cv_destroy(&dvlp->svl_cv);
7186 	sema_destroy(&dvlp->svl_pgr_sema);
7187 	kmem_free(dvlp, sizeof (*dvlp));
7188 	/*
7189 	 * vhci_lun_free may be called before the tgt_dip is fully
7190 	 * initialized, so check whether sd is NULL.
7191 	 */
7192 	if (sd != NULL)
7193 		sd->sd_address.a_hba_tran->tran_tgt_private = NULL;
7194 }
7195 
7196 
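/*
 * Issue the packet synchronously via scsi_poll() and retry a bounded
 * number of times on transport errors and on unit-attention/not-ready
 * check conditions.  Returns 1 if the command completed with good status,
 * 0 otherwise.
 */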
7197 int
7198 vhci_do_scsi_cmd(struct scsi_pkt *pkt)
7199 {
7200 	int	err = 0;
7201 	int	retry_cnt = 0;
7202 	struct scsi_extended_sense	*sns;
7203 
7204 retry:
7205 	err = scsi_poll(pkt);
7206 	if (err) {
7207 		if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
7208 			if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
7209 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7210 				    "!v_s_do_s_c: RELEASE conflict\n"));
7211 				return (0);
7212 			}
7213 		}
7214 		if (retry_cnt++ < 3) {
7215 			VHCI_DEBUG(1, (CE_WARN, NULL,
7216 			    "!v_s_do_s_c:retry packet 0x%p "
7217 			    "status 0x%x reason %s",
7218 			    (void *)pkt, SCBP_C(pkt),
7219 			    scsi_rname(pkt->pkt_reason)));
7220 			if ((pkt->pkt_reason == CMD_CMPLT) &&
7221 			    (SCBP_C(pkt) == STATUS_CHECK) &&
7222 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
7223 				sns = &(((struct scsi_arq_status *)(uintptr_t)
7224 				    (pkt->pkt_scbp))->sts_sensedata);
7225 				VHCI_DEBUG(1, (CE_WARN, NULL,
7226 				    "!v_s_do_s_c:retry "
7227 				    "packet 0x%p  sense data %s", (void *)pkt,
7228 				    scsi_sname(sns->es_key)));
7229 			}
7230 			goto retry;
7231 		}
7232 		VHCI_DEBUG(1, (CE_WARN, NULL,
7233 		    "!v_s_do_s_c: failed transport 0x%p 0x%x",
7234 		    (void *)pkt, SCBP_C(pkt)));
7235 		return (0);
7236 	}
7237 
7238 	switch (pkt->pkt_reason) {
7239 		case CMD_TIMEOUT:
7240 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
7241 			    "out (pkt 0x%p)", (void *)pkt));
7242 			return (0);
7243 		case CMD_CMPLT:
7244 			switch (SCBP_C(pkt)) {
7245 				case STATUS_GOOD:
7246 					break;
7247 				case STATUS_CHECK:
7248 					if (pkt->pkt_state & STATE_ARQ_DONE) {
7249 						sns = &(((
7250 						    struct scsi_arq_status *)
7251 						    (uintptr_t)
7252 						    (pkt->pkt_scbp))->
7253 						    sts_sensedata);
7254 						if ((sns->es_key ==
7255 						    KEY_UNIT_ATTENTION) ||
7256 						    (sns->es_key ==
7257 						    KEY_NOT_READY)) {
7258 							/*
7259 							 * clear unit attn.
7260 							 */
7261 
7262 							VHCI_DEBUG(1,
7263 							    (CE_WARN, NULL,
7264 							    "!v_s_do_s_c: "
7265 							    "retry "
7266 							    "packet 0x%p sense "
7267 							    "data %s",
7268 							    (void *)pkt,
7269 							    scsi_sname
7270 							    (sns->es_key)));
7271 							goto retry;
7272 						}
7273 						VHCI_DEBUG(4, (CE_WARN, NULL,
7274 						    "!ARQ while "
7275 						    "transporting "
7276 						    "(pkt 0x%p)",
7277 						    (void *)pkt));
7278 						return (0);
7279 					}
7280 					return (0);
7281 				default:
7282 					VHCI_DEBUG(1, (CE_WARN, NULL,
7283 					    "!Bad status returned "
7284 					    "(pkt 0x%p, status %x)",
7285 					    (void *)pkt, SCBP_C(pkt)));
7286 					return (0);
7287 			}
7288 			break;
7289 		case CMD_INCOMPLETE:
7290 		case CMD_RESET:
7291 		case CMD_ABORTED:
7292 		case CMD_TRAN_ERR:
7293 			if (retry_cnt++ < 1) {
7294 				VHCI_DEBUG(1, (CE_WARN, NULL,
7295 				    "!v_s_do_s_c: retry packet 0x%p %s",
7296 				    (void *)pkt, scsi_rname(pkt->pkt_reason)));
7297 				goto retry;
7298 			}
7299 			/* FALLTHROUGH */
7300 		default:
7301 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
7302 			    "complete successfully (pkt 0x%p,"
7303 			    "reason %x)", (void *)pkt, pkt->pkt_reason));
7304 			return (0);
7305 	}
7306 	return (1);
7307 }
7308 
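/*
 * Wait for all outstanding commands on every ONLINE path of the LUN to
 * drain, giving each path up to vhci_path_quiesce_timeout seconds.
 * Returns 1 if the LUN quiesced, 0 on timeout.
 */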
7309 static int
7310 vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
7311 {
7312 	mdi_pathinfo_t		*pip, *spip;
7313 	dev_info_t		*cdip;
7314 	struct scsi_vhci_priv	*svp;
7315 	mdi_pathinfo_state_t	pstate;
7316 	uint32_t		p_ext_state;
7317 	int			circular;
7318 
7319 	cdip = vlun->svl_dip;
7320 	pip = spip = NULL;
7321 	ndi_devi_enter(cdip, &circular);
7322 	pip = mdi_get_next_phci_path(cdip, NULL);
7323 	while (pip != NULL) {
7324 		(void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
7325 		if (pstate != MDI_PATHINFO_STATE_ONLINE) {
7326 			spip = pip;
7327 			pip = mdi_get_next_phci_path(cdip, spip);
7328 			continue;
7329 		}
7330 		mdi_hold_path(pip);
7331 		ndi_devi_exit(cdip, circular);
7332 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7333 		mutex_enter(&svp->svp_mutex);
7334 		while (svp->svp_cmds != 0) {
7335 			if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex,
7336 			    ddi_get_lbolt() + drv_usectohz
7337 			    (vhci_path_quiesce_timeout * 1000000)) == -1) {
7338 				mutex_exit(&svp->svp_mutex);
7339 				mdi_rele_path(pip);
7340 				VHCI_DEBUG(1, (CE_WARN, NULL,
7341 				    "Quiesce of lun was not successful "
7342 				    "vlun: 0x%p.", (void *)vlun));
7343 				return (0);
7344 			}
7345 		}
7346 		mutex_exit(&svp->svp_mutex);
7347 		ndi_devi_enter(cdip, &circular);
7348 		spip = pip;
7349 		pip = mdi_get_next_phci_path(cdip, spip);
7350 		mdi_rele_path(spip);
7351 	}
7352 	ndi_devi_exit(cdip, circular);
7353 	return (1);
7354 }
7355 
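/*
 * Validate and (re)register the persistent reservation key for this LUN:
 *
 *   1) validate the existing key by registering it on some other
 *	online/standby path,
 *   2) force-register (Register_And_Ignore) the key on the new path,
 *   3) re-validate on a pre-existing path, since another host may have
 *	cleared the key while step 2 was in flight,
 *   4) on validation failure, back out by registering a zero key.
 *
 * Returns 1 on success, 0 on failure.
 */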
7356 static int
7357 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
7358 {
7359 	scsi_vhci_lun_t		*vlun;
7360 	vhci_prout_t		*prout;
7361 	int			rval, success;
7362 	mdi_pathinfo_t		*pip, *npip;
7363 	scsi_vhci_priv_t	*osvp;
7364 	dev_info_t		*cdip;
7365 	uchar_t			cdb_1;
7366 	uchar_t			temp_res_key[MHIOC_RESV_KEY_SIZE];
7367 
7368 
7369 	/*
7370 	 * see if there are any other paths available; if none,
7371 	 * then there is nothing to do.
7372 	 */
7373 	cdip = svp->svp_svl->svl_dip;
7374 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7375 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7376 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7377 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7378 		    "%s%d: vhci_pgr_validate_and_register: first path\n",
7379 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7380 		return (1);
7381 	}
7382 
7383 	vlun = svp->svp_svl;
7384 	prout = &vlun->svl_prout;
7385 	ASSERT(vlun->svl_pgr_active != 0);
7386 
7387 	/*
7388 	 * When the path was busy/offlined, some other host might have
7389 	 * cleared this key. Validate key on some other path first.
7390 	 * If it fails, return failure.
7391 	 */
7392 
7393 	npip = pip;
7394 	pip = NULL;
7395 	success = 0;
7396 
7397 	/* Save the res key */
7398 	bcopy((const void *)prout->res_key,
7399 	    (void *)temp_res_key, MHIOC_RESV_KEY_SIZE);
7400 
7401 	/*
7402 	 * The CDB from the application may be a Register_And_Ignore, which
7403 	 * would force registration instead of validating the key.  Convert
7404 	 * it to a plain Register CDB for the validation, and be sure to
7405 	 * restore the original CDB afterwards.
7406 	 */
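	/*
	 * In the PERSISTENT RESERVE OUT CDB, byte 1 bits 4-0 hold the
	 * service action; masking with 0xe0 clears them down to REGISTER
	 * (0x00) while leaving the upper bits untouched.
	 */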
7407 	cdb_1 = vlun->svl_cdb[1];
7408 	vlun->svl_cdb[1] &= 0xe0;
7409 
7410 	do {
7411 		osvp = (scsi_vhci_priv_t *)
7412 		    mdi_pi_get_vhci_private(npip);
7413 		if (osvp == NULL) {
7414 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7415 			    "vhci_pgr_validate_and_register: no "
7416 			    "client priv! 0x%p offlined?\n",
7417 			    (void *)npip));
7418 			goto next_path_1;
7419 		}
7420 
7421 		if (osvp == svp) {
7422 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7423 			    "vhci_pgr_validate_and_register: same svp 0x%p"
7424 			    " npip 0x%p vlun 0x%p\n",
7425 			    (void *)svp, (void *)npip, (void *)vlun));
7426 			goto next_path_1;
7427 		}
7428 
7429 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7430 		    "vhci_pgr_validate_and_register: First validate on"
7431 		    " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
7432 		    " cdb1 %x\n", (void *)osvp, (void *)vlun,
7433 		    (void *)curthread, vlun->svl_cdb[1]));
7434 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");
7435 
7436 		bcopy((const void *)prout->service_key,
7437 		    (void *)prout->res_key, MHIOC_RESV_KEY_SIZE);
7438 
7439 		VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
7440 		    (void *)vlun));
7441 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7442 
7443 		rval = vhci_do_prout(osvp);
7444 		if (rval == 1) {
7445 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7446 			    "%s%d: vhci_pgr_validate_and_register: key"
7447 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7448 			    ddi_get_instance(cdip), (void *)curthread));
7449 			pip = npip;
7450 			success = 1;
7451 			break;
7452 		} else {
7453 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7454 			    "vhci_pgr_validate_and_register: First validation"
7455 			    " on osvp 0x%p failed %x\n", (void *)osvp, rval));
7456 			vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
7457 		}
7458 
7459 		/*
7460 		 * Try other paths
7461 		 */
7462 next_path_1:
7463 		pip = npip;
7464 		rval = mdi_select_path(cdip, NULL,
7465 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7466 		    pip, &npip);
7467 		mdi_rele_path(pip);
7468 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7469 
7470 
7471 	/* Be sure to restore original cdb */
7472 	vlun->svl_cdb[1] = cdb_1;
7473 
7474 	/* Restore the res_key */
7475 	bcopy((const void *)temp_res_key,
7476 	    (void *)prout->res_key, MHIOC_RESV_KEY_SIZE);
7477 
7478 	/*
7479 	 * If the key could not be validated on any other path (e.g. first
7480 	 * registration), return success so the online can still continue.
7481 	 */
7482 	if (success == 0) {
7483 		return (1);
7484 	}
7485 
7486 	ASSERT(pip != NULL);
7487 
7488 	/*
7489 	 * Force register on new path
7490 	 */
7491 	cdb_1 = vlun->svl_cdb[1];		/* store the cdb */
7492 
7493 	vlun->svl_cdb[1] &= 0xe0;
7494 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
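	/*
	 * The service action is now REGISTER AND IGNORE EXISTING KEY, so
	 * the registration on the new path succeeds regardless of any key
	 * already held there.
	 */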
7495 
7496 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");
7497 
7498 	bcopy((const void *)prout->active_service_key,
7499 	    (void *)prout->service_key, MHIOC_RESV_KEY_SIZE);
7500 	bcopy((const void *)prout->active_res_key,
7501 	    (void *)prout->res_key, MHIOC_RESV_KEY_SIZE);
7502 
7503 	vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: ");
7504 
7505 	rval = vhci_do_prout(svp);
7506 	vlun->svl_cdb[1] = cdb_1;		/* restore the cdb */
7507 	if (rval != 1) {
7508 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7509 		    "vhci_pgr_validate_and_register: register on new"
7510 		    " path 0x%p svp 0x%p failed %x\n",
7511 		    (void *)pip, (void *)svp, rval));
7512 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
7513 		mdi_rele_path(pip);
7514 		return (0);
7515 	}
7516 
7517 	if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
7518 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7519 		    "vhci_pgr_validate_and_register: zero service key\n"));
7520 		mdi_rele_path(pip);
7521 		return (rval);
7522 	}
7523 
7524 	/*
7525 	 * While the key was force registered, some other host might have
7526 	 * cleared the key. Re-validate key on another pre-existing path
7527 	 * before declaring success.
7528 	 */
7529 	npip = pip;
7530 	pip = NULL;
7531 
7532 	/*
7533 	 * As above, the CDB from the application may be Register and Ignore,
7534 	 * which would force registration instead of validating the key.
7535 	 * Convert it to a plain Register CDB for the validation, and be
7536 	 * sure to restore the original CDB afterwards.
7537 	 */
7538 	cdb_1 = vlun->svl_cdb[1];
7539 	vlun->svl_cdb[1] &= 0xe0;
7540 	success = 0;
7541 
7542 	do {
7543 		osvp = (scsi_vhci_priv_t *)
7544 		    mdi_pi_get_vhci_private(npip);
7545 		if (osvp == NULL) {
7546 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7547 			    "vhci_pgr_validate_and_register: no "
7548 			    "client priv! 0x%p offlined?\n",
7549 			    (void *)npip));
7550 			goto next_path_2;
7551 		}
7552 
7553 		if (osvp == svp) {
7554 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7555 			    "vhci_pgr_validate_and_register: same osvp 0x%p"
7556 			    " npip 0x%p vlun 0x%p\n",
7557 			    (void *)svp, (void *)npip, (void *)vlun));
7558 			goto next_path_2;
7559 		}
7560 
7561 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7562 		    "vhci_pgr_validate_and_register: Re-validation on"
7563 		    " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
7564 		    (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
7565 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7566 
7567 		bcopy((const void *)prout->service_key,
7568 		    (void *)prout->res_key, MHIOC_RESV_KEY_SIZE);
7569 
7570 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7571 
7572 		rval = vhci_do_prout(osvp);
7573 		if (rval == 1) {
7574 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7575 			    "%s%d: vhci_pgr_validate_and_register: key"
7576 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7577 			    ddi_get_instance(cdip), (void *)curthread));
7578 			pip = npip;
7579 			success = 1;
7580 			break;
7581 		} else {
7582 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7583 			    "vhci_pgr_validate_and_register: Re-validation on"
7584 			    " osvp 0x%p failed %x\n", (void *)osvp, rval));
7585 			vhci_print_prout_keys(vlun,
7586 			    "v_pgr_val_reg: reval failed: ");
7587 		}
7588 
7589 		/*
7590 		 * Try other paths
7591 		 */
7592 next_path_2:
7593 		pip = npip;
7594 		rval = mdi_select_path(cdip, NULL,
7595 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7596 		    pip, &npip);
7597 		mdi_rele_path(pip);
7598 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7599 
7600 	/* Be sure to restore original cdb */
7601 	vlun->svl_cdb[1] = cdb_1;
7602 
7603 	if (success == 1) {
7604 		/* Successfully validated registration */
7605 		mdi_rele_path(pip);
7606 		return (1);
7607 	}
7608 
7609 	VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));
7610 
7611 	/*
7612 	 * key invalid, back out by registering key value of 0
7613 	 */
7614 	VHCI_DEBUG(4, (CE_NOTE, NULL,
7615 	    "vhci_pgr_validate_and_register: backout on"
7616 	    " svp 0x%p being done\n", (void *)svp));
7617 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7618 
7619 	bcopy((const void *)prout->service_key, (void *)prout->res_key,
7620 	    MHIOC_RESV_KEY_SIZE);
7621 	bzero((void *)prout->service_key, MHIOC_RESV_KEY_SIZE);
7622 
7623 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7624 
7625 	/*
7626 	 * Get a new path
7627 	 */
7628 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7629 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7630 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7631 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7632 		    "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
7633 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7634 		return (0);
7635 	}
7636 
7637 	if ((rval = vhci_do_prout(svp)) != 1) {
7638 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7639 		    "vhci_pgr_validate_and_register: backout on"
7640 		    " svp 0x%p failed\n", (void *)svp));
7641 		vhci_print_prout_keys(vlun, "backout failed");
7642 
7643 		VHCI_DEBUG(4, (CE_WARN, NULL,
7644 		    "%s%d: vhci_pgr_validate_and_register: key"
7645 		    " validation and backout failed", ddi_driver_name(cdip),
7646 		    ddi_get_instance(cdip)));
7647 		if (rval == VHCI_PGR_ILLEGALOP) {
7648 			VHCI_DEBUG(4, (CE_WARN, NULL,
7649 			    "%s%d: vhci_pgr_validate_and_register: key"
7650 			    " already cleared", ddi_driver_name(cdip),
7651 			    ddi_get_instance(cdip)));
7652 			rval = 1;
7653 		} else
7654 			rval = 0;
7655 	} else {
7656 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7657 		    "%s%d: vhci_pgr_validate_and_register: key"
7658 		    " validation failed, key backed out\n",
7659 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7660 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
7661 	}
7662 	mdi_rele_path(pip);
7663 
7664 	return (rval);
7665 }
7666 
7667 /*
7668  * taskq routine to dispatch a scsi cmd to vhci_scsi_start.  This ensures
7669  * that vhci_scsi_start is not called in interrupt context.
7670  * Since the upper layer already received TRAN_ACCEPT when the command was
7671  * dispatched, we must complete the command here if anything goes wrong.
7672  */
7673 static void
7674 vhci_dispatch_scsi_start(void *arg)
7675 {
7676 	struct vhci_pkt *vpkt = (struct vhci_pkt *)arg;
7677 	struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
7678 	int			rval = TRAN_BUSY;
7679 
7680 	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
7681 	    " scsi-2 reserve for 0x%p\n",
7682 	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7683 
7684 	/*
7685 	 * To prevent the taskq from being called recursively we set the
7686 	 * VHCI_PKT_THRU_TASKQ bit in vpkt_state.
7687 	 */
7688 	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;
7689 
7690 	/*
7691 	 * Wait for the transport to become ready to send packets; if it
7692 	 * times out, it returns something other than TRAN_BUSY.
7693 	 * vhci_reserve_delay may need to be tuned for other transports
7694 	 * and is therefore a global.  Using delay(9F) is safe here
7695 	 * because this routine runs from taskq dispatch, not interrupt
7696 	 * context.
7697 	 */
7698 	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
7699 	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
7700 		delay(drv_usectohz(vhci_reserve_delay));
7701 	}
7702 
7703 	switch (rval) {
7704 	case TRAN_ACCEPT:
7705 		return;
7706 
7707 	default:
7708 		/*
7709 		 * This pkt shall be retried, and to ensure another taskq
7710 		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
7711 		 * flag.
7712 		 */
7713 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
7714 
7715 		/* Ensure that the pkt is retried without a reset */
7716 		tpkt->pkt_reason = CMD_ABORTED;
7717 		tpkt->pkt_statistics |= STAT_ABORTED;
7718 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
7719 		    "TRAN_rval %d returned for dip 0x%p", rval,
7720 		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7721 		break;
7722 	}
7723 
7724 	/*
7725 	 * vpkt_org_vpkt should always be NULL here if the retry command
7726 	 * has been successfully dispatched.  If vpkt_org_vpkt != NULL at
7727 	 * this point, it is an error so restore the original vpkt and
7728 	 * return an error to the target driver so it can retry the
7729 	 * command as appropriate.
7730 	 */
7731 	if (vpkt->vpkt_org_vpkt != NULL) {
7732 		struct vhci_pkt		*new_vpkt = vpkt;
7733 		scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
7734 		    mdi_pi_get_vhci_private(vpkt->vpkt_path);
7735 
7736 		vpkt = vpkt->vpkt_org_vpkt;
7737 
7738 		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
7739 		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;
7740 
7741 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
7742 		    new_vpkt->vpkt_tgt_pkt);
7743 
7744 		tpkt = vpkt->vpkt_tgt_pkt;
7745 	}
7746 
7747 	if (tpkt->pkt_comp) {
7748 		(*tpkt->pkt_comp)(tpkt);
7749 	}
7750 }
7751 
7752 static void
7753 vhci_initiate_auto_failback(void *arg)
7754 {
7755 	struct scsi_vhci_lun	*vlun = (struct scsi_vhci_lun *)arg;
7756 	dev_info_t		*vdip, *cdip;
7757 	int			held;
7758 
7759 	cdip = vlun->svl_dip;
7760 	vdip = ddi_get_parent(cdip);
7761 
7762 	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
7763 
7764 	/*
7765 	 * Perform a final check that the active path class is still not the
7766 	 * preferred path class.  In the time since the auto failback was
7767 	 * dispatched, an external failover could have been detected (some
7768 	 * other host could have detected the condition and triggered the
7769 	 * auto failback already).
7770 	 * In such a case, going ahead with the failover would negate the
7771 	 * whole purpose of auto failback.
7772 	 */
7773 	mutex_enter(&vlun->svl_mutex);
7774 	if (vlun->svl_active_pclass != NULL) {
7775 		char				*best_pclass;
7776 		struct scsi_failover_ops	*fo;
7777 
7778 		fo = vlun->svl_fops;
7779 
7780 		(*fo->sfo_pathclass_next)(NULL, &best_pclass,
7781 		    vlun->svl_fops_ctpriv);
7782 		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
7783 			mutex_exit(&vlun->svl_mutex);
7784 			VHCI_RELEASE_LUN(vlun);
7785 			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
7786 			    "auto failback for %s as %s pathclass already "
7787 			    "active.\n", vlun->svl_lun_wwn, best_pclass));
7788 			return;
7789 		}
7790 	}
7791 	mutex_exit(&vlun->svl_mutex);
7792 	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
7793 	    == MDI_SUCCESS) {
7794 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7795 		    "succeeded for device %s (GUID %s)",
7796 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7797 	} else {
7798 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7799 		    "failed for device %s (GUID %s)",
7800 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7801 	}
7802 	VHCI_RELEASE_LUN(vlun);
7803 }
7804 
7805 #ifdef DEBUG
7806 static void
7807 vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
7808 {
7809 	uchar_t index = 0;
7810 	char buf[100];
7811 
7812 	VHCI_DEBUG(5, (CE_NOTE, NULL, "num keys %d\n", numkeys));
7813 
7814 	while (index < numkeys) {
7815 		bcopy(&prin->keylist[index], buf, MHIOC_RESV_KEY_SIZE);
7816 		VHCI_DEBUG(5, (CE_NOTE, NULL,
7817 		    "%02x%02x%02x%02x%02x%02x%02x%02x\t",
7818 		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7819 		    buf[7]));
7820 		index++;
7821 	}
7822 }
7823 #endif
7824 
7825 static void
7826 vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
7827 {
7828 	int			i;
7829 	vhci_prout_t		*prout;
7830 	char			buf1[4*MHIOC_RESV_KEY_SIZE + 1];
7831 	char			buf2[4*MHIOC_RESV_KEY_SIZE + 1];
7832 	char			buf3[4*MHIOC_RESV_KEY_SIZE + 1];
7833 	char			buf4[4*MHIOC_RESV_KEY_SIZE + 1];
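	/* each key byte renders as "[xx]" (4 chars), hence 4 * size + 1 */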
7834 
7835 	prout = &vlun->svl_prout;
7836 
7837 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7838 		(void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
7839 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7840 		(void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]);
7841 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7842 		(void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
7843 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7844 		(void) sprintf(&buf4[4*i], "[%02x]",
7845 		    prout->active_service_key[i]);
7846 
7847 	/* Print everything in one go; otherwise the output gets jumbled */
7848 	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
7849 	    "res_key           : %s\n"
7850 	    "service_key       : %s\n"
7851 	    "active_res_key    : %s\n"
7852 	    "active_service_key: %s\n",
7853 	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
7854 }
7855 
7856 /*
7857  * Called from vhci_scsi_start to update the pHCI pkt with target packet.
7858  */
7859 static void
7860 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
7861 {
7862 
7863 	ASSERT(vpkt->vpkt_hba_pkt);
7864 
7865 	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
7866 	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;
7867 
7868 	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
7869 	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
7870 		/*
7871 		 * A polled command was requested or the HBA is in the
7872 		 * suspended state.
7873 		 */
7874 		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
7875 		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
7876 	} else {
7877 		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
7878 	}
7879 	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
7880 	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
7881 	    vpkt->vpkt_tgt_init_cdblen);
7882 	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;
7883 
7884 	/* Re-initialize the following pHCI packet state information */
7885 	vpkt->vpkt_hba_pkt->pkt_state = 0;
7886 	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
7887 	vpkt->vpkt_hba_pkt->pkt_reason = 0;
7888 }
7889 
7890 static int
7891 vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
7892     void *arg, void *result)
7893 {
7894 	int ret = DDI_SUCCESS;
7895 
7896 	/*
7897 	 * Generic processing in MPxIO framework
7898 	 */
7899 	ret = mdi_bus_power(parent, impl_arg, op, arg, result);
7900 
7901 	switch (ret) {
7902 	case MDI_SUCCESS:
7903 		ret = DDI_SUCCESS;
7904 		break;
7905 	case MDI_FAILURE:
7906 		ret = DDI_FAILURE;
7907 		break;
7908 	default:
7909 		break;
7910 	}
7911 
7912 	return (ret);
7913 }
7914 
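/*
 * Set a SCSI capability either on a single path (when pip is supplied,
 * from vhci_pathinfo_state_change) or, when pip is NULL, on every online
 * and standby pHCI path of the client.
 */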
7915 static int
7916 vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
7917     mdi_pathinfo_t *pip)
7918 {
7919 	dev_info_t		*cdip;
7920 	mdi_pathinfo_t		*npip = NULL;
7921 	scsi_vhci_priv_t	*svp = NULL;
7922 	struct scsi_address	*pap = NULL;
7923 	scsi_hba_tran_t		*hba = NULL;
7924 	int			sps;
7925 	int			mps_flag;
7926 	int			rval = 0;
7927 
7928 	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
7929 	if (pip) {
7930 		/*
7931 		 * If the call is from vhci_pathinfo_state_change,
7932 		 * then this path was busy and is becoming ready to accept IO.
7933 		 */
7934 		ASSERT(ap != NULL);
7935 		hba = ap->a_hba_tran;
7936 		ASSERT(hba != NULL);
7937 		rval = scsi_ifsetcap(ap, cap, val, whom);
7938 
7939 		VHCI_DEBUG(2, (CE_NOTE, NULL,
7940 		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
7941 		    (void *)pip, (void *)ap, rval));
7942 
7943 		return (rval);
7944 	}
7945 
7946 	/*
7947 	 * Set capability on all the pHCIs.
7948 	 * If any path is busy, then the capability would be set by
7949 	 * vhci_pathinfo_state_change.
7950 	 */
7951 
7952 	cdip = ADDR2DIP(ap);
7953 	ASSERT(cdip != NULL);
7954 	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
7955 	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
7956 		VHCI_DEBUG(2, (CE_WARN, NULL,
7957 		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
7958 		    (void *)cdip));
7959 		return (0);
7960 	}
7961 
7962 again:
7963 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7964 	if (svp == NULL) {
7965 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
7966 		    "priv is NULL, pip 0x%p", (void *)pip));
7967 		mdi_rele_path(pip);
7968 		return (rval);
7969 	}
7970 
7971 	if (svp->svp_psd == NULL) {
7972 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
7973 		    "psd is NULL, pip 0x%p, svp 0x%p",
7974 		    (void *)pip, (void *)svp));
7975 		mdi_rele_path(pip);
7976 		return (rval);
7977 	}
7978 
7979 	pap = &svp->svp_psd->sd_address;
7980 	ASSERT(pap != NULL);
7981 	hba = pap->a_hba_tran;
7982 	ASSERT(hba != NULL);
7983 
7984 	if (hba->tran_setcap != NULL) {
7985 		rval = scsi_ifsetcap(pap, cap, val, whom);
7986 
7987 		VHCI_DEBUG(2, (CE_NOTE, NULL,
7988 		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
7989 		    (void *)pip, (void *)ap, rval));
7990 
7991 		/*
7992 		 * Select next path and issue the setcap, repeat
7993 		 * until all paths are exhausted
7994 		 */
7995 		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
7996 		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
7997 			mdi_rele_path(pip);
7998 			return (1);
7999 		}
8000 		mdi_rele_path(pip);
8001 		pip = npip;
8002 		goto again;
8003 	}
8004 	mdi_rele_path(pip);
8005 	return (rval);
8006 }
8007 
8008 static int
8009 vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8010     void *arg, dev_info_t **child)
8011 {
8012 	char *guid;
8013 
8014 	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
8015 		guid = vhci_devnm_to_guid((char *)arg);
8016 	else
8017 		guid = NULL;
8018 
8019 	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
8020 	    == MDI_SUCCESS)
8021 		return (NDI_SUCCESS);
8022 	else
8023 		return (NDI_FAILURE);
8024 }
8025 
8026 /*
8027  * Take the original vhci_pkt, create a duplicate of the pkt for resending
8028  * as though it originated in ssd.
8029  */
8030 static struct scsi_pkt *
8031 vhci_create_retry_pkt(struct vhci_pkt *vpkt)
8032 {
8033 	struct vhci_pkt *new_vpkt = NULL;
8034 	struct scsi_pkt	*pkt = NULL;
8035 
8036 	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
8037 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8038 
8039 	/*
8040 	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
8041 	 */
8042 	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
8043 	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
8044 	    vpkt->vpkt_tgt_init_scblen, 0,
8045 	    PKT_CONSISTENT,
8046 	    NULL_FUNC, NULL);
8047 	if (pkt != NULL) {
8048 		new_vpkt = TGTPKT2VHCIPKT(pkt);
8049 
8050 		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
8051 		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
8052 		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
8053 		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;
8054 
8055 		pkt->pkt_resid = 0;
8056 		pkt->pkt_statistics = 0;
8057 		pkt->pkt_reason = 0;
8058 
8059 		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
8060 		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);
8061 
8062 		/*
8063 		 * Save a pointer to the original vhci_pkt
8064 		 */
8065 		new_vpkt->vpkt_org_vpkt = vpkt;
8066 	}
8067 
8068 	return (pkt);
8069 }
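/*
 * Sketch of the intended flow (the actual retry logic lives in the
 * callers): the duplicate pkt is transported in place of the original,
 * and when it completes successfully vhci_sync_retry_pkt() below copies
 * the status back into the original and destroys the duplicate.
 */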
8070 
8071 /*
8072  * Copy the successful completion information from the hba packet into
8073  * the original target pkt from the upper layer.  Returns the original
8074  * vpkt and destroys the new vpkt from the internal retry.
8075  */
8076 static struct vhci_pkt *
8077 vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
8078 {
8079 	struct vhci_pkt		*ret_vpkt = NULL;
8080 	struct scsi_pkt		*tpkt = NULL;
8081 	struct scsi_pkt		*hba_pkt = NULL;
8082 	scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
8083 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8084 
8085 	ASSERT(vpkt->vpkt_org_vpkt != NULL);
8086 	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
8087 	    "completed successfully!\n"));
8088 
8089 	ret_vpkt = vpkt->vpkt_org_vpkt;
8090 	tpkt = ret_vpkt->vpkt_tgt_pkt;
8091 	hba_pkt = vpkt->vpkt_hba_pkt;
8092 
8093 	/*
8094 	 * Copy the good status into the target driver's packet
8095 	 */
8096 	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
8097 	tpkt->pkt_resid = hba_pkt->pkt_resid;
8098 	tpkt->pkt_state = hba_pkt->pkt_state;
8099 	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
8100 	tpkt->pkt_reason = hba_pkt->pkt_reason;
8101 
8102 	/*
8103 	 * Destroy the internally created vpkt for the retry
8104 	 */
8105 	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
8106 	    vpkt->vpkt_tgt_pkt);
8107 
8108 	return (ret_vpkt);
8109 }
8110 
8111 /* restart the REQUEST SENSE command */
8112 static void
8113 vhci_uscsi_restart_sense(void *arg)
8114 {
8115 	struct buf 	*rqbp;
8116 	struct buf 	*bp;
8117 	struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
8118 	mp_uscsi_cmd_t 	*mp_uscmdp;
8119 
8120 	VHCI_DEBUG(4, (CE_WARN, NULL,
8121 	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));
8122 
8123 	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
8124 		/* if it fails, we need to wake up the original command */
8125 		mp_uscmdp = rqpkt->pkt_private;
8126 		ASSERT(mp_uscmdp != NULL);
8127 		bp = mp_uscmdp->cmdbp;
8128 		rqbp = mp_uscmdp->rqbp;
8129 		scsi_free_consistent_buf(rqbp);
8130 		scsi_destroy_pkt(rqpkt);
8131 		bp->b_resid = bp->b_bcount;
8132 		bioerror(bp, EIO);
8133 		biodone(bp);
8134 	}
8135 }
8136 
8137 /*
8138  * auto-rqsense is not enabled so we have to retrieve the request sense
8139  * manually.
8140  */
8141 static int
8142 vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
8143 {
8144 	struct buf 		*rqbp, *cmdbp;
8145 	struct scsi_pkt 	*rqpkt;
8146 	int			rval = 0;
8147 
8148 	cmdbp = mp_uscmdp->cmdbp;
8149 	ASSERT(cmdbp != NULL);
8150 
8151 	VHCI_DEBUG(4, (CE_WARN, NULL,
8152 	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
8153 	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
8154 	/* set up the packet information and cdb */
8155 	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
8156 	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
8157 		return (-1);
8158 	}
8159 
8160 	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
8161 	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
8162 		scsi_free_consistent_buf(rqbp);
8163 		return (-1);
8164 	}
8165 
8166 	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
8167 	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
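	/*
	 * This builds the 6-byte group 0 CDB: opcode SCMD_REQUEST_SENSE
	 * with an allocation length of SENSE_LENGTH.
	 */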
8168 
8169 	mp_uscmdp->rqbp = rqbp;
8170 	rqbp->b_private = mp_uscmdp;
8171 	rqpkt->pkt_flags |= FLAG_SENSING;
8172 	rqpkt->pkt_time = 60;
8173 	rqpkt->pkt_comp = vhci_uscsi_iodone;
8174 	rqpkt->pkt_private = mp_uscmdp;
8175 
8176 	/*
8177 	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8178 	 * selection is not based on path_instance.
8179 	 */
8180 	if (scsi_pkt_allocated_correctly(rqpkt))
8181 		rqpkt->pkt_path_instance = 0;
8182 
8183 	/* send the request sense command */
8184 	switch (scsi_transport(rqpkt)) {
8185 	case TRAN_ACCEPT:
8186 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8187 		    "transport accepted."));
8188 		break;
8189 	case TRAN_BUSY:
8190 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8191 		    "transport busy, setting timeout."));
8192 		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
8193 		    (drv_usectohz(5 * 1000000)));
8194 		break;
8195 	default:
8196 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8197 		    "transport failed"));
8198 		scsi_free_consistent_buf(rqbp);
8199 		scsi_destroy_pkt(rqpkt);
8200 		rval = -1;
8201 	}
8202 
8203 	return (rval);
8204 }
8205 
8206 /*
8207  * Completion routine for the MP-API uscsi command.  It behaves as though
8208  * FLAG_DIAGNOSE were set, meaning there are no retries except for a
8209  * manual request sense.
8210  */
8211 void
8212 vhci_uscsi_iodone(struct scsi_pkt *pkt)
8213 {
8214 	struct buf 			*bp;
8215 	mp_uscsi_cmd_t 			*mp_uscmdp;
8216 	struct uscsi_cmd 		*uscmdp;
8217 	struct scsi_arq_status 		*arqstat;
8218 	int 				err;
8219 
8220 	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
8221 	uscmdp = mp_uscmdp->uscmdp;
8222 	bp = mp_uscmdp->cmdbp;
8223 	ASSERT(bp != NULL);
8224 	VHCI_DEBUG(4, (CE_WARN, NULL,
8225 	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
8226 	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
8227 	/* Save the status and the residual into the uscsi_cmd struct */
8228 	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
8229 	uscmdp->uscsi_resid = bp->b_resid;
8230 
8231 	/* return early on a completely successful command */
8232 	if (pkt->pkt_reason == CMD_CMPLT &&
8233 	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
8234 	    pkt->pkt_resid == 0) {
8235 		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8236 		scsi_destroy_pkt(pkt);
8237 		biodone(bp);
8238 		return;
8239 	}
8240 	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
8241 	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
8242 	    pkt->pkt_reason, pkt->pkt_resid,
8243 	    pkt->pkt_state, bp->b_bcount, bp->b_resid));
8244 
8245 	err = EIO;
8246 
8247 	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
8248 	if (pkt->pkt_reason != CMD_CMPLT) {
8249 		/*
8250 		 * The command did not complete.
8251 		 */
8252 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8253 		    "vhci_uscsi_iodone: command did not complete."
8254 		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
8255 		if (pkt->pkt_flags & FLAG_SENSING) {
8256 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8257 		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
8258 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
8259 			err = ETIMEDOUT;
8260 		}
8261 	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
8262 		/*
8263 		 * The auto-rqsense happened, and the packet has a filled-in
8264 		 * scsi_arq_status structure, pointed to by pkt_scbp.
8265 		 */
8266 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8267 		    "vhci_uscsi_iodone: received auto-requested sense"));
8268 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8269 			/* get the amount of data to copy into rqbuf */
8270 			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
8271 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8272 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
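			/*
			 * Worked example (illustrative numbers only): if
			 * the target returned all but 2 bytes of the sense
			 * buffer (sts_rqpkt_resid == 2) and the caller's
			 * rqbuf is large enough, then rqlen is
			 * SENSE_LENGTH - 2 and uscsi_rqresid is
			 * uscsi_rqlen - rqlen.
			 */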
8273 			uscmdp->uscsi_rqstatus =
8274 			    *((char *)&arqstat->sts_rqpkt_status);
8275 			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
8276 			    rqlen != 0) {
8277 				bcopy(&(arqstat->sts_sensedata),
8278 				    uscmdp->uscsi_rqbuf, rqlen);
8279 			}
8280 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8281 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8282 			    "vhci_uscsi_iodone: ARQ "
8283 			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
8284 			    "xfer: %d rqpkt_resid: %d\n",
8285 			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
8286 			    uscmdp->uscsi_rqlen, rqlen,
8287 			    arqstat->sts_rqpkt_resid));
8288 		}
8289 	} else if (pkt->pkt_flags & FLAG_SENSING) {
8290 		struct buf *rqbp;
8291 		struct scsi_status *rqstatus;
8292 
8293 		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
8294 		/* a manual request sense was done - get the information */
8295 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8296 			int rqlen = SENSE_LENGTH - pkt->pkt_resid;
8297 
8298 			rqbp = mp_uscmdp->rqbp;
8299 			/* get the amount of data to copy into rqbuf */
8300 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8301 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8302 			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
8303 			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
8304 				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
8305 				    rqlen);
8306 			}
8307 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8308 			scsi_free_consistent_buf(rqbp);
8309 		}
8310 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING "
8311 		    "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
8312 		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
8313 	} else {
8314 		struct scsi_status *status =
8315 		    (struct scsi_status *)pkt->pkt_scbp;
8316 		/*
8317 		 * Command completed and we're not getting sense. Check for
8318 		 * errors and decide what to do next.
8319 		 */
8320 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8321 		    "vhci_uscsi_iodone: command appears complete: reason: %x",
8322 		    pkt->pkt_reason));
8323 		if (status->sts_chk) {
8324 			/* need to manually get the request sense */
8325 			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
8326 				scsi_destroy_pkt(pkt);
8327 				return;
8328 			}
8329 		} else {
8330 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8331 			    "vhci_chk_err: appears complete"));
8332 			err = 0;
8333 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8334 			if (pkt->pkt_resid) {
8335 				bp->b_resid += pkt->pkt_resid;
8336 			}
8337 		}
8338 	}
8339 
8340 	if (err) {
8341 		if (bp->b_resid == 0)
8342 			bp->b_resid = bp->b_bcount;
8343 		bioerror(bp, err);
8344 		bp->b_flags |= B_ERROR;
8345 	}
8346 
8347 	scsi_destroy_pkt(pkt);
8348 	biodone(bp);
8349 
8350 	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
8351 }
8352 
8353 /*
8354  * start routine for the mpapi uscsi command
8355  */
8356 int
8357 vhci_uscsi_iostart(struct buf *bp)
8358 {
8359 	struct scsi_pkt 	*pkt;
8360 	struct uscsi_cmd	*uscmdp;
8361 	mp_uscsi_cmd_t 		*mp_uscmdp;
8362 	int			stat_size, rval;
8363 	int			retry = 0;
8364 
8365 	ASSERT(bp->b_private != NULL);
8366 
8367 	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
8368 	uscmdp = mp_uscmdp->uscmdp;
8369 	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8370 		stat_size = SENSE_LENGTH;
8371 	} else {
8372 		stat_size = 1;
8373 	}
8374 
8375 	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
8376 	    stat_size, 0, 0, SLEEP_FUNC, NULL);
8377 	if (pkt == NULL) {
8378 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8379 		    "vhci_uscsi_iostart: rval: EINVAL"));
8380 		bp->b_resid = bp->b_bcount;
8381 		uscmdp->uscsi_resid = bp->b_bcount;
8382 		bioerror(bp, EINVAL);
8383 		biodone(bp);
8384 		return (EINVAL);
8385 	}
8386 
8387 	pkt->pkt_time = uscmdp->uscsi_timeout;
8388 	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
8389 	pkt->pkt_comp = vhci_uscsi_iodone;
8390 	pkt->pkt_private = mp_uscmdp;
8391 	if (uscmdp->uscsi_flags & USCSI_SILENT)
8392 		pkt->pkt_flags |= FLAG_SILENT;
8393 	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
8394 		pkt->pkt_flags |= FLAG_ISOLATE;
8395 	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
8396 		pkt->pkt_flags |= FLAG_DIAGNOSE;
8397 	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
8398 		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
8399 	}
8400 	VHCI_DEBUG(4, (CE_WARN, NULL,
8401 	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
8402 	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
8403 	    " stat_size: %d",
8404 	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
8405 	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
8406 	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));
8407 
8408 	/*
8409 	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8410 	 * selection is not based on path_instance.
8411 	 */
8412 	if (scsi_pkt_allocated_correctly(pkt))
8413 		pkt->pkt_path_instance = 0;
8414 
8415 	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
8416 	    retry < vhci_uscsi_retry_count) {
8417 		delay(drv_usectohz(vhci_uscsi_delay));
8418 		retry++;
8419 	}
8420 	if (retry >= vhci_uscsi_retry_count) {
8421 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8422 		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
8423 	}
8424 	switch (rval) {
8425 	case TRAN_ACCEPT:
8426 		rval = 0;
8427 		break;
8428 
8429 	default:
8430 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8431 		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
8432 		    rval, bp->b_bcount, bp->b_resid));
8433 		bp->b_resid = bp->b_bcount;
8434 		uscmdp->uscsi_resid = bp->b_bcount;
8435 		bioerror(bp, EIO);
8436 		scsi_destroy_pkt(pkt);
8437 		biodone(bp);
8438 		rval = EIO;
8439 		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8440 		break;
8441 	}
8442 	VHCI_DEBUG(4, (CE_NOTE, NULL,
8443 	    "vhci_uscsi_iostart: exit: rval: %d", rval));
8444 	return (rval);
8445 }
8446