xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c (revision f998c95e3b7029fe5f7542e115f7474ddb8024d7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 #pragma ident	"%Z%%M%	%I%	%E% SMI"
26 
27 /*
28  * Multiplexed I/O SCSI vHCI implementation
29  */
30 
31 #include <sys/conf.h>
32 #include <sys/file.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/scsi/scsi.h>
36 #include <sys/scsi/impl/scsi_reset_notify.h>
37 #include <sys/sunmdi.h>
38 #include <sys/mdi_impldefs.h>
39 #include <sys/scsi/adapters/scsi_vhci.h>
40 #include <sys/disp.h>
41 #include <sys/byteorder.h>
42 
43 extern uintptr_t scsi_callback_id;
44 extern ddi_dma_attr_t scsi_alloc_attr;
45 
46 #ifdef	DEBUG
47 int	vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
48 #endif
49 
50 /* retry for the vhci_do_prout command when a not ready is returned */
51 int vhci_prout_not_ready_retry = 180;
52 
53 /*
54  * These values are defined to support the internal retry of
55  * SCSI packets for better sense code handling.
56  */
57 #define	VHCI_CMD_CMPLT	0
58 #define	VHCI_CMD_RETRY	1
59 #define	VHCI_CMD_ERROR	-1
60 
61 #define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
62 #define	VHCI_SCSI_PERR		0x47
63 #define	VHCI_PGR_ILLEGALOP	-2
64 #define	VHCI_NUM_UPDATE_TASKQ	8
65 /* changed to 132 to accomodate HDS */
66 #define	VHCI_STD_INQ_SIZE	132
67 
68 /*
69  * Version Macros
70  */
71 #define	VHCI_NAME_VERSION	"SCSI VHCI Driver 1.68"
72 char		vhci_version_name[] = VHCI_NAME_VERSION;
73 
74 int		vhci_first_time = 0;
75 clock_t		vhci_to_ticks = 0;
76 int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
77 kcondvar_t	vhci_cv;
78 kmutex_t	vhci_global_mutex;
79 void		*vhci_softstate = NULL; /* for soft state */
80 
81 /*
82  * Flag to delay the retry of the reserve command
83  */
84 int		vhci_reserve_delay = 100000;
85 static int	vhci_path_quiesce_timeout = 60;
86 static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];
87 
88 /* uscsi delay for a TRAN_BUSY */
89 static int vhci_uscsi_delay = 100000;
90 static int vhci_uscsi_retry_count = 180;
91 /* uscsi_restart_sense timeout id in case it needs to get canceled */
92 static timeout_id_t vhci_restart_timeid = 0;
93 
94 /*
95  * Bidirectional map of 'target-port' to port id <pid> for support of
96  * iostat(1M) '-Xx' and '-Yx' output.
97  */
98 static kmutex_t		vhci_targetmap_mutex;
99 static uint_t		vhci_targetmap_pid = 1;
100 static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
101 static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */
102 
103 /*
104  * functions exported by scsi_vhci struct cb_ops
105  */
106 static int vhci_open(dev_t *, int, int, cred_t *);
107 static int vhci_close(dev_t, int, int, cred_t *);
108 static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
109 
110 /*
111  * functions exported by scsi_vhci struct dev_ops
112  */
113 static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
114 static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
115 static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);
116 
117 /*
118  * functions exported by scsi_vhci scsi_hba_tran_t transport table
119  */
120 static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
121     scsi_hba_tran_t *, struct scsi_device *);
122 static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
123     struct scsi_device *);
124 static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
125 static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
126 static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
127 static int vhci_scsi_reset(struct scsi_address *, int);
128 static int vhci_scsi_reset_target(struct scsi_address *, int level,
129     uint8_t select_path);
130 static int vhci_scsi_reset_bus(struct scsi_address *);
131 static int vhci_scsi_getcap(struct scsi_address *, char *, int);
132 static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
133 static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
134 static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
135     mdi_pathinfo_t *pip);
136 static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
137     struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
138 static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
139 static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
140 static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
141 static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
142     caddr_t);
143 static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
144 static int vhci_scsi_get_name(struct scsi_device *, char *, int);
145 static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
146     void *, void *);
147 static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
148     void *, dev_info_t **);
149 
150 /*
151  * functions registered with the mpxio framework via mdi_vhci_ops_t
152  */
153 static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
154 static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
155 static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
156     mdi_pathinfo_state_t, uint32_t, int);
157 static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
158 static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
159 static int vhci_failover(dev_info_t *, dev_info_t *, int);
160 static void vhci_client_attached(dev_info_t *);
161 
162 static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
163 static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
164 static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
165 static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
166 static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
167 static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
168 static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
169 static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
170 static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
171 static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
172 static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
173     int, caddr_t);
174 static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
175     uint_t, sv_iocdata_t *, int, caddr_t);
176 static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
177 static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
178     sv_iocdata_t *, int, caddr_t);
179 static int vhci_handle_ext_fo(struct scsi_pkt *, int);
180 static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
181 static int vhci_quiesce_lun(struct scsi_vhci_lun *);
182 static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
183 static void vhci_dispatch_scsi_start(void *);
184 static void vhci_efo_done(void *);
185 static void vhci_initiate_auto_failback(void *);
186 static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
187 static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
188     struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
189 static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
190 static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
191     scsi_vhci_lun_t *, char *, char *);
192 
193 static char *vhci_devnm_to_guid(char *);
194 static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
195     int, int (*func)(caddr_t));
196 static void vhci_intr(struct scsi_pkt *);
197 static int vhci_do_prout(scsi_vhci_priv_t *);
198 static void vhci_run_cmd(void *);
199 static int vhci_do_prin(struct vhci_pkt **);
200 static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
201 static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
202 static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
203 static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
204 static void vhci_lun_free(dev_info_t *);
205 static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
206     uint8_t, uint8_t);
207 void vhci_update_pathstates(void *);
208 
209 #ifdef DEBUG
210 static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
211 #endif
212 static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
213 static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
214 
215 /*
216  * MP-API related functions
217  */
218 extern int vhci_mpapi_init(struct scsi_vhci *);
219 extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
220 extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
221 extern void vhci_update_mpapi_data(struct scsi_vhci *,
222     scsi_vhci_lun_t *, mdi_pathinfo_t *);
223 extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
224     uint8_t, void*);
225 extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
226 extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
227     scsi_vhci_lun_t *);
228 
229 /* Special export to MP-API of tpgs non-'fops' entry point */
230 int (*tpgs_set_target_groups)(struct scsi_address *, int, int);
231 
232 #define	VHCI_DMA_MAX_XFER_CAP	0xffffffffULL
233 
234 #define	VHCI_MAX_PGR_RETRIES	3
235 
236 /*
237  * Macros for the device-type mpxio options
238  */
239 #define	LOAD_BALANCE_OPTIONS		"load-balance-options"
240 #define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
241 #define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
242 #define	DEVICE_TYPE_STR			"device-type"
243 #define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')
244 
245 static struct cb_ops vhci_cb_ops = {
246 	vhci_open,			/* open */
247 	vhci_close,			/* close */
248 	nodev,				/* strategy */
249 	nodev,				/* print */
250 	nodev,				/* dump */
251 	nodev,				/* read */
252 	nodev,				/* write */
253 	vhci_ioctl,			/* ioctl */
254 	nodev,				/* devmap */
255 	nodev,				/* mmap */
256 	nodev,				/* segmap */
257 	nochpoll,			/* chpoll */
258 	ddi_prop_op,			/* cb_prop_op */
259 	0,				/* streamtab */
260 	D_NEW | D_MP,			/* cb_flag */
261 	CB_REV,				/* rev */
262 	nodev,				/* aread */
263 	nodev				/* awrite */
264 };
265 
266 static struct dev_ops vhci_ops = {
267 	DEVO_REV,
268 	0,
269 	vhci_getinfo,
270 	nulldev,		/* identify */
271 	nulldev,		/* probe */
272 	vhci_attach,		/* attach and detach are mandatory */
273 	vhci_detach,
274 	nodev,			/* reset */
275 	&vhci_cb_ops,		/* cb_ops */
276 	NULL,			/* bus_ops */
277 	NULL,			/* power */
278 };
279 
280 extern struct mod_ops mod_driverops;
281 
282 static struct modldrv modldrv = {
283 	&mod_driverops,
284 	vhci_version_name,	/* module name */
285 	&vhci_ops
286 };
287 
288 static struct modlinkage modlinkage = {
289 	MODREV_1,
290 	&modldrv,
291 	NULL
292 };
293 
294 static mdi_vhci_ops_t vhci_opinfo = {
295 	MDI_VHCI_OPS_REV,
296 	vhci_pathinfo_init,		/* Pathinfo node init callback	*/
297 	vhci_pathinfo_uninit,		/* Pathinfo uninit callback	*/
298 	vhci_pathinfo_state_change,	/* Pathinfo node state change	*/
299 	vhci_failover,			/* failover callback		*/
300 	vhci_client_attached		/* client attached callback	*/
301 };
302 
303 /*
304  * The scsi_failover table defines an ordered set of 'fops' modules supported
305  * by scsi_vhci.  Currently, initialize this table from the 'ddi-forceload'
306  * property specified in scsi_vhci.conf.
307  */
308 struct scsi_failover {
309 	ddi_modhandle_t			sf_mod;
310 	struct scsi_failover_ops	*sf_sfo;
311 } *scsi_failover_table;
312 uint_t	scsi_nfailover;
313 
314 int
315 _init(void)
316 {
317 	int	rval;
318 
319 	/*
320 	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
321 	 * before registering with the transport first.
322 	 */
323 	if ((rval = ddi_soft_state_init(&vhci_softstate,
324 	    sizeof (struct scsi_vhci), 1)) != 0) {
325 		VHCI_DEBUG(1, (CE_NOTE, NULL,
326 		    "!_init:soft state init failed\n"));
327 		return (rval);
328 	}
329 
330 	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
331 		VHCI_DEBUG(1, (CE_NOTE, NULL,
332 		    "!_init: scsi hba init failed\n"));
333 		ddi_soft_state_fini(&vhci_softstate);
334 		return (rval);
335 	}
336 
337 	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
338 	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);
339 
340 	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
341 	vhci_targetmap_byport = mod_hash_create_strhash(
342 	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
343 	vhci_targetmap_bypid = mod_hash_create_idhash(
344 	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);
345 
346 	if ((rval = mod_install(&modlinkage)) != 0) {
347 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
348 		if (vhci_targetmap_bypid)
349 			mod_hash_destroy_idhash(vhci_targetmap_bypid);
350 		if (vhci_targetmap_byport)
351 			mod_hash_destroy_strhash(vhci_targetmap_byport);
352 		mutex_destroy(&vhci_targetmap_mutex);
353 		cv_destroy(&vhci_cv);
354 		mutex_destroy(&vhci_global_mutex);
355 		scsi_hba_fini(&modlinkage);
356 		ddi_soft_state_fini(&vhci_softstate);
357 	}
358 	return (rval);
359 }
360 
361 
362 /*
363  * the system is done with us as a driver, so clean up
364  */
365 int
366 _fini(void)
367 {
368 	int rval;
369 
370 	/*
371 	 * don't start cleaning up until we know that the module remove
372 	 * has worked  -- if this works, then we know that each instance
373 	 * has successfully been DDI_DETACHed
374 	 */
375 	if ((rval = mod_remove(&modlinkage)) != 0) {
376 		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
377 		return (rval);
378 	}
379 
380 	if (vhci_targetmap_bypid)
381 		mod_hash_destroy_idhash(vhci_targetmap_bypid);
382 	if (vhci_targetmap_byport)
383 		mod_hash_destroy_strhash(vhci_targetmap_byport);
384 	mutex_destroy(&vhci_targetmap_mutex);
385 	cv_destroy(&vhci_cv);
386 	mutex_destroy(&vhci_global_mutex);
387 	scsi_hba_fini(&modlinkage);
388 	ddi_soft_state_fini(&vhci_softstate);
389 
390 	return (rval);
391 }
392 
393 int
394 _info(struct modinfo *modinfop)
395 {
396 	return (mod_info(&modlinkage, modinfop));
397 }
398 
399 /*
400  * Lookup scsi_failover by "short name" of failover module.
401  */
402 struct scsi_failover_ops *
403 vhci_failover_ops_by_name(char *name)
404 {
405 	struct scsi_failover	*sf;
406 
407 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
408 		if (sf->sf_sfo == NULL)
409 			continue;
410 		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
411 			return (sf->sf_sfo);
412 	}
413 	return (NULL);
414 }
415 
416 /*
417  * Load all scsi_failover_ops 'fops' modules.
418  */
419 static void
420 vhci_failover_modopen(struct scsi_vhci *vhci)
421 {
422 	char			**module;
423 	int			i;
424 	struct scsi_failover	*sf;
425 	char			**dt;
426 	int			e;
427 
428 	if (scsi_failover_table)
429 		return;
430 
431 	/* Get the list of modules from scsi_vhci.conf */
432 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
433 	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
434 	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
435 		cmn_err(CE_WARN, "scsi_vhci: "
436 		    "scsi_vhci.conf is missing 'ddi-forceload'");
437 		return;
438 	}
439 	if (scsi_nfailover == 0) {
440 		cmn_err(CE_WARN, "scsi_vhci: "
441 		    "scsi_vhci.conf has empty 'ddi-forceload'");
442 		ddi_prop_free(module);
443 		return;
444 	}
445 
446 	/* allocate failover table based on number of modules */
447 	scsi_failover_table = (struct scsi_failover *)
448 	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
449 	    KM_SLEEP);
450 
451 	/* loop over modules specified in scsi_vhci.conf and open each module */
452 	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
453 		if (module[i] == NULL)
454 			continue;
455 
456 		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
457 		if (sf->sf_mod == NULL) {
458 			/*
459 			 * A module returns EEXIST if other software is
460 			 * supporting the intended function: for example
461 			 * the scsi_vhci_f_sum_emc module returns EEXIST
462 			 * from _init if EMC powerpath software is installed.
463 			 */
464 			if (e != EEXIST)
465 				cmn_err(CE_WARN, "scsi_vhci: unable to open "
466 				    "module '%s', error %d", module[i], e);
467 			continue;
468 		}
469 		sf->sf_sfo = ddi_modsym(sf->sf_mod,
470 		    "scsi_vhci_failover_ops", &e);
471 		if (sf->sf_sfo == NULL) {
472 			cmn_err(CE_WARN, "scsi_vhci: "
473 			    "unable to import 'scsi_failover_ops' from '%s', "
474 			    "error %d", module[i], e);
475 			(void) ddi_modclose(sf->sf_mod);
476 			sf->sf_mod = NULL;
477 			continue;
478 		}
479 
480 		/* register vid/pid of devices supported with mpapi */
481 		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
482 			vhci_mpapi_add_dev_prod(vhci, *dt);
483 
484 		/*
485 		 * Special processing for SFO_NAME_TPGS module, which contains
486 		 * the `tpgs_set_target_groups` implementation needed by the
487 		 * MP-API code.
488 		 */
489 		if (strcmp(sf->sf_sfo->sfo_name, SFO_NAME_TPGS) == 0) {
490 			tpgs_set_target_groups =
491 			    (int (*)(struct scsi_address *, int, int))
492 			    ddi_modsym(sf->sf_mod, "std_set_target_groups", &e);
493 			if (tpgs_set_target_groups == NULL) {
494 				cmn_err(CE_WARN, "scsi_vhci: "
495 				    "unable to import 'std_set_target_groups' "
496 				    "from '%s', error %d", module[i], e);
497 			}
498 		}
499 
500 		sf++;
501 	}
502 
503 	/* verify that at least the "well-known" modules were there */
504 	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
505 		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
506 		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
507 		    "'ddi-forceload'");
508 	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
509 		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
510 		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
511 		    "'ddi-forceload'");
512 
513 	/* call sfo_init for modules that need it */
514 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
515 		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
516 			(*sf->sf_sfo->sfo_init)();
517 	}
518 
519 	ddi_prop_free(module);
520 }
521 
522 /*
523  * unload all loaded scsi_failover_ops modules
524  */
525 static void
526 vhci_failover_modclose()
527 {
528 	struct scsi_failover	*sf;
529 
530 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
531 		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
532 			continue;
533 		(void) ddi_modclose(sf->sf_mod);
534 		sf->sf_mod = NULL;
535 		sf->sf_sfo = NULL;
536 	}
537 
538 	if (scsi_failover_table && scsi_nfailover)
539 		kmem_free(scsi_failover_table,
540 		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
541 	scsi_failover_table = NULL;
542 	scsi_nfailover = 0;
543 }
544 
545 /* ARGSUSED */
546 static int
547 vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
548 {
549 	struct scsi_vhci	*vhci;
550 
551 	if (otype != OTYP_CHR) {
552 		return (EINVAL);
553 	}
554 
555 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
556 	if (vhci == NULL) {
557 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
558 		return (ENXIO);
559 	}
560 
561 	mutex_enter(&vhci->vhci_mutex);
562 	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
563 		mutex_exit(&vhci->vhci_mutex);
564 		vhci_log(CE_NOTE, vhci->vhci_dip,
565 		    "!vhci%d: Already open\n", getminor(*devp));
566 		return (EBUSY);
567 	}
568 
569 	vhci->vhci_state |= VHCI_STATE_OPEN;
570 	mutex_exit(&vhci->vhci_mutex);
571 	return (0);
572 }
573 
574 
575 /* ARGSUSED */
576 static int
577 vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
578 {
579 	struct scsi_vhci	*vhci;
580 
581 	if (otype != OTYP_CHR) {
582 		return (EINVAL);
583 	}
584 
585 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
586 	if (vhci == NULL) {
587 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
588 		return (ENXIO);
589 	}
590 
591 	mutex_enter(&vhci->vhci_mutex);
592 	vhci->vhci_state &= ~VHCI_STATE_OPEN;
593 	mutex_exit(&vhci->vhci_mutex);
594 
595 	return (0);
596 }
597 
598 /* ARGSUSED */
599 static int
600 vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
601 	cred_t *credp, int *rval)
602 {
603 	if (IS_DEVCTL(cmd)) {
604 		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
605 	} else if (cmd == MP_CMD) {
606 		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
607 	} else {
608 		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
609 	}
610 }
611 
612 /*
613  * attach the module
614  */
615 static int
616 vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
617 {
618 	int			rval = DDI_FAILURE;
619 	int			scsi_hba_attached = 0;
620 	int			vhci_attached = 0;
621 	int			mutex_initted = 0;
622 	int			instance;
623 	struct scsi_vhci	*vhci;
624 	scsi_hba_tran_t		*tran;
625 	char			cache_name_buf[64];
626 	char			*data;
627 
628 	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));
629 
630 	instance = ddi_get_instance(dip);
631 
632 	switch (cmd) {
633 	case DDI_ATTACH:
634 		break;
635 
636 	case DDI_RESUME:
637 	case DDI_PM_RESUME:
638 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet"
639 		    "implemented\n"));
640 		return (rval);
641 
642 	default:
643 		VHCI_DEBUG(1, (CE_NOTE, NULL,
644 		    "!vhci_attach: unknown ddi command\n"));
645 		return (rval);
646 	}
647 
648 	/*
649 	 * Allocate vhci data structure.
650 	 */
651 	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
652 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
653 		    "soft state alloc failed\n"));
654 		return (DDI_FAILURE);
655 	}
656 
657 	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
658 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
659 		    "bad soft state\n"));
660 		ddi_soft_state_free(vhci_softstate, instance);
661 		return (DDI_FAILURE);
662 	}
663 
664 	/* Allocate packet cache */
665 	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
666 	    "vhci%d_cache", instance);
667 
668 	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
669 	mutex_initted++;
670 
671 	/*
672 	 * Allocate a transport structure
673 	 */
674 	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
675 	ASSERT(tran != NULL);
676 
677 	vhci->vhci_tran		= tran;
678 	vhci->vhci_dip		= dip;
679 	vhci->vhci_instance	= instance;
680 
681 	tran->tran_hba_private	= vhci;
682 	tran->tran_tgt_private	= NULL;
683 	tran->tran_tgt_init	= vhci_scsi_tgt_init;
684 	tran->tran_tgt_probe	= NULL;
685 	tran->tran_tgt_free	= vhci_scsi_tgt_free;
686 
687 	tran->tran_start	= vhci_scsi_start;
688 	tran->tran_abort	= vhci_scsi_abort;
689 	tran->tran_reset	= vhci_scsi_reset;
690 	tran->tran_getcap	= vhci_scsi_getcap;
691 	tran->tran_setcap	= vhci_scsi_setcap;
692 	tran->tran_init_pkt	= vhci_scsi_init_pkt;
693 	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
694 	tran->tran_dmafree	= vhci_scsi_dmafree;
695 	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
696 	tran->tran_reset_notify = vhci_scsi_reset_notify;
697 
698 	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
699 	tran->tran_get_name	= vhci_scsi_get_name;
700 	tran->tran_bus_reset	= NULL;
701 	tran->tran_quiesce	= NULL;
702 	tran->tran_unquiesce	= NULL;
703 
704 	/*
705 	 * register event notification routines with scsa
706 	 */
707 	tran->tran_get_eventcookie = NULL;
708 	tran->tran_add_eventcall = NULL;
709 	tran->tran_remove_eventcall = NULL;
710 	tran->tran_post_event = NULL;
711 
712 	tran->tran_bus_power = vhci_scsi_bus_power;
713 
714 	tran->tran_bus_config = vhci_scsi_bus_config;
715 
716 	/*
717 	 * Attach this instance with the mpxio framework
718 	 */
719 	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
720 	    != MDI_SUCCESS) {
721 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
722 		    "mdi_vhci_register failed\n"));
723 		goto attach_fail;
724 	}
725 	vhci_attached++;
726 
727 	/*
728 	 * Attach this instance of the hba.
729 	 *
730 	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
731 	 * driver, it has nothing to do with DMA. However, when calling
732 	 * scsi_hba_attach_setup() we need to pass something valid in the
733 	 * dma attributes parameter. So we just use scsi_alloc_attr.
734 	 * SCSA itself seems to care only for dma_attr_minxfer and
735 	 * dma_attr_burstsizes fields of dma attributes structure.
736 	 * It expects those fileds to be non-zero.
737 	 */
738 	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
739 	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
740 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
741 		    "hba attach failed\n"));
742 		goto attach_fail;
743 	}
744 	scsi_hba_attached++;
745 
746 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
747 	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
748 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
749 		    " ddi_create_minor_node failed\n"));
750 		goto attach_fail;
751 	}
752 
753 	/*
754 	 * Set pm-want-child-notification property for
755 	 * power management of the phci and client
756 	 */
757 	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
758 	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
759 		cmn_err(CE_WARN,
760 		    "%s%d fail to create pm-want-child-notification? prop",
761 		    ddi_driver_name(dip), ddi_get_instance(dip));
762 		goto attach_fail;
763 	}
764 
765 	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
766 	vhci->vhci_update_pathstates_taskq =
767 	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
768 	    MINCLSYSPRI, 1, 4, 0);
769 	ASSERT(vhci->vhci_taskq);
770 	ASSERT(vhci->vhci_update_pathstates_taskq);
771 
772 	/*
773 	 * Set appropriate configuration flags based on options set in
774 	 * conf file.
775 	 */
776 	vhci->vhci_conf_flags = 0;
777 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
778 	    "auto-failback", &data) == DDI_SUCCESS) {
779 		if (strcmp(data, "enable") == 0)
780 			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
781 		ddi_prop_free(data);
782 	}
783 
784 	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
785 		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
786 		    "disabled through scsi_vhci.conf file.");
787 
788 	/*
789 	 * Allocate an mpapi private structure
790 	 */
791 	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
792 	if (vhci_mpapi_init(vhci) != 0) {
793 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
794 		    "vhci_mpapi_init() failed"));
795 	}
796 
797 	vhci_failover_modopen(vhci);		/* load failover modules */
798 
799 	ddi_report_dev(dip);
800 	return (DDI_SUCCESS);
801 
802 attach_fail:
803 	if (vhci_attached)
804 		(void) mdi_vhci_unregister(dip, 0);
805 
806 	if (scsi_hba_attached)
807 		(void) scsi_hba_detach(dip);
808 
809 	if (vhci->vhci_tran)
810 		scsi_hba_tran_free(vhci->vhci_tran);
811 
812 	if (mutex_initted) {
813 		mutex_destroy(&vhci->vhci_mutex);
814 	}
815 
816 	ddi_soft_state_free(vhci_softstate, instance);
817 	return (DDI_FAILURE);
818 }
819 
820 
821 /*ARGSUSED*/
822 static int
823 vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
824 {
825 	int			instance = ddi_get_instance(dip);
826 	scsi_hba_tran_t		*tran;
827 	struct scsi_vhci	*vhci;
828 
829 	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));
830 
831 	if ((tran = ddi_get_driver_private(dip)) == NULL)
832 		return (DDI_FAILURE);
833 
834 	vhci = TRAN2HBAPRIVATE(tran);
835 	if (!vhci) {
836 		return (DDI_FAILURE);
837 	}
838 
839 	switch (cmd) {
840 	case DDI_DETACH:
841 		break;
842 
843 	case DDI_SUSPEND:
844 	case DDI_PM_SUSPEND:
845 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not yet"
846 		    "implemented\n"));
847 		return (DDI_FAILURE);
848 
849 	default:
850 		VHCI_DEBUG(1, (CE_NOTE, NULL,
851 		    "!vhci_detach: unknown ddi command\n"));
852 		return (DDI_FAILURE);
853 	}
854 
855 	(void) mdi_vhci_unregister(dip, 0);
856 	(void) scsi_hba_detach(dip);
857 	scsi_hba_tran_free(tran);
858 
859 	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
860 	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
861 		cmn_err(CE_WARN,
862 		    "%s%d unable to remove prop pm-want_child_notification?",
863 		    ddi_driver_name(dip), ddi_get_instance(dip));
864 	}
865 	if (vhci_restart_timeid != 0) {
866 		(void) untimeout(vhci_restart_timeid);
867 	}
868 	vhci_restart_timeid = 0;
869 
870 	mutex_destroy(&vhci->vhci_mutex);
871 	vhci->vhci_dip = NULL;
872 	vhci->vhci_tran = NULL;
873 	taskq_destroy(vhci->vhci_taskq);
874 	taskq_destroy(vhci->vhci_update_pathstates_taskq);
875 	ddi_remove_minor_node(dip, NULL);
876 	ddi_soft_state_free(vhci_softstate, instance);
877 
878 	vhci_failover_modclose();		/* unload failover modules */
879 	return (DDI_SUCCESS);
880 }
881 
882 /*
883  * vhci_getinfo()
884  * Given the device number, return the devinfo pointer or the
885  * instance number.
886  * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
887  */
888 
889 /*ARGSUSED*/
890 static int
891 vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
892 {
893 	struct scsi_vhci	*vhcip;
894 	int			instance = MINOR2INST(getminor((dev_t)arg));
895 
896 	switch (cmd) {
897 	case DDI_INFO_DEVT2DEVINFO:
898 		vhcip = ddi_get_soft_state(vhci_softstate, instance);
899 		if (vhcip != NULL)
900 			*result = vhcip->vhci_dip;
901 		else {
902 			*result = NULL;
903 			return (DDI_FAILURE);
904 		}
905 		break;
906 
907 	case DDI_INFO_DEVT2INSTANCE:
908 		*result = (void *)(uintptr_t)instance;
909 		break;
910 
911 	default:
912 		return (DDI_FAILURE);
913 	}
914 
915 	return (DDI_SUCCESS);
916 }
917 
918 
919 /*ARGSUSED*/
920 static int
921 vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
922 	scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
923 {
924 	char			*guid;
925 	scsi_vhci_lun_t		*vlun;
926 	struct scsi_vhci	*vhci;
927 	clock_t			from_ticks;
928 	mdi_pathinfo_t		*pip;
929 	int			rval;
930 
931 	ASSERT(hba_dip != NULL);
932 	ASSERT(tgt_dip != NULL);
933 
934 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
935 	ASSERT(vhci != NULL);
936 
937 	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
938 	    "!tgt_init: called for %s (instance %d)\n",
939 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));
940 
941 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
942 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
943 		/*
944 		 * This must be the .conf node.  The ssd node under
945 		 * fp already inserts a delay, so we just return from here.
946 		 * We rely on this delay to have all dips be posted to
947 		 * the ndi hotplug thread's newdev list.  This is
948 		 * necessary for the deferred attach mechanism to work
949 		 * and opens() done soon after boot to succeed.
950 		 */
951 		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
952 		    "property failed"));
953 		return (DDI_NOT_WELL_FORMED);
954 	}
955 
956 	vlun = vhci_lun_lookup(tgt_dip);
957 
958 	mutex_enter(&vhci_global_mutex);
959 
960 	from_ticks = ddi_get_lbolt();
961 	if (vhci_to_ticks == 0) {
962 		vhci_to_ticks = from_ticks +
963 		    drv_usectohz(vhci_init_wait_timeout);
964 	}
965 
966 #if DEBUG
967 	if (vlun) {
968 		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
969 		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
970 		    "from_ticks %lx to_ticks %lx",
971 		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
972 	} else {
973 		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
974 		    "vhci_scsi_tgt_init: guid %s : vlun not found "
975 		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
976 		    vhci_to_ticks));
977 	}
978 #endif
979 
980 	rval = mdi_select_path(tgt_dip, NULL,
981 	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
982 	if (rval == MDI_SUCCESS) {
983 		mdi_rele_path(pip);
984 	}
985 
986 	/*
987 	 * Wait for the following conditions :
988 	 *	1. no vlun available yet
989 	 *	2. no path established
990 	 *	3. timer did not expire
991 	 */
992 	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
993 	    (rval != MDI_SUCCESS)) {
994 		if (vlun && vlun->svl_not_supported) {
995 			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
996 			    "vlun 0x%p lun guid %s not supported!",
997 			    (void *)vlun, guid));
998 			mutex_exit(&vhci_global_mutex);
999 			ddi_prop_free(guid);
1000 			return (DDI_NOT_WELL_FORMED);
1001 		}
1002 		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
1003 			vhci_first_time = 1;
1004 		}
1005 		if (vhci_first_time == 1) {
1006 			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
1007 			    "no wait for %s. from_tick %lx, to_tick %lx",
1008 			    guid, from_ticks, vhci_to_ticks));
1009 			mutex_exit(&vhci_global_mutex);
1010 			ddi_prop_free(guid);
1011 			return (DDI_NOT_WELL_FORMED);
1012 		}
1013 
1014 		if (cv_timedwait(&vhci_cv,
1015 		    &vhci_global_mutex, vhci_to_ticks) == -1) {
1016 			/* Timed out */
1017 #ifdef DEBUG
1018 			if (vlun == NULL) {
1019 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1020 				    "tgt_init: no vlun for %s!", guid));
1021 			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
1022 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1023 				    "tgt_init: client path count is "
1024 				    "zero for %s!", guid));
1025 			} else {
1026 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1027 				    "tgt_init: client path not "
1028 				    "available yet for %s!", guid));
1029 			}
1030 #endif /* DEBUG */
1031 			mutex_exit(&vhci_global_mutex);
1032 			ddi_prop_free(guid);
1033 			return (DDI_NOT_WELL_FORMED);
1034 		}
1035 		vlun = vhci_lun_lookup(tgt_dip);
1036 		rval = mdi_select_path(tgt_dip, NULL,
1037 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
1038 		    NULL, &pip);
1039 		if (rval == MDI_SUCCESS) {
1040 			mdi_rele_path(pip);
1041 		}
1042 		from_ticks = ddi_get_lbolt();
1043 	}
1044 	mutex_exit(&vhci_global_mutex);
1045 
1046 	ASSERT(vlun != NULL);
1047 	ddi_prop_free(guid);
1048 	hba_tran->tran_tgt_private = vlun;
1049 
1050 	return (DDI_SUCCESS);
1051 }
1052 
1053 /*ARGSUSED*/
1054 static void
1055 vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1056 	scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1057 {
1058 }
1059 
1060 /*
1061  * a PGR register command has started; copy the info we need
1062  */
1063 int
1064 vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
1065 {
1066 	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
1067 	void			*addr;
1068 
1069 	if (!vpkt->vpkt_tgt_init_bp)
1070 		return (TRAN_BADPKT);
1071 
1072 	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
1073 	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
1074 	if (addr == NULL)
1075 		return (TRAN_BUSY);
1076 
1077 	mutex_enter(&vlun->svl_mutex);
1078 
1079 	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");
1080 
1081 	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
1082 	    (2 * MHIOC_RESV_KEY_SIZE*sizeof (char)));
1083 	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));
1084 
1085 	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");
1086 
1087 	vlun->svl_time = pkt->pkt_time;
1088 	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
1089 	vlun->svl_first_path = vpkt->vpkt_path;
1090 	mutex_exit(&vlun->svl_mutex);
1091 	return (0);
1092 }
1093 
1094 /*
1095  * Function name : vhci_scsi_start()
1096  *
1097  * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
1098  *					  or other fatal failure
1099  *					  preventing packet transportation
1100  *		   TRAN_BUSY		- request queue is full
1101  *		   TRAN_ACCEPT		- pkt has been submitted to phci
1102  *					  (or is held in the waitQ)
1103  * Description	 : Implements SCSA's tran_start() entry point for
1104  *		   packet transport
1105  *
1106  */
1107 static int
1108 vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1109 {
1110 	int			rval = TRAN_ACCEPT;
1111 	int			instance, held;
1112 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1113 	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
1114 	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
1115 	int			flags = 0;
1116 	scsi_vhci_priv_t	*svp;
1117 	dev_info_t 		*cdip;
1118 	client_lb_t		lbp;
1119 	int			restore_lbp = 0;
1120 	/* set if pkt is SCSI-II RESERVE cmd */
1121 	int			pkt_reserve_cmd = 0;
1122 	int			reserve_failed = 0;
1123 
1124 	ASSERT(vhci != NULL);
1125 	ASSERT(vpkt != NULL);
1126 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
1127 	cdip = ADDR2DIP(ap);
1128 
1129 	/*
1130 	 * Block IOs if LUN is held or QUIESCED for IOs.
1131 	 */
1132 	if ((VHCI_LUN_IS_HELD(vlun)) ||
1133 	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1134 		return (TRAN_BUSY);
1135 	}
1136 
1137 	/*
1138 	 * vhci_lun needs to be quiesced before SCSI-II RESERVE command
1139 	 * can be issued.  This may require a cv_timedwait, which is
1140 	 * dangerous to perform in an interrupt context.  So if this
1141 	 * is a RESERVE command a taskq is dispatched to service it.
1142 	 * This taskq shall again call vhci_scsi_start, but we shall be
1143 	 * sure its not in an interrupt context.
1144 	 */
1145 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
1146 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
1147 		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
1148 			if (taskq_dispatch(vhci->vhci_taskq,
1149 			    vhci_dispatch_scsi_start, (void *) vpkt,
1150 			    KM_NOSLEEP)) {
1151 				return (TRAN_ACCEPT);
1152 			} else {
1153 				return (TRAN_BUSY);
1154 			}
1155 		}
1156 
1157 		/*
1158 		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
1159 		 * get serviced for a lun.
1160 		 */
1161 		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
1162 		if (!held) {
1163 			return (TRAN_BUSY);
1164 		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
1165 		    VLUN_QUIESCED_FLG) {
1166 			VHCI_RELEASE_LUN(vlun);
1167 			return (TRAN_BUSY);
1168 		}
1169 
1170 		/*
1171 		 * To ensure that no IOs occur for this LUN for the duration
1172 		 * of this pkt set the VLUN_QUIESCED_FLG.
1173 		 * In case this routine needs to exit on error make sure that
1174 		 * this flag is cleared.
1175 		 */
1176 		vlun->svl_flags |= VLUN_QUIESCED_FLG;
1177 		pkt_reserve_cmd = 1;
1178 
1179 		/*
1180 		 * if this is a SCSI-II RESERVE command, set load balancing
1181 		 * policy to be ALTERNATE PATH to ensure that all subsequent
1182 		 * IOs are routed on the same path.  This is because if commands
1183 		 * are routed across multiple paths then IOs on paths other than
1184 		 * the one on which the RESERVE was executed will get a
1185 		 * RESERVATION CONFLICT
1186 		 */
1187 		lbp = mdi_get_lb_policy(cdip);
1188 		if (lbp != LOAD_BALANCE_NONE) {
1189 			if (vhci_quiesce_lun(vlun) != 1) {
1190 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1191 				VHCI_RELEASE_LUN(vlun);
1192 				return (TRAN_FATAL_ERROR);
1193 			}
1194 			vlun->svl_lb_policy_save = lbp;
1195 			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
1196 			    MDI_SUCCESS) {
1197 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1198 				VHCI_RELEASE_LUN(vlun);
1199 				return (TRAN_FATAL_ERROR);
1200 			}
1201 			restore_lbp = 1;
1202 		}
1203 		/*
1204 		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
1205 		 * To narrow this window where a reserve command may be sent
1206 		 * down an inactive path the path states first need to be
1207 		 * updated. Before calling vhci_update_pathstates reset
1208 		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
1209 		 * for this lun.  This shall prevent an unnecessary reset
1210 		 * from being sent out.
1211 		 */
1212 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
1213 		vhci_update_pathstates((void *)vlun);
1214 	}
1215 
1216 	instance = ddi_get_instance(vhci->vhci_dip);
1217 
1218 	/*
1219 	 * If the command is PRIN with action of zero, then the cmd
1220 	 * is reading PR keys which requires filtering on completion.
1221 	 * Data cache sync must be guaranteed.
1222 	 */
1223 	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) &&
1224 	    (pkt->pkt_cdbp[1] == 0) &&
1225 	    (vpkt->vpkt_org_vpkt == NULL)) {
1226 		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
1227 	}
1228 
1229 	/*
1230 	 * Do not defer bind for PKT_DMA_PARTIAL
1231 	 */
1232 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1233 
1234 		/* This is a non pkt_dma_partial case */
1235 		if ((rval = vhci_bind_transport(
1236 		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
1237 		    != TRAN_ACCEPT) {
1238 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1239 			    "!vhci%d %x: failed to bind transport: "
1240 			    "vlun 0x%p pkt_reserved %x restore_lbp %x,"
1241 			    "lbp %x", instance, rval, (void *)vlun,
1242 			    pkt_reserve_cmd, restore_lbp, lbp));
1243 			if (restore_lbp)
1244 				(void) mdi_set_lb_policy(cdip, lbp);
1245 			if (pkt_reserve_cmd)
1246 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1247 			return (rval);
1248 		}
1249 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1250 		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
1251 	}
1252 	ASSERT(vpkt->vpkt_hba_pkt != NULL);
1253 	ASSERT(vpkt->vpkt_path != NULL);
1254 
1255 	/*
1256 	 * This is the chance to adjust the pHCI's pkt and other information
1257 	 * from target driver's pkt.
1258 	 */
1259 	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
1260 	    (void *)vpkt));
1261 	vhci_update_pHCI_pkt(vpkt, pkt);
1262 
1263 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
1264 		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
1265 			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1266 			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
1267 			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
1268 			    (void *)vlun, (void *)vpkt->vpkt_path,
1269 			    (void *)vlun->svl_resrv_pip,
1270 			    mdi_get_lb_policy(cdip)));
1271 			reserve_failed = 1;
1272 		}
1273 	}
1274 
1275 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(
1276 	    vpkt->vpkt_path);
1277 	if (svp == NULL || reserve_failed) {
1278 		if (pkt_reserve_cmd) {
1279 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1280 			    "!vhci_bind returned null svp vlun 0x%p",
1281 			    (void *)vlun));
1282 			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1283 			if (restore_lbp)
1284 				(void) mdi_set_lb_policy(cdip, lbp);
1285 		}
1286 pkt_cleanup:
1287 		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1288 			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1289 			vpkt->vpkt_hba_pkt = NULL;
1290 			if (vpkt->vpkt_path) {
1291 				mdi_rele_path(vpkt->vpkt_path);
1292 				vpkt->vpkt_path = NULL;
1293 			}
1294 		}
1295 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1296 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1297 		    ((pkt->pkt_cdbp[1] & 0x1f) ==
1298 		    VHCI_PROUT_R_AND_IGNORE))) {
1299 			sema_v(&vlun->svl_pgr_sema);
1300 		}
1301 		return (TRAN_BUSY);
1302 	}
1303 
1304 	VHCI_INCR_PATH_CMDCOUNT(svp);
1305 
1306 	/*
1307 	 * Ensure that no other IOs raced ahead, while a RESERVE cmd was
1308 	 * QUIESCING the same lun.
1309 	 */
1310 	if ((!pkt_reserve_cmd) &&
1311 	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1312 		VHCI_DECR_PATH_CMDCOUNT(svp);
1313 		goto pkt_cleanup;
1314 	}
1315 
1316 	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
1317 	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1318 		/*
1319 		 * currently this thread only handles running PGR
1320 		 * commands, so don't bother creating it unless
1321 		 * something interesting is going to happen (like
1322 		 * either a PGR out, or a PGR in with enough space
1323 		 * to hold the keys that are getting returned)
1324 		 */
1325 		mutex_enter(&vlun->svl_mutex);
1326 		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
1327 		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1328 			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
1329 			    1, MINCLSYSPRI, 1, 4, 0);
1330 			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
1331 		}
1332 		mutex_exit(&vlun->svl_mutex);
1333 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1334 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1335 		    ((pkt->pkt_cdbp[1] & 0x1f) ==
1336 		    VHCI_PROUT_R_AND_IGNORE))) {
1337 			if (rval = vhci_pgr_register_start(vlun, pkt)) {
1338 				/* an error */
1339 				sema_v(&vlun->svl_pgr_sema);
1340 				return (rval);
1341 			}
1342 		}
1343 	}
1344 
1345 	/*
1346 	 * SCSI-II RESERVE cmd is not expected in polled mode.
1347 	 * If this changes it needs to be handled for the polled scenario.
1348 	 */
1349 	flags = vpkt->vpkt_hba_pkt->pkt_flags;
1350 	rval = scsi_transport(vpkt->vpkt_hba_pkt);
1351 	if (rval == TRAN_ACCEPT) {
1352 		if (flags & FLAG_NOINTR) {
1353 			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
1354 			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;
1355 
1356 			ASSERT(tpkt != NULL);
1357 			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
1358 			tpkt->pkt_resid = pkt->pkt_resid;
1359 			tpkt->pkt_state = pkt->pkt_state;
1360 			tpkt->pkt_statistics = pkt->pkt_statistics;
1361 			tpkt->pkt_reason = pkt->pkt_reason;
1362 
1363 			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
1364 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
1365 				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
1366 				    vpkt->vpkt_tgt_init_scblen);
1367 			}
1368 
1369 			VHCI_DECR_PATH_CMDCOUNT(svp);
1370 			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1371 				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1372 				vpkt->vpkt_hba_pkt = NULL;
1373 				if (vpkt->vpkt_path) {
1374 					mdi_rele_path(vpkt->vpkt_path);
1375 					vpkt->vpkt_path = NULL;
1376 				}
1377 			}
1378 			/*
1379 			 * This path will not automatically retry pkts
1380 			 * internally, therefore, vpkt_org_vpkt should
1381 			 * never be set.
1382 			 */
1383 			ASSERT(vpkt->vpkt_org_vpkt == NULL);
1384 			if (tpkt->pkt_comp) {
1385 				(*tpkt->pkt_comp)(tpkt);
1386 			}
1387 		}
1388 		return (rval);
1389 	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1390 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1391 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1392 		/* the command exited with bad status */
1393 		sema_v(&vlun->svl_pgr_sema);
1394 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
1395 		/* the command exited with bad status */
1396 		sema_v(&vlun->svl_pgr_sema);
1397 	} else if (pkt_reserve_cmd) {
1398 		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1399 		    "!vhci_scsi_start: reserve failed vlun 0x%p",
1400 		    (void *)vlun));
1401 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1402 		if (restore_lbp)
1403 			(void) mdi_set_lb_policy(cdip, lbp);
1404 	}
1405 
1406 	ASSERT(vpkt->vpkt_hba_pkt != NULL);
1407 	VHCI_DECR_PATH_CMDCOUNT(svp);
1408 
1409 	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
1410 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1411 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1412 		vpkt->vpkt_hba_pkt = NULL;
1413 		if (vpkt->vpkt_path) {
1414 			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
1415 			mdi_rele_path(vpkt->vpkt_path);
1416 			vpkt->vpkt_path = NULL;
1417 		}
1418 	}
1419 	return (TRAN_BUSY);
1420 }
1421 
1422 /*
1423  * Function name : vhci_scsi_reset()
1424  *
1425  * Return Values : 0 - reset failed
1426  *		   1 - reset succeeded
1427  */
1428 
1429 /* ARGSUSED */
1430 static int
1431 vhci_scsi_reset(struct scsi_address *ap, int level)
1432 {
1433 	int rval = 0;
1434 
1435 	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
1436 	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
1437 		return (vhci_scsi_reset_target(ap, level, TRUE));
1438 	} else if (level == RESET_ALL) {
1439 		return (vhci_scsi_reset_bus(ap));
1440 	}
1441 
1442 	return (rval);
1443 }
1444 
1445 /*
1446  * vhci_recovery_reset:
1447  *	Issues reset to the device
1448  * Input:
1449  *	vlun - vhci lun pointer of the device
1450  *	ap - address of the device
1451  *	select_path:
1452  *		If select_path is FALSE, then the address specified in ap is
1453  *		the path on which reset will be issued.
1454  *		If select_path is TRUE, then path is obtained by calling
1455  *		mdi_select_path.
1456  *
1457  *	recovery_depth:
1458  *		Caller can specify the level of reset.
1459  *		VHCI_DEPTH_LUN -
1460  *			Issues LUN RESET if device supports lun reset.
1461  *		VHCI_DEPTH_TARGET -
1462  *			If Lun Reset fails or the device does not support
1463  *			Lun Reset, issues TARGET RESET
1464  *		VHCI_DEPTH_ALL -
1465  *			If Lun Reset fails or the device does not support
1466  *			Lun Reset, issues TARGET RESET.
1467  *			If TARGET RESET does not succeed, issues Bus Reset.
1468  */
1469 
1470 static int
1471 vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
1472 	uint8_t select_path, uint8_t recovery_depth)
1473 {
1474 	int	ret = 0;
1475 
1476 	ASSERT(ap != NULL);
1477 
1478 	if (vlun && vlun->svl_support_lun_reset == 1) {
1479 		ret = vhci_scsi_reset_target(ap, RESET_LUN,
1480 		    select_path);
1481 	}
1482 
1483 	recovery_depth--;
1484 
1485 	if ((ret == 0) && recovery_depth) {
1486 		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
1487 		    select_path);
1488 		recovery_depth--;
1489 	}
1490 
1491 	if ((ret == 0) && recovery_depth) {
1492 		(void) scsi_reset(ap, RESET_ALL);
1493 	}
1494 
1495 	return (ret);
1496 }
1497 
1498 /*
1499  * Note: The scsi_address passed to this routine could be the scsi_address
1500  * for the virtual device or the physical device. No assumptions should be
1501  * made in this routine about the ap structure and a_hba_tran->tran_tgt_private
1502  * field of ap can not be assumed to be the vhci structure.
1503  * Further note that the child dip would be the dip of the ssd node irrespective
1504  * of the scsi_address passed.
1505  */
1506 
1507 static int
1508 vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
1509 {
1510 	dev_info_t		*vdip, *pdip, *cdip = ADDR2DIP(ap);
1511 	mdi_pathinfo_t		*pip = NULL;
1512 	mdi_pathinfo_t		*npip = NULL;
1513 	int			rval = -1;
1514 	scsi_vhci_priv_t	*svp = NULL;
1515 	struct scsi_address	*pap = NULL;
1516 	scsi_hba_tran_t		*hba = NULL;
1517 	int			sps;
1518 	struct scsi_vhci	*vhci = NULL;
1519 
1520 	if (select_path != TRUE) {
1521 		ASSERT(ap != NULL);
1522 		if (level == RESET_LUN) {
1523 			hba = ap->a_hba_tran;
1524 			ASSERT(hba != NULL);
1525 			return ((*hba->tran_reset)(ap, RESET_LUN));
1526 		}
1527 		return (scsi_reset(ap, level));
1528 	}
1529 
1530 	ASSERT(cdip != NULL);
1531 	vdip = ddi_get_parent(cdip);
1532 	ASSERT(vdip != NULL);
1533 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
1534 	ASSERT(vhci != NULL);
1535 
1536 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
1537 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
1538 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1539 		    "Unable to get a path, dip 0x%p", (void *)cdip));
1540 		return (0);
1541 	}
1542 again:
1543 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
1544 	if (svp == NULL) {
1545 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1546 		    "priv is NULL, pip 0x%p", (void *)pip));
1547 		mdi_rele_path(pip);
1548 		return (0);
1549 	}
1550 
1551 	if (svp->svp_psd == NULL) {
1552 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1553 		    "psd is NULL, pip 0x%p, svp 0x%p",
1554 		    (void *)pip, (void *)svp));
1555 		mdi_rele_path(pip);
1556 		return (0);
1557 	}
1558 
1559 	pap = &svp->svp_psd->sd_address;
1560 	hba = pap->a_hba_tran;
1561 
1562 	ASSERT(pap != NULL);
1563 	ASSERT(hba != NULL);
1564 
1565 	if (hba->tran_reset != NULL) {
1566 		if ((*hba->tran_reset)(pap, level) == 0) {
1567 			pdip = mdi_pi_get_phci(pip);
1568 			vhci_log(CE_WARN, vdip, "!(%s%d):"
1569 			    " path (%s%d), reset %d failed",
1570 			    ddi_driver_name(cdip), ddi_get_instance(cdip),
1571 			    ddi_driver_name(pdip), ddi_get_instance(pdip),
1572 			    level);
1573 
1574 			/*
1575 			 * Select next path and issue the reset, repeat
1576 			 * until all paths are exhausted
1577 			 */
1578 			sps = mdi_select_path(cdip, NULL,
1579 			    MDI_SELECT_ONLINE_PATH, pip, &npip);
1580 			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
1581 				mdi_rele_path(pip);
1582 				return (0);
1583 			}
1584 			mdi_rele_path(pip);
1585 			pip = npip;
1586 			goto again;
1587 		}
1588 		mdi_rele_path(pip);
1589 		mutex_enter(&vhci->vhci_mutex);
1590 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
1591 		    &vhci->vhci_reset_notify_listf);
1592 		mutex_exit(&vhci->vhci_mutex);
1593 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
1594 		    "reset %d sent down pip:%p for cdip:%p\n", level,
1595 		    (void *)pip, (void *)cdip));
1596 		return (1);
1597 	}
1598 	mdi_rele_path(pip);
1599 	return (0);
1600 }
1601 
1602 
1603 /* ARGSUSED */
1604 static int
1605 vhci_scsi_reset_bus(struct scsi_address *ap)
1606 {
1607 	return (1);
1608 }
1609 
1610 
1611 /*
1612  * called by vhci_getcap and vhci_setcap to get and set (respectively)
1613  * SCSI capabilities
1614  */
1615 /* ARGSUSED */
1616 static int
1617 vhci_commoncap(struct scsi_address *ap, char *cap,
1618     int val, int tgtonly, int doset)
1619 {
1620 	struct scsi_vhci		*vhci = ADDR2VHCI(ap);
1621 	struct scsi_vhci_lun		*vlun = ADDR2VLUN(ap);
1622 	int			cidx;
1623 	int			rval = 0;
1624 
1625 	if (cap == (char *)0) {
1626 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1627 		    "!vhci_commoncap: invalid arg"));
1628 		return (rval);
1629 	}
1630 
1631 	if (vlun == NULL) {
1632 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1633 		    "!vhci_commoncap: vlun is null"));
1634 		return (rval);
1635 	}
1636 
1637 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
1638 		return (UNDEFINED);
1639 	}
1640 
1641 	/*
1642 	 * Process setcap request.
1643 	 */
1644 	if (doset) {
1645 		/*
1646 		 * At present, we can only set binary (0/1) values
1647 		 */
1648 		switch (cidx) {
1649 		case SCSI_CAP_ARQ:
1650 			if (val == 0) {
1651 				rval = 0;
1652 			} else {
1653 				rval = 1;
1654 			}
1655 			break;
1656 
1657 		case SCSI_CAP_LUN_RESET:
1658 			if (tgtonly == 0) {
1659 				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1660 				    "scsi_vhci_setcap: "
1661 				    "Returning error since whom = 0"));
1662 				rval = -1;
1663 				break;
1664 			}
1665 			/*
1666 			 * Set the capability accordingly.
1667 			 */
1668 			mutex_enter(&vlun->svl_mutex);
1669 			vlun->svl_support_lun_reset = val;
1670 			rval = val;
1671 			mutex_exit(&vlun->svl_mutex);
1672 			break;
1673 
1674 		case SCSI_CAP_SECTOR_SIZE:
1675 			mutex_enter(&vlun->svl_mutex);
1676 			vlun->svl_sector_size = val;
1677 			vlun->svl_setcap_done = 1;
1678 			mutex_exit(&vlun->svl_mutex);
1679 			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);
1680 
1681 			/* Always return success */
1682 			rval = 1;
1683 			break;
1684 
1685 		default:
1686 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1687 			    "!vhci_setcap: unsupported %d", cidx));
1688 			rval = UNDEFINED;
1689 			break;
1690 		}
1691 
1692 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1693 		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
1694 		    "0x%x/0x%x/0x%x/%d\n",
1695 		    cap, val, tgtonly, doset, rval));
1696 
1697 	} else {
1698 		/*
1699 		 * Process getcap request.
1700 		 */
1701 		switch (cidx) {
1702 		case SCSI_CAP_DMA_MAX:
1703 			rval = (int)VHCI_DMA_MAX_XFER_CAP;
1704 			break;
1705 
1706 		case SCSI_CAP_INITIATOR_ID:
1707 			rval = 0x00;
1708 			break;
1709 
1710 		case SCSI_CAP_ARQ:
1711 		case SCSI_CAP_RESET_NOTIFICATION:
1712 		case SCSI_CAP_TAGGED_QING:
1713 			rval = 1;
1714 			break;
1715 
1716 		case SCSI_CAP_SCSI_VERSION:
1717 			rval = 3;
1718 			break;
1719 
1720 		case SCSI_CAP_INTERCONNECT_TYPE:
1721 			rval = INTERCONNECT_FABRIC;
1722 			break;
1723 
1724 		case SCSI_CAP_LUN_RESET:
1725 			/*
1726 			 * scsi_vhci will always return success for LUN reset.
1727 			 * When request for doing LUN reset comes
1728 			 * through scsi_reset entry point, at that time attempt
1729 			 * will be made to do reset through all the possible
1730 			 * paths.
1731 			 */
1732 			mutex_enter(&vlun->svl_mutex);
1733 			rval = vlun->svl_support_lun_reset;
1734 			mutex_exit(&vlun->svl_mutex);
1735 			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
1736 			    "scsi_vhci_getcap:"
1737 			    "Getting the Lun reset capability %d", rval));
1738 			break;
1739 
1740 		case SCSI_CAP_SECTOR_SIZE:
1741 			mutex_enter(&vlun->svl_mutex);
1742 			rval = vlun->svl_sector_size;
1743 			mutex_exit(&vlun->svl_mutex);
1744 			break;
1745 
1746 		default:
1747 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1748 			    "!vhci_getcap: unsupported %d", cidx));
1749 			rval = UNDEFINED;
1750 			break;
1751 		}
1752 
1753 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1754 		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
1755 		    "0x%x/0x%x/0x%x/%d\n",
1756 		    cap, val, tgtonly, doset, rval));
1757 	}
1758 	return (rval);
1759 }
1760 
1761 
1762 /*
1763  * Function name : vhci_scsi_getcap()
1764  *
1765  */
1766 static int
1767 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
1768 {
1769 	return (vhci_commoncap(ap, cap, 0, whom, 0));
1770 }
1771 
1772 static int
1773 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1774 {
1775 	return (vhci_commoncap(ap, cap, value, whom, 1));
1776 }
1777 
1778 /*
1779  * Function name : vhci_scsi_abort()
1780  */
1781 /* ARGSUSED */
1782 static int
1783 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1784 {
1785 	return (0);
1786 }
1787 
1788 /*
1789  * Function name : vhci_scsi_init_pkt
1790  *
1791  * Return Values : pointer to scsi_pkt, or NULL
1792  */
1793 /* ARGSUSED */
1794 static struct scsi_pkt *
1795 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1796 	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1797 	int flags, int (*callback)(caddr_t), caddr_t arg)
1798 {
1799 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1800 	struct vhci_pkt		*vpkt;
1801 	int			rval;
1802 	int			newpkt = 0;
1803 	struct scsi_pkt		*pktp;
1804 
1805 
1806 	if (pkt == NULL) {
1807 		if (cmdlen > VHCI_SCSI_CDB_SIZE) {
1808 			VHCI_DEBUG(1, (CE_NOTE, NULL,
1809 			    "!init pkt: cdb size not supported\n"));
1810 			return (NULL);
1811 		}
1812 
1813 		pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
1814 		    ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
1815 		    arg);
1816 
1817 		if (pktp == NULL) {
1818 			return (NULL);
1819 		}
1820 
1821 		/* Get the vhci's private structure */
1822 		vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
1823 		ASSERT(vpkt);
1824 
1825 		/* Save the target driver's packet */
1826 		vpkt->vpkt_tgt_pkt = pktp;
1827 
1828 		/*
1829 		 * Save pkt_tgt_init_pkt fields if deferred binding
1830 		 * is needed or for other purposes.
1831 		 */
1832 		vpkt->vpkt_tgt_init_pkt_flags = flags;
1833 		vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
1834 		vpkt->vpkt_state = VHCI_PKT_IDLE;
1835 		vpkt->vpkt_tgt_init_cdblen = cmdlen;
1836 		vpkt->vpkt_tgt_init_scblen = statuslen;
1837 		newpkt = 1;
1838 	} else { /* pkt not NULL */
1839 		vpkt = pkt->pkt_ha_private;
1840 	}
1841 
1842 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
1843 	    "vpkt %p flags %x\n", (void *)vpkt, flags));
1844 
1845 	/* Clear any stale error flags */
1846 	if (bp) {
1847 		bioerror(bp, 0);
1848 	}
1849 
1850 	vpkt->vpkt_tgt_init_bp = bp;
1851 
1852 	if (flags & PKT_DMA_PARTIAL) {
1853 
1854 		/*
1855 		 * Immediate binding is needed.
1856 		 * Target driver may not set this flag in next invocation.
1857 		 * vhci has to remember this flag was set during first
1858 		 * invocation of vhci_scsi_init_pkt.
1859 		 */
1860 		vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
1861 	}
1862 
1863 	if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
1864 
1865 		/*
1866 		 * Re-initialize some of the target driver packet state
1867 		 * information.
1868 		 */
1869 		vpkt->vpkt_tgt_pkt->pkt_state = 0;
1870 		vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
1871 		vpkt->vpkt_tgt_pkt->pkt_reason = 0;
1872 
1873 		/*
1874 		 * Binding a vpkt->vpkt_path for this IO at init_time.
1875 		 * If an IO error happens later, target driver will clear
1876 		 * this vpkt->vpkt_path binding before re-init IO again.
1877 		 */
1878 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1879 		    "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
1880 		    (void *)vpkt, newpkt));
1881 		if (pkt && vpkt->vpkt_hba_pkt) {
1882 			VHCI_DEBUG(4, (CE_NOTE, NULL,
1883 			    "v_s_i_p calling update_pHCI_pkt resid %ld\n",
1884 			    pkt->pkt_resid));
1885 			vhci_update_pHCI_pkt(vpkt, pkt);
1886 		}
1887 		if (callback == SLEEP_FUNC) {
1888 			rval = vhci_bind_transport(
1889 			    ap, vpkt, flags, callback);
1890 		} else {
1891 			rval = vhci_bind_transport(
1892 			    ap, vpkt, flags, NULL_FUNC);
1893 		}
1894 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1895 		    "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
1896 		    (void *)vpkt, rval));
1897 		if (bp) {
1898 			if (rval == TRAN_FATAL_ERROR) {
1899 				/*
1900 				 * No paths available. Could not bind
1901 				 * any pHCI. Setting EFAULT as a way
1902 				 * to indicate no DMA is mapped.
1903 				 */
1904 				bioerror(bp, EFAULT);
1905 			} else {
1906 				/*
1907 				 * Do not indicate any pHCI errors to
1908 				 * target driver otherwise.
1909 				 */
1910 				bioerror(bp, 0);
1911 			}
1912 		}
1913 		if (rval != TRAN_ACCEPT) {
1914 			VHCI_DEBUG(8, (CE_NOTE, NULL,
1915 			    "vhci_scsi_init_pkt: "
1916 			    "v_b_t failed 0x%p newpkt %x\n",
1917 			    (void *)vpkt, newpkt));
1918 			if (newpkt) {
1919 				scsi_hba_pkt_free(ap,
1920 				    vpkt->vpkt_tgt_pkt);
1921 			}
1922 			return (NULL);
1923 		}
1924 		ASSERT(vpkt->vpkt_hba_pkt != NULL);
1925 		ASSERT(vpkt->vpkt_path != NULL);
1926 
1927 		/* Update the resid for the target driver */
1928 		vpkt->vpkt_tgt_pkt->pkt_resid =
1929 		    vpkt->vpkt_hba_pkt->pkt_resid;
1930 	}
1931 
1932 	return (vpkt->vpkt_tgt_pkt);
1933 }
1934 
1935 /*
1936  * Function name : vhci_scsi_destroy_pkt
1937  *
1938  * Return Values : none
1939  */
1940 static void
1941 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1942 {
1943 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
1944 
1945 	VHCI_DEBUG(8, (CE_NOTE, NULL,
1946 	    "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
1947 
1948 	vpkt->vpkt_tgt_init_pkt_flags = 0;
1949 	if (vpkt->vpkt_hba_pkt) {
1950 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1951 		vpkt->vpkt_hba_pkt = NULL;
1952 	}
1953 	if (vpkt->vpkt_path) {
1954 		mdi_rele_path(vpkt->vpkt_path);
1955 		vpkt->vpkt_path = NULL;
1956 	}
1957 
1958 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
1959 	scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
1960 }
1961 
1962 /*
1963  * Function name : vhci_scsi_dmafree()
1964  *
1965  * Return Values : none
1966  */
1967 /*ARGSUSED*/
1968 static void
1969 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1970 {
1971 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
1972 
1973 	VHCI_DEBUG(6, (CE_NOTE, NULL,
1974 	    "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
1975 
1976 	ASSERT(vpkt != NULL);
1977 	if (vpkt->vpkt_hba_pkt) {
1978 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1979 		vpkt->vpkt_hba_pkt = NULL;
1980 	}
1981 	if (vpkt->vpkt_path) {
1982 		mdi_rele_path(vpkt->vpkt_path);
1983 		vpkt->vpkt_path = NULL;
1984 	}
1985 }
1986 
1987 /*
1988  * Function name : vhci_scsi_sync_pkt()
1989  *
1990  * Return Values : none
1991  */
1992 /*ARGSUSED*/
1993 static void
1994 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1995 {
1996 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
1997 
1998 	ASSERT(vpkt != NULL);
1999 	if (vpkt->vpkt_hba_pkt) {
2000 		scsi_sync_pkt(vpkt->vpkt_hba_pkt);
2001 	}
2002 }
2003 
2004 /*
2005  * routine for reset notification setup, to register or cancel.
2006  */
2007 static int
2008 vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
2009 	void (*callback)(caddr_t), caddr_t arg)
2010 {
2011 	struct scsi_vhci *vhci = ADDR2VHCI(ap);
2012 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
2013 	    &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
2014 }
2015 
2016 static int
2017 vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
2018     char *name, int len, int bus_addr)
2019 {
2020 	dev_info_t		*cdip;
2021 	char			*guid;
2022 	scsi_vhci_lun_t		*vlun;
2023 
2024 	ASSERT(sd != NULL);
2025 	ASSERT(name != NULL);
2026 
2027 	cdip = sd->sd_dev;
2028 
2029 	ASSERT(cdip != NULL);
2030 
2031 	if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS) {
2032 		name[0] = '\0';
2033 		return (1);
2034 	}
2035 
2036 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
2037 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
2038 		name[0] = '\0';
2039 		return (1);
2040 	}
2041 
2042 	vlun = ADDR2VLUN(&sd->sd_address);
2043 	if (bus_addr && vlun && vlun->svl_fops_name) {
2044 		/* report the guid and the name of the failover module */
2045 		(void) snprintf(name, len, "g%s %s", guid, vlun->svl_fops_name);
2046 	} else {
2047 		/* report the guid */
2048 		(void) snprintf(name, len, "g%s", guid);
2049 	}
2050 
2051 	ddi_prop_free(guid);
2052 	return (1);
2053 }
2054 
2055 static int
2056 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
2057 {
2058 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
2059 }
2060 
2061 static int
2062 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
2063 {
2064 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
2065 }
2066 
2067 /*
2068  * Return a pointer to the guid part of the devnm.
2069  * devnm format is "nodename@busaddr", busaddr format is "gGUID".
2070  */
2071 static char *
2072 vhci_devnm_to_guid(char *devnm)
2073 {
2074 	char *cp = devnm;
2075 
2076 	if (devnm == NULL)
2077 		return (NULL);
2078 
2079 	while (*cp != '\0' && *cp != '@')
2080 		cp++;
2081 	if (*cp == '@' && *(cp + 1) == 'g')
2082 		return (cp + 2);
2083 	return (NULL);
2084 }
2085 
2086 static int
2087 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
2088     int (*func)(caddr_t))
2089 {
2090 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
2091 	dev_info_t		*cdip = ADDR2DIP(ap);
2092 	mdi_pathinfo_t		*pip = NULL;
2093 	mdi_pathinfo_t		*npip = NULL;
2094 	scsi_vhci_priv_t	*svp = NULL;
2095 	struct scsi_device	*psd = NULL;
2096 	struct scsi_address	*address = NULL;
2097 	struct scsi_pkt		*pkt = NULL;
2098 	int			rval = -1;
2099 	int			pgr_sema_held = 0;
2100 	int			held;
2101 	int			mps_flag = MDI_SELECT_ONLINE_PATH;
2102 	struct scsi_vhci_lun	*vlun;
2103 	time_t			tnow;
2104 
2105 	vlun = ADDR2VLUN(ap);
2106 	ASSERT(vlun != 0);
2107 
2108 	if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
2109 	    (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2110 	    VHCI_PROUT_REGISTER) ||
2111 	    ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2112 	    VHCI_PROUT_R_AND_IGNORE))) {
2113 		if (!sema_tryp(&vlun->svl_pgr_sema))
2114 			return (TRAN_BUSY);
2115 		pgr_sema_held = 1;
2116 		if (vlun->svl_first_path != NULL) {
2117 			rval = mdi_select_path(cdip, NULL,
2118 			    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2119 			    NULL, &pip);
2120 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2121 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2122 				    "vhci_bind_transport: path select fail\n"));
2123 			} else {
2124 				npip = pip;
2125 				do {
2126 					if (npip == vlun->svl_first_path) {
2127 						VHCI_DEBUG(4, (CE_NOTE, NULL,
2128 						    "vhci_bind_transport: "
2129 						    "valid first path 0x%p\n",
2130 						    (void *)
2131 						    vlun->svl_first_path));
2132 						pip = vlun->svl_first_path;
2133 						goto bind_path;
2134 					}
2135 					pip = npip;
2136 					rval = mdi_select_path(cdip, NULL,
2137 					    MDI_SELECT_ONLINE_PATH |
2138 					    MDI_SELECT_STANDBY_PATH,
2139 					    pip, &npip);
2140 					mdi_rele_path(pip);
2141 				} while ((rval == MDI_SUCCESS) &&
2142 				    (npip != NULL));
2143 			}
2144 		}
2145 
2146 		if (vlun->svl_first_path) {
2147 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2148 			    "vhci_bind_transport: invalid first path 0x%p\n",
2149 			    (void *)vlun->svl_first_path));
2150 			vlun->svl_first_path = NULL;
2151 		}
2152 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2153 		if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2154 			if (!sema_tryp(&vlun->svl_pgr_sema))
2155 				return (TRAN_BUSY);
2156 		}
2157 		pgr_sema_held = 1;
2158 	}
2159 
2160 	/*
2161 	 * If the path is already bound for PKT_PARTIAL_DMA case,
2162 	 * try to use the same path.
2163 	 */
2164 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2165 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2166 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2167 		    "vpkt 0x%p, path 0x%p\n",
2168 		    (void *)vpkt, (void *)vpkt->vpkt_path));
2169 		pip = vpkt->vpkt_path;
2170 		goto bind_path;
2171 	}
2172 
2173 	/*
2174 	 * If reservation is active bind the transport directly to the pip
2175 	 * with the reservation.
2176 	 */
2177 	if (vpkt->vpkt_hba_pkt == NULL) {
2178 		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2179 			if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2180 				pip = vlun->svl_resrv_pip;
2181 				mdi_hold_path(pip);
2182 				vlun->svl_waiting_for_activepath = 0;
2183 				rval = MDI_SUCCESS;
2184 				goto bind_path;
2185 			} else {
2186 				if (pgr_sema_held) {
2187 					sema_v(&vlun->svl_pgr_sema);
2188 				}
2189 				return (TRAN_BUSY);
2190 			}
2191 		}
2192 try_again:
2193 		rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp, 0, NULL,
2194 		    &pip);
2195 		if (rval == MDI_BUSY) {
2196 			if (pgr_sema_held) {
2197 				sema_v(&vlun->svl_pgr_sema);
2198 			}
2199 			return (TRAN_BUSY);
2200 		} else if (rval == MDI_DEVI_ONLINING) {
2201 			/*
2202 			 * if we are here then we are in the midst of
2203 			 * an attach/probe of the client device.
2204 			 * We attempt to bind to ONLINE path if available,
2205 			 * else it is OK to bind to a STANDBY path (instead
2206 			 * of triggering a failover) because IO associated
2207 			 * with attach/probe (eg. INQUIRY, block 0 read)
2208 			 * are completed by targets even on passive paths
2209 			 * If no ONLINE paths available, it is important
2210 			 * to set svl_waiting_for_activepath for two
2211 			 * reasons: (1) avoid sense analysis in the
2212 			 * "external failure detection" codepath in
2213 			 * vhci_intr().  Failure to do so will result in
2214 			 * infinite loop (unless an ONLINE path becomes
2215 			 * available at some point) (2) avoid
2216 			 * unnecessary failover (see "---Waiting For Active
2217 			 * Path---" comment below).
2218 			 */
2219 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2220 			    "state\n", (void *)cdip));
2221 			pip = NULL;
2222 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2223 			    mps_flag, NULL, &pip);
2224 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2225 				if (vlun->svl_waiting_for_activepath == 0) {
2226 					vlun->svl_waiting_for_activepath = 1;
2227 					vlun->svl_wfa_time = ddi_get_time();
2228 				}
2229 				mps_flag |= MDI_SELECT_STANDBY_PATH;
2230 				rval = mdi_select_path(cdip,
2231 				    vpkt->vpkt_tgt_init_bp,
2232 				    mps_flag, NULL, &pip);
2233 				if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2234 					if (pgr_sema_held) {
2235 						sema_v(&vlun->svl_pgr_sema);
2236 					}
2237 					return (TRAN_FATAL_ERROR);
2238 				}
2239 				goto bind_path;
2240 			}
2241 		} else if (rval == MDI_FAILURE) {
2242 			if (pgr_sema_held) {
2243 				sema_v(&vlun->svl_pgr_sema);
2244 			}
2245 			return (TRAN_FATAL_ERROR);
2246 		}
2247 
2248 		if ((pip == NULL) || (rval == MDI_NOPATH)) {
2249 			while (vlun->svl_waiting_for_activepath) {
2250 				/*
2251 				 * ---Waiting For Active Path---
2252 				 * This device was discovered across a
2253 				 * passive path; lets wait for a little
2254 				 * bit, hopefully an active path will
2255 				 * show up obviating the need for a
2256 				 * failover
2257 				 */
2258 				tnow = ddi_get_time();
2259 				if (tnow - vlun->svl_wfa_time >= 60) {
2260 					vlun->svl_waiting_for_activepath = 0;
2261 				} else {
2262 					drv_usecwait(1000);
2263 					if (vlun->svl_waiting_for_activepath
2264 					    == 0) {
2265 						/*
2266 						 * an active path has come
2267 						 * online!
2268 						 */
2269 						goto try_again;
2270 					}
2271 				}
2272 			}
2273 			VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2274 			if (!held) {
2275 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2276 				    "!Lun not held\n"));
2277 				if (pgr_sema_held) {
2278 					sema_v(&vlun->svl_pgr_sema);
2279 				}
2280 				return (TRAN_BUSY);
2281 			}
2282 			/*
2283 			 * now that the LUN is stable, one last check
2284 			 * to make sure no other changes sneaked in
2285 			 * (like a path coming online or a
2286 			 * failover initiated by another thread)
2287 			 */
2288 			pip = NULL;
2289 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2290 			    0, NULL, &pip);
2291 			if (pip != NULL) {
2292 				VHCI_RELEASE_LUN(vlun);
2293 				vlun->svl_waiting_for_activepath = 0;
2294 				goto bind_path;
2295 			}
2296 
2297 			/*
2298 			 * Check if there is an ONLINE path OR a STANDBY path
2299 			 * available. If none is available, do not attempt
2300 			 * to do a failover, just return a fatal error at this
2301 			 * point.
2302 			 */
2303 			npip = NULL;
2304 			rval = mdi_select_path(cdip, NULL,
2305 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2306 			    NULL, &npip);
2307 			if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2308 				/*
2309 				 * No paths available, jus return FATAL error.
2310 				 */
2311 				VHCI_RELEASE_LUN(vlun);
2312 				if (pgr_sema_held) {
2313 					sema_v(&vlun->svl_pgr_sema);
2314 				}
2315 				return (TRAN_FATAL_ERROR);
2316 			}
2317 			mdi_rele_path(npip);
2318 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2319 			    "mdi_failover\n"));
2320 			rval = mdi_failover(vhci->vhci_dip, cdip,
2321 			    MDI_FAILOVER_ASYNC);
2322 			if (rval == MDI_FAILURE) {
2323 				VHCI_RELEASE_LUN(vlun);
2324 				if (pgr_sema_held) {
2325 					sema_v(&vlun->svl_pgr_sema);
2326 				}
2327 				return (TRAN_FATAL_ERROR);
2328 			} else if (rval == MDI_BUSY) {
2329 				VHCI_RELEASE_LUN(vlun);
2330 				if (pgr_sema_held) {
2331 					sema_v(&vlun->svl_pgr_sema);
2332 				}
2333 				return (TRAN_BUSY);
2334 			} else {
2335 				if (pgr_sema_held) {
2336 					sema_v(&vlun->svl_pgr_sema);
2337 				}
2338 				return (TRAN_BUSY);
2339 			}
2340 		}
2341 		vlun->svl_waiting_for_activepath = 0;
2342 bind_path:
2343 		vpkt->vpkt_path = pip;
2344 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2345 		ASSERT(svp != NULL);
2346 
2347 		psd = svp->svp_psd;
2348 		ASSERT(psd != NULL);
2349 		address = &psd->sd_address;
2350 	} else {
2351 		pkt = vpkt->vpkt_hba_pkt;
2352 		address = &pkt->pkt_address;
2353 	}
2354 
2355 	/*
2356 	 * For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever
2357 	 * target driver calls vhci_scsi_init_pkt.
2358 	 */
2359 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2360 	    vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2361 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2362 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2363 		    "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2364 		    (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2365 		pkt = vpkt->vpkt_hba_pkt;
2366 		address = &pkt->pkt_address;
2367 	}
2368 
2369 	if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2370 		pkt = scsi_init_pkt(address, pkt,
2371 		    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2372 		    vpkt->vpkt_tgt_init_scblen,
2373 		    0, flags, func, NULL);
2374 
2375 		if (pkt == NULL) {
2376 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2377 			    "!bind transport: 0x%p 0x%p 0x%p\n",
2378 			    (void *)vhci, (void *)psd, (void *)vpkt));
2379 			if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2380 				MDI_PI_ERRSTAT(vpkt->vpkt_path,
2381 				    MDI_PI_TRANSERR);
2382 				mdi_rele_path(vpkt->vpkt_path);
2383 				vpkt->vpkt_path = NULL;
2384 			}
2385 			if (pgr_sema_held) {
2386 				sema_v(&vlun->svl_pgr_sema);
2387 			}
2388 			/*
2389 			 * Looks like a fatal error.
2390 			 * May be device disappeared underneath.
2391 			 * Give another chance to target driver for a retry to
2392 			 * get another path.
2393 			 */
2394 			return (TRAN_BUSY);
2395 		}
2396 	}
2397 
2398 	pkt->pkt_private = vpkt;
2399 	vpkt->vpkt_hba_pkt = pkt;
2400 	return (TRAN_ACCEPT);
2401 }
2402 
2403 
2404 /*PRINTFLIKE3*/
2405 void
2406 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2407 {
2408 	char		buf[256];
2409 	va_list		ap;
2410 
2411 	va_start(ap, fmt);
2412 	(void) vsprintf(buf, fmt, ap);
2413 	va_end(ap);
2414 
2415 	scsi_log(dip, "scsi_vhci", level, buf);
2416 }
2417 
2418 /* do a PGR out with the information we've saved away */
2419 static int
2420 vhci_do_prout(scsi_vhci_priv_t *svp)
2421 {
2422 
2423 	struct scsi_pkt			*new_pkt;
2424 	struct buf			*bp;
2425 	scsi_vhci_lun_t			*vlun;
2426 	int				rval, retry, nr_retry, ua_retry;
2427 	struct scsi_extended_sense	*sns;
2428 
2429 	bp = getrbuf(KM_SLEEP);
2430 	bp->b_flags = B_WRITE;
2431 	bp->b_resid = 0;
2432 
2433 	VHCI_INCR_PATH_CMDCOUNT(svp);
2434 	vlun = svp->svp_svl;
2435 
2436 	new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
2437 	    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
2438 	    SLEEP_FUNC, NULL);
2439 	if (new_pkt == NULL) {
2440 		VHCI_DECR_PATH_CMDCOUNT(svp);
2441 		freerbuf(bp);
2442 		cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
2443 		return (0);
2444 	}
2445 	mutex_enter(&vlun->svl_mutex);
2446 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2447 	bp->b_bcount = vlun->svl_bcount;
2448 	bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
2449 	    sizeof (vlun->svl_cdb));
2450 	new_pkt->pkt_time = vlun->svl_time;
2451 	mutex_exit(&vlun->svl_mutex);
2452 	new_pkt->pkt_flags = FLAG_NOINTR;
2453 
2454 	ua_retry = nr_retry = retry = 0;
2455 again:
2456 	rval = vhci_do_scsi_cmd(new_pkt);
2457 	if (rval != 1) {
2458 		if ((new_pkt->pkt_reason == CMD_CMPLT) &&
2459 		    (SCBP_C(new_pkt) == STATUS_CHECK) &&
2460 		    (new_pkt->pkt_state & STATE_ARQ_DONE)) {
2461 			sns = &(((struct scsi_arq_status *)(uintptr_t)
2462 			    (new_pkt->pkt_scbp))->sts_sensedata);
2463 			if ((sns->es_key == KEY_UNIT_ATTENTION) ||
2464 			    (sns->es_key == KEY_NOT_READY)) {
2465 				int max_retry;
2466 				struct scsi_failover_ops *fops;
2467 				fops = vlun->svl_fops;
2468 				rval = (*fops->sfo_analyze_sense)
2469 				    (svp->svp_psd, sns,
2470 				    vlun->svl_fops_ctpriv);
2471 				if (rval == SCSI_SENSE_NOT_READY) {
2472 					max_retry = vhci_prout_not_ready_retry;
2473 					retry = nr_retry++;
2474 					delay(1*drv_usectohz(1000000));
2475 				} else {
2476 					/* chk for state change and update */
2477 					if (rval == SCSI_SENSE_STATE_CHANGED) {
2478 						int held;
2479 						VHCI_HOLD_LUN(vlun,
2480 						    VH_NOSLEEP, held);
2481 						if (!held) {
2482 							rval = TRAN_BUSY;
2483 						} else {
2484 							/* chk for alua first */
2485 							vhci_update_pathstates(
2486 							    (void *)vlun);
2487 						}
2488 					}
2489 					retry = ua_retry++;
2490 					max_retry = VHCI_MAX_PGR_RETRIES;
2491 				}
2492 				if (retry < max_retry) {
2493 					VHCI_DEBUG(4, (CE_WARN, NULL,
2494 					    "!vhci_do_prout retry 0x%x "
2495 					    "(0x%x 0x%x 0x%x)",
2496 					    SCBP_C(new_pkt),
2497 					    new_pkt->pkt_cdbp[0],
2498 					    new_pkt->pkt_cdbp[1],
2499 					    new_pkt->pkt_cdbp[2]));
2500 					goto again;
2501 				}
2502 				rval = 0;
2503 				VHCI_DEBUG(4, (CE_WARN, NULL,
2504 				    "!vhci_do_prout 0x%x "
2505 				    "(0x%x 0x%x 0x%x)",
2506 				    SCBP_C(new_pkt),
2507 				    new_pkt->pkt_cdbp[0],
2508 				    new_pkt->pkt_cdbp[1],
2509 				    new_pkt->pkt_cdbp[2]));
2510 			} else if (sns->es_key == KEY_ILLEGAL_REQUEST)
2511 				rval = VHCI_PGR_ILLEGALOP;
2512 		}
2513 	} else {
2514 		rval = 1;
2515 	}
2516 	scsi_destroy_pkt(new_pkt);
2517 	VHCI_DECR_PATH_CMDCOUNT(svp);
2518 	freerbuf(bp);
2519 	return (rval);
2520 }
2521 
2522 static void
2523 vhci_run_cmd(void *arg)
2524 {
2525 	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
2526 	struct scsi_pkt		*tpkt;
2527 	scsi_vhci_priv_t	*svp;
2528 	mdi_pathinfo_t		*pip, *npip;
2529 	scsi_vhci_lun_t		*vlun;
2530 	dev_info_t		*cdip;
2531 	scsi_vhci_priv_t	*nsvp;
2532 	int			fail = 0;
2533 	int			rval;
2534 	struct vhci_pkt		*vpkt;
2535 	uchar_t			cdb_1;
2536 	vhci_prout_t		*prout;
2537 
2538 	vpkt = (struct vhci_pkt *)pkt->pkt_private;
2539 	tpkt = vpkt->vpkt_tgt_pkt;
2540 	pip = vpkt->vpkt_path;
2541 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2542 	if (svp == NULL) {
2543 		tpkt->pkt_reason = CMD_TRAN_ERR;
2544 		tpkt->pkt_statistics = STAT_ABORTED;
2545 		goto done;
2546 	}
2547 	vlun = svp->svp_svl;
2548 	prout = &vlun->svl_prout;
2549 	if (SCBP_C(pkt) != STATUS_GOOD)
2550 		fail++;
2551 	cdip = vlun->svl_dip;
2552 	pip = npip = NULL;
2553 	rval = mdi_select_path(cdip, NULL,
2554 	    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip);
2555 	if ((rval != MDI_SUCCESS) || (npip == NULL)) {
2556 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2557 		    "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
2558 		tpkt->pkt_reason = CMD_TRAN_ERR;
2559 		tpkt->pkt_statistics = STAT_ABORTED;
2560 		goto done;
2561 	}
2562 
2563 	cdb_1 = vlun->svl_cdb[1];
2564 	vlun->svl_cdb[1] &= 0xe0;
2565 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
2566 
2567 	do {
2568 		nsvp = (scsi_vhci_priv_t *)
2569 		    mdi_pi_get_vhci_private(npip);
2570 		if (nsvp == NULL) {
2571 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2572 			    "vhci_run_cmd: no "
2573 			    "client priv! 0x%p offlined?\n",
2574 			    (void *)npip));
2575 			goto next_path;
2576 		}
2577 		if (vlun->svl_first_path == npip) {
2578 			goto next_path;
2579 		} else {
2580 			if (vhci_do_prout(nsvp) != 1)
2581 				fail++;
2582 		}
2583 next_path:
2584 		pip = npip;
2585 		rval = mdi_select_path(cdip, NULL,
2586 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
2587 		    pip, &npip);
2588 		mdi_rele_path(pip);
2589 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
2590 
2591 	vlun->svl_cdb[1] = cdb_1;
2592 
2593 	if (fail) {
2594 		VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
2595 		    "couldn't be replicated on all paths",
2596 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
2597 		vhci_print_prout_keys(vlun, "vhci_run_cmd: ");
2598 
2599 		if (SCBP_C(pkt) != STATUS_GOOD) {
2600 			tpkt->pkt_reason = CMD_TRAN_ERR;
2601 			tpkt->pkt_statistics = STAT_ABORTED;
2602 		}
2603 	} else {
2604 		vlun->svl_pgr_active = 1;
2605 		vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");
2606 
2607 		bcopy((const void *)prout->service_key,
2608 		    (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
2609 		bcopy((const void *)prout->res_key,
2610 		    (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);
2611 
2612 		vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
2613 	}
2614 done:
2615 	if (SCBP_C(pkt) == STATUS_GOOD)
2616 		vlun->svl_first_path = NULL;
2617 
2618 	if (svp)
2619 		VHCI_DECR_PATH_CMDCOUNT(svp);
2620 
2621 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
2622 		scsi_destroy_pkt(pkt);
2623 		vpkt->vpkt_hba_pkt = NULL;
2624 		if (vpkt->vpkt_path) {
2625 			mdi_rele_path(vpkt->vpkt_path);
2626 			vpkt->vpkt_path = NULL;
2627 		}
2628 	}
2629 
2630 	sema_v(&vlun->svl_pgr_sema);
2631 	/*
2632 	 * The PROUT commands are not included in the automatic retry
2633 	 * mechanism, therefore, vpkt_org_vpkt should never be set here.
2634 	 */
2635 	ASSERT(vpkt->vpkt_org_vpkt == NULL);
2636 	if (tpkt->pkt_comp)
2637 		(*tpkt->pkt_comp)(tpkt);
2638 
2639 }
2640 
2641 /*
2642  * Get the keys registered with this target.  Since we will have
2643  * registered the same key with multiple initiators, strip out
2644  * any duplicate keys.
2645  *
2646  * The pointers which will be used to filter the registered keys from
2647  * the device will be stored in filter_prin and filter_pkt.  If the
2648  * allocation length of the buffer was sufficient for the number of
2649  * parameter data bytes available to be returned by the device then the
2650  * key filtering will use the keylist returned from the original
2651  * request.  If the allocation length of the buffer was not sufficient,
2652  * then the filtering will use the keylist returned from the request
2653  * that is resent below.
2654  *
2655  * If the device returns an additional length field that is greater than
2656  * the allocation length of the buffer, then allocate a new buffer which
2657  * can accommodate the number of parameter data bytes available to be
2658  * returned.  Resend the scsi PRIN command, filter out the duplicate
2659  * keys and return as many of the unique keys found that was originally
2660  * requested and set the additional length field equal to the data bytes
2661  * of unique reservation keys available to be returned.
2662  *
2663  * If the device returns an additional length field that is less than or
2664  * equal to the allocation length of the buffer, then all the available
2665  * keys registered were returned by the device.  Filter out the
2666  * duplicate keys and return all of the unique keys found and set the
2667  * additional length field equal to the data bytes of the reservation
2668  * keys to be returned.
2669  */
2670 static int
2671 vhci_do_prin(struct vhci_pkt **vpkt)
2672 {
2673 	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
2674 	    mdi_pi_get_vhci_private((*vpkt)->vpkt_path);
2675 	vhci_prin_readkeys_t *prin;
2676 	scsi_vhci_lun_t *vlun = svp->svp_svl;
2677 	struct scsi_vhci *vhci =
2678 	    ADDR2VHCI(&((*vpkt)->vpkt_tgt_pkt->pkt_address));
2679 
2680 	struct buf		*new_bp = NULL;
2681 	struct scsi_pkt		*new_pkt = NULL;
2682 	struct vhci_pkt		*new_vpkt = NULL;
2683 	int			hdr_len = 0;
2684 	int			rval = VHCI_CMD_CMPLT;
2685 	uint32_t		prin_length = 0;
2686 	uint32_t		svl_prin_length = 0;
2687 
2688 	prin = (vhci_prin_readkeys_t *)
2689 	    bp_mapin_common((*vpkt)->vpkt_tgt_init_bp, VM_NOSLEEP);
2690 
2691 	if (prin != NULL) {
2692 		prin_length = BE_32(prin->length);
2693 	}
2694 
2695 	if (prin == NULL) {
2696 		VHCI_DEBUG(5, (CE_WARN, NULL,
2697 		    "vhci_do_prin: bp_mapin_common failed."));
2698 		rval = VHCI_CMD_ERROR;
2699 	} else {
2700 		/*
2701 		 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2702 		 * information to be transferred exceeds the maximum value
2703 		 * that the ALLOCATION LENGTH field is capable of specifying,
2704 		 * the device server shall...terminate the command with CHECK
2705 		 * CONDITION status".  The ALLOCATION LENGTH field of the
2706 		 * PERSISTENT RESERVE IN command is 2 bytes. We should never
2707 		 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
2708 		 * so if we do, then it is an error!
2709 		 */
2710 
2711 		hdr_len = sizeof (prin->length) + sizeof (prin->generation);
2712 
2713 		if ((prin_length + hdr_len) > 0xFFFF) {
2714 			VHCI_DEBUG(5, (CE_NOTE, NULL,
2715 			    "vhci_do_prin: Device returned invalid "
2716 			    "length 0x%x\n", prin_length));
2717 			rval = VHCI_CMD_ERROR;
2718 		}
2719 	}
2720 
2721 	/*
2722 	 * If prin->length is greater than the byte count allocated in the
2723 	 * original buffer, then resend the request with enough buffer
2724 	 * allocated to get all of the available registered keys.
2725 	 */
2726 	if (rval != VHCI_CMD_ERROR) {
2727 		if (((*vpkt)->vpkt_tgt_init_bp->b_bcount - hdr_len) <
2728 		    prin_length) {
2729 			if ((*vpkt)->vpkt_org_vpkt == NULL) {
2730 				new_pkt = vhci_create_retry_pkt(*vpkt);
2731 				if (new_pkt != NULL) {
2732 					new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2733 
2734 					/*
2735 					 * This is the buf with buffer pointer
2736 					 * where the prin readkeys will be
2737 					 * returned from the device
2738 					 */
2739 					new_bp = scsi_alloc_consistent_buf(
2740 					    &svp->svp_psd->sd_address,
2741 					    NULL, (prin_length + hdr_len),
2742 					    ((*vpkt)->vpkt_tgt_init_bp->
2743 					    b_flags & (B_READ | B_WRITE)),
2744 					    NULL_FUNC, NULL);
2745 					if (new_bp != NULL) {
2746 						if (new_bp->b_un.b_addr !=
2747 						    NULL) {
2748 
2749 							new_bp->b_bcount =
2750 							    prin_length +
2751 							    hdr_len;
2752 
2753 							new_pkt->pkt_cdbp[7] =
2754 							    (uchar_t)(new_bp->
2755 							    b_bcount >> 8);
2756 							new_pkt->pkt_cdbp[8] =
2757 							    (uchar_t)new_bp->
2758 							    b_bcount;
2759 
2760 							rval = VHCI_CMD_RETRY;
2761 						} else {
2762 							rval = VHCI_CMD_ERROR;
2763 						}
2764 					} else {
2765 						rval = VHCI_CMD_ERROR;
2766 					}
2767 				} else {
2768 					rval = VHCI_CMD_ERROR;
2769 				}
2770 			} else {
2771 				rval = VHCI_CMD_ERROR;
2772 			}
2773 		}
2774 	}
2775 
2776 	if (rval == VHCI_CMD_RETRY) {
2777 		new_vpkt->vpkt_tgt_init_bp = new_bp;
2778 
2779 		/*
2780 		 * Release the old path because it does not matter which path
2781 		 * this command is sent down.  This allows the normal bind
2782 		 * transport mechanism to be used.
2783 		 */
2784 		if ((*vpkt)->vpkt_path != NULL) {
2785 			mdi_rele_path((*vpkt)->vpkt_path);
2786 			(*vpkt)->vpkt_path = NULL;
2787 		}
2788 
2789 		/*
2790 		 * Dispatch the retry command
2791 		 */
2792 		if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2793 		    (void *) new_vpkt, KM_NOSLEEP) == NULL) {
2794 			rval = VHCI_CMD_ERROR;
2795 		} else {
2796 			/*
2797 			 * If we return VHCI_CMD_RETRY, that means the caller
2798 			 * is going to bail and wait for the reissued command
2799 			 * to complete.  In that case, we need to decrement
2800 			 * the path command count right now.  In any other
2801 			 * case, it'll be decremented by the caller.
2802 			 */
2803 			VHCI_DECR_PATH_CMDCOUNT(svp);
2804 		}
2805 	}
2806 
2807 	if ((rval != VHCI_CMD_ERROR) && (rval != VHCI_CMD_RETRY)) {
2808 		int new, old;
2809 		int data_len = 0;
2810 
2811 		data_len = prin_length / MHIOC_RESV_KEY_SIZE;
2812 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
2813 		    data_len));
2814 
2815 #ifdef DEBUG
2816 		VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
2817 		if (vhci_debug == 5)
2818 			vhci_print_prin_keys(prin, data_len);
2819 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2820 		    "vhci_do_prin: MPxIO old keys:\n"));
2821 		if (vhci_debug == 5)
2822 			vhci_print_prin_keys(&vlun->svl_prin, data_len);
2823 #endif
2824 
2825 		/*
2826 		 * Filter out all duplicate keys returned from the device
2827 		 * We know that we use a different key for every host, so we
2828 		 * can simply strip out duplicates. Otherwise we would need to
2829 		 * do more bookkeeping to figure out which keys to strip out.
2830 		 */
2831 
2832 		new = 0;
2833 
2834 		if (data_len > 0) {
2835 			vlun->svl_prin.keylist[0] = prin->keylist[0];
2836 			new++;
2837 		}
2838 
2839 		for (old = 1; old < data_len; old++) {
2840 			int j;
2841 			int match = 0;
2842 			for (j = 0; j < new; j++) {
2843 				if (bcmp(&prin->keylist[old],
2844 				    &vlun->svl_prin.keylist[j],
2845 				    sizeof (mhioc_resv_key_t)) == 0) {
2846 					match = 1;
2847 					break;
2848 				}
2849 			}
2850 			if (!match) {
2851 				vlun->svl_prin.keylist[new] =
2852 				    prin->keylist[old];
2853 				new++;
2854 			}
2855 		}
2856 
2857 		vlun->svl_prin.generation = prin->generation;
2858 		svl_prin_length = new * MHIOC_RESV_KEY_SIZE;
2859 		vlun->svl_prin.length = BE_32(svl_prin_length);
2860 
2861 		/*
2862 		 * If we arrived at this point after issuing a retry, make sure
2863 		 * that we put everything back the way it originally was so
2864 		 * that the target driver can complete the command correctly.
2865 		 */
2866 		if ((*vpkt)->vpkt_org_vpkt != NULL) {
2867 			new_bp = (*vpkt)->vpkt_tgt_init_bp;
2868 
2869 			scsi_free_consistent_buf(new_bp);
2870 
2871 			*vpkt = vhci_sync_retry_pkt(*vpkt);
2872 
2873 			/*
2874 			 * Make sure the original buffer is mapped into kernel
2875 			 * space before we try to copy the filtered keys into
2876 			 * it.
2877 			 */
2878 			prin = (vhci_prin_readkeys_t *)bp_mapin_common(
2879 			    (*vpkt)->vpkt_tgt_init_bp, VM_NOSLEEP);
2880 		}
2881 
2882 		/*
2883 		 * Now copy the desired number of prin keys into the original
2884 		 * target buffer.
2885 		 */
2886 		if (svl_prin_length <=
2887 		    ((*vpkt)->vpkt_tgt_init_bp->b_bcount - hdr_len)) {
2888 			/*
2889 			 * It is safe to return all of the available unique
2890 			 * keys
2891 			 */
2892 			bcopy(&vlun->svl_prin, prin, svl_prin_length + hdr_len);
2893 		} else {
2894 			/*
2895 			 * Not all of the available keys were requested by the
2896 			 * original command.
2897 			 */
2898 			bcopy(&vlun->svl_prin, prin,
2899 			    (*vpkt)->vpkt_tgt_init_bp->b_bcount);
2900 		}
2901 #ifdef DEBUG
2902 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2903 		    "vhci_do_prin: To Application:\n"));
2904 		if (vhci_debug == 5)
2905 			vhci_print_prin_keys(prin, new);
2906 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2907 		    "vhci_do_prin: MPxIO new keys:\n"));
2908 		if (vhci_debug == 5)
2909 			vhci_print_prin_keys(&vlun->svl_prin, new);
2910 #endif
2911 	}
2912 
2913 	if (rval == VHCI_CMD_ERROR) {
2914 		/*
2915 		 * If we arrived at this point after issuing a
2916 		 * retry, make sure that we put everything back
2917 		 * the way it originally was so that ssd can
2918 		 * complete the command correctly.
2919 		 */
2920 
2921 		if ((*vpkt)->vpkt_org_vpkt != NULL) {
2922 			new_bp = (*vpkt)->vpkt_tgt_init_bp;
2923 			if (new_bp != NULL) {
2924 				scsi_free_consistent_buf(new_bp);
2925 			}
2926 
2927 			new_vpkt = *vpkt;
2928 			*vpkt = (*vpkt)->vpkt_org_vpkt;
2929 
2930 			vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
2931 			    new_vpkt->vpkt_tgt_pkt);
2932 		}
2933 
2934 		/*
2935 		 * Mark this command completion as having an error so that
2936 		 * ssd will retry the command.
2937 		 */
2938 
2939 		(*vpkt)->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
2940 		(*vpkt)->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
2941 
2942 		rval = VHCI_CMD_CMPLT;
2943 	}
2944 
2945 	/*
2946 	 * Make sure that the semaphore is only released once.
2947 	 */
2948 	if (rval == VHCI_CMD_CMPLT) {
2949 		sema_v(&vlun->svl_pgr_sema);
2950 	}
2951 
2952 	return (rval);
2953 }
2954 
2955 static void
2956 vhci_intr(struct scsi_pkt *pkt)
2957 {
2958 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
2959 	struct scsi_pkt		*tpkt;
2960 	scsi_vhci_priv_t	*svp;
2961 	scsi_vhci_lun_t		*vlun;
2962 	int			rval, held;
2963 	struct scsi_failover_ops	*fops;
2964 	struct scsi_extended_sense	*sns;
2965 	mdi_pathinfo_t		*lpath;
2966 	static char		*timeout_err = "Command Timeout";
2967 	static char		*parity_err = "Parity Error";
2968 	char			*err_str = NULL;
2969 	dev_info_t		*vdip, *cdip, *pdip;
2970 	char			*cpath, *dpath;
2971 
2972 	ASSERT(vpkt != NULL);
2973 	tpkt = vpkt->vpkt_tgt_pkt;
2974 	ASSERT(tpkt != NULL);
2975 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
2976 	ASSERT(svp != NULL);
2977 	vlun = svp->svp_svl;
2978 	ASSERT(vlun != NULL);
2979 	lpath = vpkt->vpkt_path;
2980 
2981 	/*
2982 	 * sync up the target driver's pkt with the pkt that
2983 	 * we actually used
2984 	 */
2985 	*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
2986 	tpkt->pkt_resid = pkt->pkt_resid;
2987 	tpkt->pkt_state = pkt->pkt_state;
2988 	tpkt->pkt_statistics = pkt->pkt_statistics;
2989 	tpkt->pkt_reason = pkt->pkt_reason;
2990 
2991 	if (pkt->pkt_cdbp[0] == SCMD_PROUT &&
2992 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
2993 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE)) {
2994 		if ((SCBP_C(pkt) != STATUS_GOOD) ||
2995 		    (pkt->pkt_reason != CMD_CMPLT)) {
2996 			sema_v(&vlun->svl_pgr_sema);
2997 		}
2998 	} else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
2999 		if (pkt->pkt_reason != CMD_CMPLT ||
3000 		    (SCBP_C(pkt) != STATUS_GOOD)) {
3001 			sema_v(&vlun->svl_pgr_sema);
3002 		}
3003 	}
3004 
3005 	switch (pkt->pkt_reason) {
3006 	case CMD_CMPLT:
3007 		/*
3008 		 * cmd completed successfully, check for scsi errors
3009 		 */
3010 		switch (*(pkt->pkt_scbp)) {
3011 		case STATUS_CHECK:
3012 			if (pkt->pkt_state & STATE_ARQ_DONE) {
3013 				sns = &(((struct scsi_arq_status *)(uintptr_t)
3014 				    (pkt->pkt_scbp))->sts_sensedata);
3015 				fops = vlun->svl_fops;
3016 				ASSERT(fops != NULL);
3017 				VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3018 				    "Received sns key %x  esc %x  escq %x\n",
3019 				    sns->es_key, sns->es_add_code,
3020 				    sns->es_qual_code));
3021 
3022 				if (vlun->svl_waiting_for_activepath == 1) {
3023 					/*
3024 					 * if we are here it means we are
3025 					 * in the midst of a probe/attach
3026 					 * through a passive path; this
3027 					 * case is exempt from sense analysis
3028 					 * for detection of ext. failover
3029 					 * because that would unnecessarily
3030 					 * increase attach time.
3031 					 */
3032 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3033 					    vpkt->vpkt_tgt_init_scblen);
3034 					break;
3035 				}
3036 				if (sns->es_add_code == VHCI_SCSI_PERR) {
3037 					/*
3038 					 * parity error
3039 					 */
3040 					err_str = parity_err;
3041 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3042 					    vpkt->vpkt_tgt_init_scblen);
3043 					break;
3044 				}
3045 				rval = (*fops->sfo_analyze_sense)
3046 				    (svp->svp_psd, sns, vlun->svl_fops_ctpriv);
3047 				if ((rval == SCSI_SENSE_NOFAILOVER) ||
3048 				    (rval == SCSI_SENSE_UNKNOWN) ||
3049 				    (rval == SCSI_SENSE_NOT_READY)) {
3050 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3051 					    vpkt->vpkt_tgt_init_scblen);
3052 					break;
3053 				} else if (rval == SCSI_SENSE_STATE_CHANGED) {
3054 					struct scsi_vhci	*vhci;
3055 					vhci = ADDR2VHCI(&tpkt->pkt_address);
3056 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3057 					if (!held) {
3058 						/*
3059 						 * looks like some other thread
3060 						 * has already detected this
3061 						 * condition
3062 						 */
3063 						tpkt->pkt_state &=
3064 						    ~STATE_ARQ_DONE;
3065 						*(tpkt->pkt_scbp) =
3066 						    STATUS_BUSY;
3067 						break;
3068 					}
3069 					(void) taskq_dispatch(
3070 					    vhci->vhci_update_pathstates_taskq,
3071 					    vhci_update_pathstates,
3072 					    (void *)vlun, KM_SLEEP);
3073 				} else {
3074 					/*
3075 					 * externally initiated failover
3076 					 * has occurred or is in progress
3077 					 */
3078 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3079 					if (!held) {
3080 						/*
3081 						 * looks like some other thread
3082 						 * has already detected this
3083 						 * condition
3084 						 */
3085 						tpkt->pkt_state &=
3086 						    ~STATE_ARQ_DONE;
3087 						*(tpkt->pkt_scbp) =
3088 						    STATUS_BUSY;
3089 						break;
3090 					} else {
3091 						rval = vhci_handle_ext_fo
3092 						    (pkt, rval);
3093 						if (rval == BUSY_RETURN) {
3094 							tpkt->pkt_state &=
3095 							    ~STATE_ARQ_DONE;
3096 							*(tpkt->pkt_scbp) =
3097 							    STATUS_BUSY;
3098 							break;
3099 						}
3100 						bcopy(pkt->pkt_scbp,
3101 						    tpkt->pkt_scbp,
3102 						    vpkt->vpkt_tgt_init_scblen);
3103 						break;
3104 					}
3105 				}
3106 			}
3107 			break;
3108 
3109 		/*
3110 		 * If this is a good SCSI-II RELEASE cmd completion then restore
3111 		 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3112 		 * If this is a good SCSI-II RESERVE cmd completion then set
3113 		 * VLUN_RESERVE_ACTIVE_FLG.
3114 		 */
3115 		case STATUS_GOOD:
3116 			if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3117 			    (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3118 				(void) mdi_set_lb_policy(vlun->svl_dip,
3119 				    vlun->svl_lb_policy_save);
3120 				vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3121 				VHCI_DEBUG(1, (CE_WARN, NULL,
3122 				    "!vhci_intr: vlun 0x%p release path 0x%p",
3123 				    (void *)vlun, (void *)vpkt->vpkt_path));
3124 			}
3125 
3126 			if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3127 			    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3128 				vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3129 				vlun->svl_resrv_pip = vpkt->vpkt_path;
3130 				VHCI_DEBUG(1, (CE_WARN, NULL,
3131 				    "!vhci_intr: vlun 0x%p reserved path 0x%p",
3132 				    (void *)vlun, (void *)vpkt->vpkt_path));
3133 			}
3134 			break;
3135 
3136 		case STATUS_RESERVATION_CONFLICT:
3137 			VHCI_DEBUG(1, (CE_WARN, NULL,
3138 			    "!vhci_intr: vlun 0x%p "
3139 			    "reserve conflict on path 0x%p",
3140 			    (void *)vlun, (void *)vpkt->vpkt_path));
3141 			/* FALLTHROUGH */
3142 		default:
3143 			break;
3144 		}
3145 
3146 		/*
3147 		 * Update I/O completion statistics for the path
3148 		 */
3149 		mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3150 
3151 		/*
3152 		 * Command completed successfully, release the dma binding and
3153 		 * destroy the transport side of the packet.
3154 		 */
3155 		if ((pkt->pkt_cdbp[0] ==  SCMD_PROUT) &&
3156 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3157 		    ((pkt->pkt_cdbp[1] & 0x1f) ==
3158 		    VHCI_PROUT_R_AND_IGNORE))) {
3159 			if (SCBP_C(pkt) == STATUS_GOOD) {
3160 				ASSERT(vlun->svl_taskq);
3161 				svp->svp_last_pkt_reason = pkt->pkt_reason;
3162 				(void) taskq_dispatch(vlun->svl_taskq,
3163 				    vhci_run_cmd, pkt, KM_SLEEP);
3164 				return;
3165 			}
3166 		}
3167 		if ((SCBP_C(pkt) == STATUS_GOOD) &&
3168 		    (pkt->pkt_cdbp[0] == SCMD_PRIN) &&
3169 		    vpkt->vpkt_tgt_init_bp) {
3170 			/*
3171 			 * If the action (value in byte 1 of the cdb) is zero,
3172 			 * we're reading keys, and that's the only condition
3173 			 * where we need to be concerned with filtering keys
3174 			 * and potential retries.  Otherwise, we simply signal
3175 			 * the semaphore and move on.
3176 			 */
3177 			if (pkt->pkt_cdbp[1] == 0) {
3178 				/*
3179 				 * If this is the completion of an internal
3180 				 * retry then we need to make sure that the
3181 				 * pkt and tpkt pointers are readjusted so
3182 				 * the calls to scsi_destroy_pkt and pkt_comp
3183 				 * below work * correctly.
3184 				 */
3185 				if (vpkt->vpkt_org_vpkt != NULL) {
3186 					pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3187 					tpkt = vpkt->vpkt_org_vpkt->
3188 					    vpkt_tgt_pkt;
3189 
3190 					/*
3191 					 * If this command was issued through
3192 					 * the taskq then we need to clear
3193 					 * this flag for proper processing in
3194 					 * the case of a retry from the target
3195 					 * driver.
3196 					 */
3197 					vpkt->vpkt_state &=
3198 					    ~VHCI_PKT_THRU_TASKQ;
3199 				}
3200 
3201 				/*
3202 				 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3203 				 * vpkt will contain the address of the
3204 				 * original vpkt
3205 				 */
3206 				if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3207 					/*
3208 					 * The command has been resent to get
3209 					 * all the keys from the device.  Don't
3210 					 * complete the command with ssd until
3211 					 * the retry completes.
3212 					 */
3213 					return;
3214 				}
3215 			} else {
3216 				sema_v(&vlun->svl_pgr_sema);
3217 			}
3218 		}
3219 
3220 		break;
3221 
3222 	case CMD_TIMEOUT:
3223 		if ((pkt->pkt_statistics &
3224 		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
3225 
3226 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3227 			    "!scsi vhci timeout invoked\n"));
3228 
3229 			(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3230 			    FALSE, VHCI_DEPTH_ALL);
3231 		}
3232 		MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3233 		tpkt->pkt_statistics |= STAT_ABORTED;
3234 		err_str = timeout_err;
3235 		break;
3236 
3237 	case CMD_TRAN_ERR:
3238 		/*
3239 		 * This status is returned if the transport has sent the cmd
3240 		 * down the link to the target and then some error occurs.
3241 		 * In case of SCSI-II RESERVE cmd, we don't know if the
3242 		 * reservation been accepted by the target or not, so we need
3243 		 * to clear the reservation.
3244 		 */
3245 		if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3246 		    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3247 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3248 			    " cmd_tran_err for scsi-2 reserve cmd\n"));
3249 			if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3250 			    TRUE, VHCI_DEPTH_TARGET)) {
3251 				VHCI_DEBUG(1, (CE_WARN, NULL,
3252 				    "!vhci_intr cmd_tran_err reset failed!"));
3253 			}
3254 		}
3255 		break;
3256 
3257 	case CMD_DEV_GONE:
3258 		tpkt->pkt_reason = CMD_CMPLT;
3259 		tpkt->pkt_state = STATE_GOT_BUS |
3260 		    STATE_GOT_TARGET | STATE_SENT_CMD |
3261 		    STATE_GOT_STATUS;
3262 		*(tpkt->pkt_scbp) = STATUS_BUSY;
3263 		break;
3264 
3265 	default:
3266 		break;
3267 	}
3268 
3269 	/*
3270 	 * SCSI-II RESERVE cmd has been serviced by the lower layers clear
3271 	 * the flag so the lun is not QUIESCED any longer.
3272 	 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3273 	 * is retried, a taskq shall again be dispatched to service it.  Else
3274 	 * it may lead to a system hang if the retry is within interrupt
3275 	 * context.
3276 	 */
3277 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3278 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3279 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3280 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3281 	}
3282 
3283 	/*
3284 	 * vpkt_org_vpkt should always be NULL here if the retry command
3285 	 * has been successfully processed.  If vpkt_org_vpkt != NULL at
3286 	 * this point, it is an error so restore the original vpkt and
3287 	 * return an error to the target driver so it can retry the
3288 	 * command as appropriate.
3289 	 */
3290 	if (vpkt->vpkt_org_vpkt != NULL) {
3291 		struct vhci_pkt *new_vpkt = vpkt;
3292 		vpkt = vpkt->vpkt_org_vpkt;
3293 
3294 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3295 		    new_vpkt->vpkt_tgt_pkt);
3296 
3297 		/*
3298 		 * Mark this command completion as having an error so that
3299 		 * ssd will retry the command.
3300 		 */
3301 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3302 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3303 
3304 		pkt = vpkt->vpkt_hba_pkt;
3305 		tpkt = vpkt->vpkt_tgt_pkt;
3306 	}
3307 
3308 	if ((err_str != NULL) && (pkt->pkt_reason !=
3309 	    svp->svp_last_pkt_reason)) {
3310 		cdip = vlun->svl_dip;
3311 		pdip = mdi_pi_get_phci(vpkt->vpkt_path);
3312 		vdip = ddi_get_parent(cdip);
3313 		cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3314 		dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3315 		vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s (%s%d)",
3316 		    ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
3317 		    ddi_get_instance(cdip), err_str,
3318 		    ddi_pathname(pdip, dpath), ddi_driver_name(pdip),
3319 		    ddi_get_instance(pdip));
3320 		kmem_free(cpath, MAXPATHLEN);
3321 		kmem_free(dpath, MAXPATHLEN);
3322 	}
3323 	svp->svp_last_pkt_reason = pkt->pkt_reason;
3324 	VHCI_DECR_PATH_CMDCOUNT(svp);
3325 
3326 	/*
3327 	 * For PARTIAL_DMA, vhci should not free the path.
3328 	 * Target driver will call into vhci_scsi_dmafree or
3329 	 * destroy pkt to release this path.
3330 	 */
3331 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
3332 		scsi_destroy_pkt(pkt);
3333 		vpkt->vpkt_hba_pkt = NULL;
3334 		if (vpkt->vpkt_path) {
3335 			mdi_rele_path(vpkt->vpkt_path);
3336 			vpkt->vpkt_path = NULL;
3337 		}
3338 	}
3339 
3340 	if (tpkt->pkt_comp) {
3341 		(*tpkt->pkt_comp)(tpkt);
3342 	}
3343 }
3344 
3345 /*
3346  * two possibilities: (1) failover has completed
3347  * or (2) is in progress; update our path states for
3348  * the former case; for the latter case,
3349  * initiate a scsi_watch request to
3350  * determine when failover completes - vlun is HELD
3351  * until failover completes; BUSY is returned to upper
3352  * layer in both the cases
3353  */
3354 static int
3355 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
3356 {
3357 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3358 	struct scsi_pkt		*tpkt;
3359 	scsi_vhci_priv_t	*svp;
3360 	scsi_vhci_lun_t		*vlun;
3361 	struct scsi_vhci	*vhci;
3362 	scsi_vhci_swarg_t	*swarg;
3363 	char			*path;
3364 
3365 	ASSERT(vpkt != NULL);
3366 	tpkt = vpkt->vpkt_tgt_pkt;
3367 	ASSERT(tpkt != NULL);
3368 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3369 	ASSERT(svp != NULL);
3370 	vlun = svp->svp_svl;
3371 	ASSERT(vlun != NULL);
3372 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3373 
3374 	vhci = ADDR2VHCI(&tpkt->pkt_address);
3375 
3376 	if (fostat == SCSI_SENSE_INACTIVE) {
3377 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
3378 		    "detected for %s; updating path states...\n",
3379 		    vlun->svl_lun_wwn));
3380 		/*
3381 		 * set the vlun flag to indicate to the task that the target
3382 		 * port group needs updating
3383 		 */
3384 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3385 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3386 		    vhci_update_pathstates, (void *)vlun, KM_SLEEP);
3387 	} else {
3388 		path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3389 		vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
3390 		    "!%s (%s%d): Waiting for externally initiated failover "
3391 		    "to complete", ddi_pathname(vlun->svl_dip, path),
3392 		    ddi_driver_name(vlun->svl_dip),
3393 		    ddi_get_instance(vlun->svl_dip));
3394 		kmem_free(path, MAXPATHLEN);
3395 		swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
3396 		if (swarg == NULL) {
3397 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
3398 			    "request packet allocation for %s failed....\n",
3399 			    vlun->svl_lun_wwn));
3400 			VHCI_RELEASE_LUN(vlun);
3401 			return (PKT_RETURN);
3402 		}
3403 		swarg->svs_svp = svp;
3404 		swarg->svs_tos = ddi_get_time();
3405 		swarg->svs_pi = vpkt->vpkt_path;
3406 		swarg->svs_release_lun = 0;
3407 		swarg->svs_done = 0;
3408 		/*
3409 		 * place a hold on the path...we don't want it to
3410 		 * vanish while scsi_watch is in progress
3411 		 */
3412 		mdi_hold_path(vpkt->vpkt_path);
3413 		svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3414 		    VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3415 		    (caddr_t)swarg);
3416 	}
3417 	return (BUSY_RETURN);
3418 }
3419 
3420 /*
3421  * vhci_efo_watch_cb:
3422  *	Callback from scsi_watch request to check the failover status.
3423  *	Completion is either due to successful failover or timeout.
3424  *	Upon successful completion, vhci_update_path_states is called.
3425  *	For timeout condition, vhci_efo_done is called.
3426  *	Always returns 0 to scsi_watch to keep retrying till vhci_efo_done
3427  *	terminates this request properly in a separate thread.
3428  */
3429 
3430 static int
3431 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3432 {
3433 	struct scsi_status		*statusp = resultp->statusp;
3434 	struct scsi_extended_sense	*sensep = resultp->sensep;
3435 	struct scsi_pkt			*pkt = resultp->pkt;
3436 	scsi_vhci_swarg_t		*swarg;
3437 	scsi_vhci_priv_t		*svp;
3438 	scsi_vhci_lun_t			*vlun;
3439 	struct scsi_vhci		*vhci;
3440 	dev_info_t			*vdip;
3441 	int				rval, updt_paths;
3442 
3443 	swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3444 	svp = swarg->svs_svp;
3445 	if (swarg->svs_done) {
3446 		/*
3447 		 * Already completed failover or timedout.
3448 		 * Waiting for vhci_efo_done to terminate this scsi_watch.
3449 		 */
3450 		return (0);
3451 	}
3452 
3453 	ASSERT(svp != NULL);
3454 	vlun = svp->svp_svl;
3455 	ASSERT(vlun != NULL);
3456 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3457 	vlun->svl_efo_update_path = 0;
3458 	vdip = ddi_get_parent(vlun->svl_dip);
3459 	vhci = ddi_get_soft_state(vhci_softstate,
3460 	    ddi_get_instance(vdip));
3461 
3462 	updt_paths = 0;
3463 
3464 	if (pkt->pkt_reason != CMD_CMPLT) {
3465 		if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3466 			swarg->svs_release_lun = 1;
3467 			goto done;
3468 		}
3469 		return (0);
3470 	}
3471 	if (*((unsigned char *)statusp) == STATUS_CHECK) {
3472 		rval = (*(vlun->svl_fops->sfo_analyze_sense))
3473 		    (svp->svp_psd, sensep, vlun->svl_fops_ctpriv);
3474 		switch (rval) {
3475 			/*
3476 			 * Only update path states in case path is definitely
3477 			 * inactive, or no failover occurred.  For all other
3478 			 * check conditions continue pinging.  A unexpected
3479 			 * check condition shouldn't cause pinging to complete
3480 			 * prematurely.
3481 			 */
3482 			case SCSI_SENSE_INACTIVE:
3483 			case SCSI_SENSE_NOFAILOVER:
3484 				updt_paths = 1;
3485 				break;
3486 			default:
3487 				if ((ddi_get_time() - swarg->svs_tos)
3488 				    >= VHCI_EXTFO_TIMEOUT) {
3489 					swarg->svs_release_lun = 1;
3490 					goto done;
3491 				}
3492 				return (0);
3493 		}
3494 	} else if (*((unsigned char *)statusp) ==
3495 	    STATUS_RESERVATION_CONFLICT) {
3496 		updt_paths = 1;
3497 	} else if ((*((unsigned char *)statusp)) &
3498 	    (STATUS_BUSY | STATUS_QFULL)) {
3499 		return (0);
3500 	}
3501 	if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3502 	    (updt_paths == 1)) {
3503 		/*
3504 		 * we got here because we had detected an
3505 		 * externally initiated failover; things
3506 		 * have settled down now, so let's
3507 		 * start up a task to update the
3508 		 * path states and target port group
3509 		 */
3510 		vlun->svl_efo_update_path = 1;
3511 		swarg->svs_done = 1;
3512 		vlun->svl_swarg = swarg;
3513 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3514 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3515 		    vhci_update_pathstates, (void *)vlun,
3516 		    KM_SLEEP);
3517 		return (0);
3518 	}
3519 	if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3520 		swarg->svs_release_lun = 1;
3521 		goto done;
3522 	}
3523 	return (0);
3524 done:
3525 	swarg->svs_done = 1;
3526 	(void) taskq_dispatch(vhci->vhci_taskq,
3527 	    vhci_efo_done, (void *)swarg, KM_SLEEP);
3528 	return (0);
3529 }
3530 
3531 /*
3532  * vhci_efo_done:
3533  *	cleanly terminates scsi_watch and free up resources.
3534  *	Called as taskq function in vhci_efo_watch_cb for EFO timeout condition
3535  *	or by vhci_update_path_states invoked during external initiated
3536  *	failover completion.
3537  */
3538 static void
3539 vhci_efo_done(void *arg)
3540 {
3541 	scsi_vhci_lun_t			*vlun;
3542 	scsi_vhci_swarg_t		*swarg = (scsi_vhci_swarg_t *)arg;
3543 	scsi_vhci_priv_t		*svp = swarg->svs_svp;
3544 	ASSERT(svp);
3545 
3546 	vlun = svp->svp_svl;
3547 	ASSERT(vlun);
3548 
3549 	/* Wait for clean termination of scsi_watch */
3550 	(void) scsi_watch_request_terminate(svp->svp_sw_token,
3551 	    SCSI_WATCH_TERMINATE_WAIT);
3552 	svp->svp_sw_token = NULL;
3553 
3554 	/* release path and freeup resources to indicate failover completion */
3555 	mdi_rele_path(swarg->svs_pi);
3556 	if (swarg->svs_release_lun) {
3557 		VHCI_RELEASE_LUN(vlun);
3558 	}
3559 	kmem_free((void *)swarg, sizeof (*swarg));
3560 }
3561 
3562 /*
3563  * Update the path states
3564  * vlun should be HELD when this is invoked.
3565  * Calls vhci_efo_done to cleanup resources allocated for EFO.
3566  */
3567 void
3568 vhci_update_pathstates(void *arg)
3569 {
3570 	mdi_pathinfo_t			*pip, *npip;
3571 	dev_info_t			*dip, *pdip;
3572 	struct scsi_failover_ops	*fo;
3573 	struct scsi_vhci_priv		*svp;
3574 	struct scsi_device		*psd;
3575 	struct scsi_path_opinfo		opinfo;
3576 	char				*pclass, *tptr;
3577 	struct scsi_vhci_lun		*vlun = (struct scsi_vhci_lun *)arg;
3578 	int				sps; /* mdi_select_path() status */
3579 	char				*cpath, *dpath;
3580 	struct scsi_vhci		*vhci;
3581 	struct scsi_pkt			*pkt;
3582 	struct buf			*bp;
3583 	int				reserve_conflict = 0;
3584 
3585 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3586 	dip  = vlun->svl_dip;
3587 	pip = npip = NULL;
3588 
3589 	vhci = ddi_get_soft_state(vhci_softstate,
3590 	    ddi_get_instance(ddi_get_parent(dip)));
3591 
3592 	sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3593 	    MDI_SELECT_STANDBY_PATH), NULL, &npip);
3594 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3595 		goto done;
3596 	}
3597 
3598 	fo = vlun->svl_fops;
3599 	do {
3600 		pip = npip;
3601 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3602 		psd = svp->svp_psd;
3603 		if ((*fo->sfo_path_get_opinfo)(psd, &opinfo,
3604 		    vlun->svl_fops_ctpriv) != 0) {
3605 			sps = mdi_select_path(dip, NULL,
3606 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
3607 			    pip, &npip);
3608 			mdi_rele_path(pip);
3609 			continue;
3610 		}
3611 
3612 		if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3613 		    MDI_SUCCESS) {
3614 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3615 			    "!vhci_update_pathstates: prop lookup failed for "
3616 			    "path 0x%p\n", (void *)pip));
3617 			sps = mdi_select_path(dip, NULL,
3618 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
3619 			    pip, &npip);
3620 			mdi_rele_path(pip);
3621 			continue;
3622 		}
3623 
3624 		/*
3625 		 * Need to update the "path-class" property
3626 		 * value in the device tree if different
3627 		 * from the existing value.
3628 		 */
3629 		if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3630 			(void) mdi_prop_update_string(pip, "path-class",
3631 			    opinfo.opinfo_path_attr);
3632 		}
3633 
3634 		/*
3635 		 * Only change the state if needed. i.e. Don't call
3636 		 * mdi_pi_set_state to ONLINE a path if its already
3637 		 * ONLINE. Same for STANDBY paths.
3638 		 */
3639 
3640 		if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
3641 		    opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
3642 			if (!(MDI_PI_IS_ONLINE(pip))) {
3643 				VHCI_DEBUG(1, (CE_NOTE, NULL,
3644 				    "!vhci_update_pathstates: marking path"
3645 				    " 0x%p as ONLINE\n", (void *)pip));
3646 				pdip = mdi_pi_get_phci(pip);
3647 				cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3648 				dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3649 				vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s"
3650 				    " (%s%d): path %s (%s%d) target address %s"
3651 				    " is now ONLINE because of"
3652 				    " an externally initiated failover",
3653 				    ddi_pathname(dip, cpath),
3654 				    ddi_driver_name(dip),
3655 				    ddi_get_instance(dip),
3656 				    ddi_pathname(pdip, dpath),
3657 				    ddi_driver_name(pdip),
3658 				    ddi_get_instance(pdip),
3659 				    mdi_pi_get_addr(pip));
3660 				kmem_free(cpath, MAXPATHLEN);
3661 				kmem_free(dpath, MAXPATHLEN);
3662 				mdi_pi_set_state(pip,
3663 				    MDI_PATHINFO_STATE_ONLINE);
3664 				mdi_pi_set_preferred(pip,
3665 				    opinfo.opinfo_preferred);
3666 				tptr = kmem_alloc(strlen
3667 				    (opinfo.opinfo_path_attr)+1, KM_SLEEP);
3668 				(void) strlcpy(tptr, opinfo.opinfo_path_attr,
3669 				    (strlen(opinfo.opinfo_path_attr)+1));
3670 				mutex_enter(&vlun->svl_mutex);
3671 				if (vlun->svl_active_pclass != NULL) {
3672 					kmem_free(vlun->svl_active_pclass,
3673 					    strlen(vlun->svl_active_pclass)+1);
3674 				}
3675 				vlun->svl_active_pclass = tptr;
3676 				if (vlun->svl_waiting_for_activepath) {
3677 					vlun->svl_waiting_for_activepath = 0;
3678 				}
3679 				mutex_exit(&vlun->svl_mutex);
3680 				/* Check for Reservation Conflict */
3681 				bp = scsi_alloc_consistent_buf(
3682 				    &svp->svp_psd->sd_address,
3683 				    (struct buf *)NULL, DEV_BSIZE, B_READ,
3684 				    NULL, NULL);
3685 				if (!bp) {
3686 					VHCI_DEBUG(1, (CE_NOTE, NULL,
3687 					    "vhci_update_pathstates: "
3688 					    "!No resources (buf)\n"));
3689 					mdi_rele_path(pip);
3690 					goto done;
3691 				}
3692 				pkt = scsi_init_pkt(&svp->svp_psd->sd_address,
3693 				    NULL, bp, CDB_GROUP1,
3694 				    sizeof (struct scsi_arq_status), 0,
3695 				    PKT_CONSISTENT, NULL, NULL);
3696 				if (pkt) {
3697 					(void) scsi_setup_cdb((union scsi_cdb *)
3698 					    (uintptr_t)pkt->pkt_cdbp,
3699 					    SCMD_READ, 1, 1, 0);
3700 					pkt->pkt_time = 3*30;
3701 					pkt->pkt_flags = FLAG_NOINTR;
3702 					if ((scsi_transport(pkt) ==
3703 					    TRAN_ACCEPT) && (pkt->pkt_reason
3704 					    == CMD_CMPLT) && (SCBP_C(pkt) ==
3705 					    STATUS_RESERVATION_CONFLICT)) {
3706 						reserve_conflict = 1;
3707 					}
3708 					scsi_destroy_pkt(pkt);
3709 				}
3710 				scsi_free_consistent_buf(bp);
3711 			} else if (MDI_PI_IS_ONLINE(pip)) {
3712 				if (strcmp(pclass, opinfo.opinfo_path_attr)
3713 				    != 0) {
3714 					mdi_pi_set_preferred(pip,
3715 					    opinfo.opinfo_preferred);
3716 					mutex_enter(&vlun->svl_mutex);
3717 					if (vlun->svl_active_pclass == NULL ||
3718 					    strcmp(opinfo.opinfo_path_attr,
3719 					    vlun->svl_active_pclass) != 0) {
3720 						mutex_exit(&vlun->svl_mutex);
3721 						tptr = kmem_alloc(strlen
3722 						    (opinfo.opinfo_path_attr)+1,
3723 						    KM_SLEEP);
3724 						(void) strlcpy(tptr,
3725 						    opinfo.opinfo_path_attr,
3726 						    (strlen
3727 						    (opinfo.opinfo_path_attr)
3728 						    +1));
3729 						mutex_enter(&vlun->svl_mutex);
3730 					} else {
3731 						/*
3732 						 * No need to update
3733 						 * svl_active_pclass
3734 						 */
3735 						tptr = NULL;
3736 						mutex_exit(&vlun->svl_mutex);
3737 					}
3738 					if (tptr) {
3739 						if (vlun->svl_active_pclass
3740 						    != NULL) {
3741 							kmem_free(vlun->
3742 							    svl_active_pclass,
3743 							    strlen(vlun->
3744 							    svl_active_pclass)
3745 							    +1);
3746 						}
3747 						vlun->svl_active_pclass = tptr;
3748 						mutex_exit(&vlun->svl_mutex);
3749 					}
3750 				}
3751 			}
3752 		} else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
3753 		    !(MDI_PI_IS_STANDBY(pip))) {
3754 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3755 			    "!vhci_update_pathstates: marking path"
3756 			    " 0x%p as STANDBY\n", (void *)pip));
3757 			pdip = mdi_pi_get_phci(pip);
3758 			cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3759 			dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3760 			vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s"
3761 			    " (%s%d): path %s (%s%d) target address %s"
3762 			    " is now STANDBY because of"
3763 			    " an externally initiated failover",
3764 			    ddi_pathname(dip, cpath),
3765 			    ddi_driver_name(dip),
3766 			    ddi_get_instance(dip),
3767 			    ddi_pathname(pdip, dpath),
3768 			    ddi_driver_name(pdip),
3769 			    ddi_get_instance(pdip),
3770 			    mdi_pi_get_addr(pip));
3771 			kmem_free(cpath, MAXPATHLEN);
3772 			kmem_free(dpath, MAXPATHLEN);
3773 			mdi_pi_set_state(pip,
3774 			    MDI_PATHINFO_STATE_STANDBY);
3775 			mdi_pi_set_preferred(pip,
3776 			    opinfo.opinfo_preferred);
3777 			mutex_enter(&vlun->svl_mutex);
3778 			if (vlun->svl_active_pclass != NULL) {
3779 				if (strcmp(vlun->svl_active_pclass,
3780 				    opinfo.opinfo_path_attr) == 0) {
3781 					kmem_free(vlun->
3782 					    svl_active_pclass,
3783 					    strlen(vlun->
3784 					    svl_active_pclass)+1);
3785 					vlun->svl_active_pclass = NULL;
3786 				}
3787 			}
3788 			mutex_exit(&vlun->svl_mutex);
3789 		}
3790 		(void) mdi_prop_free(pclass);
3791 		sps = mdi_select_path(dip, NULL,
3792 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
3793 		    pip, &npip);
3794 		mdi_rele_path(pip);
3795 
3796 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
3797 
3798 	/*
3799 	 * Check to see if this vlun has an active SCSI-II RESERVE.  If so
3800 	 * clear the reservation by sending a reset, so the host doesn't
3801 	 * receive a reservation conflict.
3802 	 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd
3803 	 * of the reset, explicitly.
3804 	 */
3805 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
3806 		if (reserve_conflict && (vlun->svl_xlf_capable == 0)) {
3807 			(void) vhci_recovery_reset(vlun,
3808 			    &svp->svp_psd->sd_address, FALSE,
3809 			    VHCI_DEPTH_TARGET);
3810 		}
3811 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3812 		mutex_enter(&vhci->vhci_mutex);
3813 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
3814 		    &vhci->vhci_reset_notify_listf);
3815 		mutex_exit(&vhci->vhci_mutex);
3816 	}
3817 	if (vlun->svl_flags & VLUN_UPDATE_TPG) {
3818 		/*
3819 		 * Update the AccessState of related MP-API TPGs
3820 		 */
3821 		(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
3822 		vlun->svl_flags &= ~VLUN_UPDATE_TPG;
3823 	}
3824 done:
3825 	if (vlun->svl_efo_update_path) {
3826 		vlun->svl_efo_update_path = 0;
3827 		vhci_efo_done(vlun->svl_swarg);
3828 		vlun->svl_swarg = 0;
3829 	}
3830 	VHCI_RELEASE_LUN(vlun);
3831 }
3832 
3833 /* ARGSUSED */
3834 static int
3835 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
3836 {
3837 	scsi_hba_tran_t		*hba = NULL;
3838 	struct scsi_device	*psd = NULL;
3839 	scsi_vhci_lun_t		*vlun = NULL;
3840 	dev_info_t		*pdip = NULL;
3841 	dev_info_t		*tgt_dip;
3842 	struct scsi_vhci	*vhci;
3843 	char			*guid;
3844 	scsi_vhci_priv_t	*svp = NULL;
3845 	int			rval = MDI_FAILURE;
3846 	int			vlun_alloced = 0;
3847 
3848 	ASSERT(vdip != NULL);
3849 	ASSERT(pip != NULL);
3850 
3851 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
3852 	ASSERT(vhci != NULL);
3853 
3854 	pdip = mdi_pi_get_phci(pip);
3855 	ASSERT(pdip != NULL);
3856 
3857 	hba = ddi_get_driver_private(pdip);
3858 	ASSERT(hba != NULL);
3859 
3860 	tgt_dip = mdi_pi_get_client(pip);
3861 	ASSERT(tgt_dip != NULL);
3862 
3863 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
3864 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
3865 		VHCI_DEBUG(1, (CE_WARN, NULL,
3866 		    "vhci_pathinfo_init: lun guid property failed"));
3867 		goto failure;
3868 	}
3869 
3870 	vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
3871 	ddi_prop_free(guid);
3872 
3873 	vlun->svl_dip = tgt_dip;
3874 
3875 	svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
3876 	svp->svp_svl = vlun;
3877 
3878 	vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
3879 	mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
3880 	cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);
3881 
3882 	psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
3883 	mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
3884 
3885 	/*
3886 	 * Clone transport structure if requested, so
3887 	 * Self enumerating HBAs always need to use cloning
3888 	 */
3889 
3890 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
3891 		scsi_hba_tran_t	*clone =
3892 		    kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
3893 		bcopy(hba, clone, sizeof (scsi_hba_tran_t));
3894 		hba = clone;
3895 		hba->tran_sd = psd;
3896 	} else {
3897 		ASSERT(hba->tran_sd == NULL);
3898 	}
3899 	psd->sd_dev = tgt_dip;
3900 	psd->sd_address.a_hba_tran = hba;
3901 	psd->sd_private = (caddr_t)pip;
3902 	svp->svp_psd = psd;
3903 	mdi_pi_set_vhci_private(pip, (caddr_t)svp);
3904 
3905 	/*
3906 	 * call hba's target init entry point if it exists
3907 	 */
3908 	if (hba->tran_tgt_init != NULL) {
3909 		if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
3910 		    hba, psd)) != DDI_SUCCESS) {
3911 			VHCI_DEBUG(1, (CE_WARN, pdip,
3912 			    "!vhci_pathinfo_init: tran_tgt_init failed for "
3913 			    "path=0x%p rval=%x", (void *)pip, rval));
3914 			goto failure;
3915 		}
3916 	}
3917 
3918 	svp->svp_new_path = 1;
3919 
3920 	psd->sd_inq = NULL;
3921 
3922 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
3923 	    (void *)pip));
3924 	return (MDI_SUCCESS);
3925 
3926 failure:
3927 	if (psd) {
3928 		mutex_destroy(&psd->sd_mutex);
3929 		kmem_free(psd, sizeof (*psd));
3930 	}
3931 	if (svp) {
3932 		mdi_pi_set_vhci_private(pip, NULL);
3933 		mutex_destroy(&svp->svp_mutex);
3934 		cv_destroy(&svp->svp_cv);
3935 		kmem_free(svp, sizeof (*svp));
3936 	}
3937 	if (hba && hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE)
3938 		kmem_free(hba, sizeof (scsi_hba_tran_t));
3939 
3940 	if (vlun_alloced)
3941 		vhci_lun_free(tgt_dip);
3942 
3943 	return (rval);
3944 }
3945 
3946 /* ARGSUSED */
3947 static int
3948 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
3949 {
3950 	scsi_hba_tran_t		*hba = NULL;
3951 	struct scsi_device	*psd = NULL;
3952 	dev_info_t		*pdip = NULL;
3953 	dev_info_t		*cdip = NULL;
3954 	scsi_vhci_priv_t	*svp = NULL;
3955 
3956 	ASSERT(vdip != NULL);
3957 	ASSERT(pip != NULL);
3958 
3959 	pdip = mdi_pi_get_phci(pip);
3960 	ASSERT(pdip != NULL);
3961 
3962 	cdip = mdi_pi_get_client(pip);
3963 	ASSERT(cdip != NULL);
3964 
3965 	hba = ddi_get_driver_private(pdip);
3966 	ASSERT(hba != NULL);
3967 
3968 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
3969 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3970 	if (svp == NULL) {
3971 		/* path already freed. Nothing to do. */
3972 		return (MDI_SUCCESS);
3973 	}
3974 
3975 	psd = svp->svp_psd;
3976 	ASSERT(psd != NULL);
3977 
3978 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
3979 		hba = psd->sd_address.a_hba_tran;
3980 		ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
3981 		ASSERT(hba->tran_sd == psd);
3982 	} else {
3983 		ASSERT(hba->tran_sd == NULL);
3984 	}
3985 
3986 	if (hba->tran_tgt_free != NULL) {
3987 		(*hba->tran_tgt_free) (pdip, cdip, hba, psd);
3988 	}
3989 	mutex_destroy(&psd->sd_mutex);
3990 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
3991 		kmem_free(hba, sizeof (*hba));
3992 	}
3993 
3994 	mdi_pi_set_vhci_private(pip, NULL);
3995 	kmem_free((caddr_t)psd, sizeof (*psd));
3996 
3997 	mutex_destroy(&svp->svp_mutex);
3998 	cv_destroy(&svp->svp_cv);
3999 	kmem_free((caddr_t)svp, sizeof (*svp));
4000 
4001 	/*
4002 	 * If this is the last path to the client,
4003 	 * then free up the vlun as well.
4004 	 */
4005 	if (mdi_client_get_path_count(cdip) == 1) {
4006 		vhci_lun_free(cdip);
4007 	}
4008 
4009 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
4010 	    (void *)pip));
4011 	return (MDI_SUCCESS);
4012 }
4013 
4014 /* ARGSUSED */
4015 static int
4016 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
4017     mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
4018 {
4019 	int			rval = MDI_SUCCESS;
4020 	scsi_vhci_priv_t	*svp;
4021 	scsi_vhci_lun_t		*vlun;
4022 	int			held;
4023 	int			op = (flags & 0xf00) >> 8;
4024 	struct scsi_vhci	*vhci;
4025 
4026 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4027 
4028 	if (flags & MDI_EXT_STATE_CHANGE) {
4029 		/*
4030 		 * We do not want to issue any commands down the path in case
4031 		 * sync flag is set. Lower layers might not be ready to accept
4032 		 * any I/O commands.
4033 		 */
4034 		if (op == DRIVER_DISABLE)
4035 			return (MDI_SUCCESS);
4036 
4037 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4038 		if (svp == NULL) {
4039 			return (MDI_FAILURE);
4040 		}
4041 		vlun = svp->svp_svl;
4042 
4043 		if (flags & MDI_BEFORE_STATE_CHANGE) {
4044 			/*
4045 			 * Hold the LUN.
4046 			 */
4047 			VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
4048 			if (flags & MDI_DISABLE_OP)  {
4049 				/*
4050 				 * Issue scsi reset if it happens to be
4051 				 * reserved path.
4052 				 */
4053 				if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4054 					/*
4055 					 * if reservation pending on
4056 					 * this path, dont' mark the
4057 					 * path busy
4058 					 */
4059 					if (op == DRIVER_DISABLE_TRANSIENT) {
4060 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4061 						    "!vhci_pathinfo"
4062 						    "_state_change (pip:%p): "
4063 						    " reservation: fail busy\n",
4064 						    (void *)pip));
4065 						return (MDI_FAILURE);
4066 					}
4067 					if (pip == vlun->svl_resrv_pip) {
4068 						if (vhci_recovery_reset(
4069 						    svp->svp_svl,
4070 						    &svp->svp_psd->sd_address,
4071 						    TRUE,
4072 						    VHCI_DEPTH_TARGET) == 0) {
4073 							VHCI_DEBUG(1,
4074 							    (CE_NOTE, NULL,
4075 							    "!vhci_pathinfo"
4076 							    "_state_change "
4077 							    " (pip:%p): "
4078 							    "reset failed, "
4079 							    "give up!\n",
4080 							    (void *)pip));
4081 						}
4082 						vlun->svl_flags &=
4083 						    ~VLUN_RESERVE_ACTIVE_FLG;
4084 					}
4085 				}
4086 			} else if (flags & MDI_ENABLE_OP)  {
4087 				if (((vhci->vhci_conf_flags &
4088 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4089 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4090 				    MDI_PI_IS_USER_DISABLE(pip) &&
4091 				    MDI_PI_IS_STANDBY(pip)) {
4092 					struct scsi_failover_ops	*fo;
4093 					char *best_pclass, *pclass = NULL;
4094 					int  best_class, rv;
4095 					/*
4096 					 * Failback if enabling a standby path
4097 					 * and it is the primary class or
4098 					 * preferred class
4099 					 */
4100 					best_class = mdi_pi_get_preferred(pip);
4101 					if (best_class == 0) {
4102 						/*
4103 						 * if not preferred - compare
4104 						 * path-class with class
4105 						 */
4106 						fo = vlun->svl_fops;
4107 						(*fo->sfo_pathclass_next)(NULL,
4108 						    &best_pclass,
4109 						    vlun->svl_fops_ctpriv);
4110 						pclass = NULL;
4111 						rv = mdi_prop_lookup_string(pip,
4112 						    "path-class", &pclass);
4113 						if (rv != MDI_SUCCESS ||
4114 						    pclass == NULL) {
4115 							vhci_log(CE_NOTE, vdip,
4116 							    "!path-class "
4117 							    " lookup "
4118 							    "failed. rv: %d"
4119 							    "class: %p", rv,
4120 							    (void *)pclass);
4121 						} else if (strncmp(pclass,
4122 						    best_pclass,
4123 						    strlen(best_pclass)) == 0) {
4124 							best_class = 1;
4125 						}
4126 						if (rv == MDI_SUCCESS &&
4127 						    pclass != NULL) {
4128 							rv = mdi_prop_free(
4129 							    pclass);
4130 							if (rv !=
4131 							    DDI_PROP_SUCCESS) {
4132 								vhci_log(
4133 								    CE_NOTE,
4134 								    vdip,
4135 								    "!path-"
4136 								    "class"
4137 								    " free"
4138 								    " failed"
4139 								    " rv: %d"
4140 								    " class: "
4141 								    "%p",
4142 								    rv,
4143 								    (void *)
4144 								    pclass);
4145 							}
4146 						}
4147 					}
4148 					if (best_class == 1) {
4149 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4150 						    "preferred path: %p "
4151 						    "USER_DISABLE->USER_ENABLE "
4152 						    "transition for lun %s\n",
4153 						    (void *)pip,
4154 						    vlun->svl_lun_wwn));
4155 						(void) taskq_dispatch(
4156 						    vhci->vhci_taskq,
4157 						    vhci_initiate_auto_failback,
4158 						    (void *) vlun, KM_SLEEP);
4159 					}
4160 				}
4161 				/*
4162 				 * if PGR is active, revalidate key and
4163 				 * register on this path also, if key is
4164 				 * still valid
4165 				 */
4166 				sema_p(&vlun->svl_pgr_sema);
4167 				if (vlun->svl_pgr_active)
4168 					(void)
4169 					    vhci_pgr_validate_and_register(svp);
4170 				sema_v(&vlun->svl_pgr_sema);
4171 				/*
4172 				 * Inform target driver about any
4173 				 * reservations to be reinstated if target
4174 				 * has dropped reservation during the busy
4175 				 * period.
4176 				 */
4177 				mutex_enter(&vhci->vhci_mutex);
4178 				scsi_hba_reset_notify_callback(
4179 				    &vhci->vhci_mutex,
4180 				    &vhci->vhci_reset_notify_listf);
4181 				mutex_exit(&vhci->vhci_mutex);
4182 			}
4183 		}
4184 		if (flags & MDI_AFTER_STATE_CHANGE) {
4185 			if (flags & MDI_ENABLE_OP)  {
4186 				mutex_enter(&vhci_global_mutex);
4187 				cv_broadcast(&vhci_cv);
4188 				mutex_exit(&vhci_global_mutex);
4189 			}
4190 			if (vlun->svl_setcap_done) {
4191 				(void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
4192 				    "sector-size", vlun->svl_sector_size,
4193 				    1, pip);
4194 			}
4195 
4196 			/*
4197 			 * Release the LUN
4198 			 */
4199 			VHCI_RELEASE_LUN(vlun);
4200 
4201 			/*
4202 			 * Path transition is complete.
4203 			 * Run callback to indicate target driver to
4204 			 * retry to prevent IO starvation.
4205 			 */
4206 			if (scsi_callback_id != 0) {
4207 				ddi_run_callback(&scsi_callback_id);
4208 			}
4209 		}
4210 	} else {
4211 		switch (state) {
4212 		case MDI_PATHINFO_STATE_ONLINE:
4213 			rval = vhci_pathinfo_online(vdip, pip, flags);
4214 			break;
4215 
4216 		case MDI_PATHINFO_STATE_OFFLINE:
4217 			rval = vhci_pathinfo_offline(vdip, pip, flags);
4218 			break;
4219 
4220 		default:
4221 			break;
4222 		}
4223 		/*
4224 		 * Path transition is complete.
4225 		 * Run callback to indicate target driver to
4226 		 * retry to prevent IO starvation.
4227 		 */
4228 		if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
4229 			ddi_run_callback(&scsi_callback_id);
4230 		}
4231 		return (rval);
4232 	}
4233 
4234 	return (MDI_SUCCESS);
4235 }
4236 
4237 /*
4238  * Parse the mpxio load balancing options. The datanameptr
4239  * will point to a string containing the load-balance-options value.
4240  * The load-balance-options value will be a property that
4241  * defines the load-balance algorithm and any arguments to that
4242  * algorithm.
4243  * For example:
4244  * device-type-mpxio-options-list=
4245  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4246  * "device-type=SUN     SE6920", "round-robin-options";
4247  * logical-block-options="load-balance=logical-block", "region-size=15";
4248  * round-robin-options="load-balance=round-robin";
4249  *
4250  * If the load-balance is not defined the load balance algorithm will
4251  * default to the global setting. There will be default values assigned
4252  * to the arguments (region-size=18) and if an argument is one
4253  * that is not known, it will be ignored.
4254  */
4255 static void
4256 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
4257 	caddr_t datanameptr)
4258 {
4259 	char			*dataptr, *next_entry;
4260 	caddr_t			config_list	= NULL;
4261 	int			config_list_len = 0, list_len = 0;
4262 	int			region_size = -1;
4263 	client_lb_t		load_balance;
4264 
4265 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
4266 	    (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
4267 		return;
4268 	}
4269 
4270 	list_len = config_list_len;
4271 	next_entry = config_list;
4272 	while (config_list_len > 0) {
4273 		dataptr = next_entry;
4274 
4275 		if (strncmp(mdi_load_balance, dataptr,
4276 		    strlen(mdi_load_balance)) == 0) {
4277 			/* get the load-balance scheme */
4278 			dataptr += strlen(mdi_load_balance) + 1;
4279 			if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
4280 				(void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
4281 				load_balance = LOAD_BALANCE_RR;
4282 			} else if (strcmp(dataptr,
4283 			    LOAD_BALANCE_PROP_LBA) == 0) {
4284 				(void) mdi_set_lb_policy(cdip,
4285 				    LOAD_BALANCE_LBA);
4286 				load_balance = LOAD_BALANCE_LBA;
4287 			} else if (strcmp(dataptr,
4288 			    LOAD_BALANCE_PROP_NONE) == 0) {
4289 				(void) mdi_set_lb_policy(cdip,
4290 				    LOAD_BALANCE_NONE);
4291 				load_balance = LOAD_BALANCE_NONE;
4292 			}
4293 		} else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
4294 		    strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
4295 			int	i = 0;
4296 			char	*ptr;
4297 			char	*tmp;
4298 
4299 			tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
4300 			/* check for numeric value */
4301 			for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
4302 				if (!isdigit(*ptr)) {
4303 					cmn_err(CE_WARN,
4304 					    "Illegal region size: %s."
4305 					    " Setting to default value: %d",
4306 					    tmp,
4307 					    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4308 					region_size =
4309 					    LOAD_BALANCE_DEFAULT_REGION_SIZE;
4310 					break;
4311 				}
4312 			}
4313 			if (i >= strlen(tmp)) {
4314 				region_size = stoi(&tmp);
4315 			}
4316 			(void) mdi_set_lb_region_size(cdip, region_size);
4317 		}
4318 		config_list_len -= (strlen(next_entry) + 1);
4319 		next_entry += strlen(next_entry) + 1;
4320 	}
4321 #ifdef DEBUG
4322 	if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
4323 		VHCI_DEBUG(1, (CE_NOTE, dip,
4324 		    "!vhci_parse_mpxio_lb_options: region-size: %d"
4325 		    "only valid for load-balance=logical-block\n",
4326 		    region_size));
4327 	}
4328 #endif
4329 	if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
4330 		VHCI_DEBUG(1, (CE_NOTE, dip,
4331 		    "!vhci_parse_mpxio_lb_options: No region-size"
4332 		    " defined load-balance=logical-block."
4333 		    " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
4334 		(void) mdi_set_lb_region_size(cdip,
4335 		    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4336 	}
4337 	if (list_len > 0) {
4338 		kmem_free(config_list, list_len);
4339 	}
4340 }
4341 
4342 /*
4343  * Parse the device-type-mpxio-options-list looking for the key of
4344  * "load-balance-options". If found, parse the load balancing options.
4345  * Check the comment of the vhci_get_device_type_mpxio_options()
4346  * for the device-type-mpxio-options-list.
4347  */
4348 static void
4349 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4350 		caddr_t datanameptr, int list_len)
4351 {
4352 	char		*dataptr;
4353 	int		len;
4354 
4355 	/*
4356 	 * get the data list
4357 	 */
4358 	dataptr = datanameptr;
4359 	len = 0;
4360 	while (len < list_len &&
4361 	    strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
4362 	    != 0) {
4363 		if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
4364 		    strlen(LOAD_BALANCE_OPTIONS)) == 0) {
4365 			len += strlen(LOAD_BALANCE_OPTIONS) + 1;
4366 			dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
4367 			vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
4368 		}
4369 		len += strlen(dataptr) + 1;
4370 		dataptr += strlen(dataptr) + 1;
4371 	}
4372 }
4373 
4374 /*
4375  * Check the inquriy string returned from the device wiith the device-type
4376  * Check for the existence of the device-type-mpxio-options-list and
4377  * if found parse the list checking for a match with the device-type
4378  * value and the inquiry string returned from the device. If a match
4379  * is found, parse the mpxio options list. The format of the
4380  * device-type-mpxio-options-list is:
4381  * device-type-mpxio-options-list=
4382  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4383  * "device-type=SUN     SE6920", "round-robin-options";
4384  * logical-block-options="load-balance=logical-block", "region-size=15";
4385  * round-robin-options="load-balance=round-robin";
4386  */
4387 void
4388 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4389 	struct scsi_device *devp)
4390 {
4391 
4392 	caddr_t			config_list	= NULL;
4393 	caddr_t			vidptr, datanameptr;
4394 	int			vidlen, dupletlen = 0;
4395 	int			config_list_len = 0, len;
4396 	struct scsi_inquiry	*inq = devp->sd_inq;
4397 
4398 	/*
4399 	 * look up the device-type-mpxio-options-list and walk thru
4400 	 * the list compare the vendor ids of the earlier inquiry command and
4401 	 * with those vids in the list if there is a match, lookup
4402 	 * the mpxio-options value
4403 	 */
4404 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
4405 	    MPXIO_OPTIONS_LIST,
4406 	    (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {
4407 
4408 		/*
4409 		 * Compare vids in each duplet - if it matches,
4410 		 * parse the mpxio options list.
4411 		 */
4412 		for (len = config_list_len, vidptr = config_list; len > 0;
4413 		    len -= dupletlen) {
4414 
4415 			dupletlen = 0;
4416 
4417 			if (strlen(vidptr) != 0 &&
4418 			    strncmp(vidptr, DEVICE_TYPE_STR,
4419 			    strlen(DEVICE_TYPE_STR)) == 0) {
4420 				/* point to next duplet */
4421 				datanameptr = vidptr + strlen(vidptr) + 1;
4422 				/* add len of this duplet */
4423 				dupletlen += strlen(vidptr) + 1;
4424 				/* get to device type */
4425 				vidptr += strlen(DEVICE_TYPE_STR) + 1;
4426 				vidlen = strlen(vidptr);
4427 				if ((vidlen != 0) &&
4428 				    bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
4429 					vhci_parse_mpxio_options(dip, cdip,
4430 					    datanameptr, len - dupletlen);
4431 					break;
4432 				}
4433 				/* get to next duplet */
4434 				vidptr += strlen(vidptr) + 1;
4435 			}
4436 			/* get to the next device-type */
4437 			while (len - dupletlen > 0 &&
4438 			    strlen(vidptr) != 0 &&
4439 			    strncmp(vidptr, DEVICE_TYPE_STR,
4440 			    strlen(DEVICE_TYPE_STR)) != 0) {
4441 				dupletlen += strlen(vidptr) + 1;
4442 				vidptr += strlen(vidptr) + 1;
4443 			}
4444 		}
4445 		if (config_list_len > 0) {
4446 			kmem_free(config_list, config_list_len);
4447 		}
4448 	}
4449 }
4450 
4451 static int
4452 vhci_update_pathinfo(struct scsi_device *psd,  mdi_pathinfo_t *pip,
4453 	struct scsi_failover_ops *fo,
4454 	scsi_vhci_lun_t		*vlun,
4455 	struct scsi_vhci	*vhci)
4456 {
4457 	struct scsi_path_opinfo		opinfo;
4458 	char				*pclass, *best_pclass;
4459 
4460 	if ((*fo->sfo_path_get_opinfo)(psd, &opinfo,
4461 	    vlun->svl_fops_ctpriv) != 0) {
4462 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
4463 		    "Failed to get operation info for path:%p\n", (void *)pip));
4464 		return (MDI_FAILURE);
4465 	}
4466 	/* set the xlf capable flag in the vlun for future use */
4467 	vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
4468 	(void) mdi_prop_update_string(pip, "path-class",
4469 	    opinfo.opinfo_path_attr);
4470 
4471 	pclass = opinfo.opinfo_path_attr;
4472 	if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
4473 		mutex_enter(&vlun->svl_mutex);
4474 		if (vlun->svl_active_pclass != NULL) {
4475 			if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
4476 				mutex_exit(&vlun->svl_mutex);
4477 				/*
4478 				 * Externally initiated failover has happened;
4479 				 * force the path state to be STANDBY/ONLINE,
4480 				 * next IO will trigger failover and thus
4481 				 * sync-up the pathstates.  Reason we don't
4482 				 * sync-up immediately by invoking
4483 				 * vhci_update_pathstates() is because it
4484 				 * needs a VHCI_HOLD_LUN() and we don't
4485 				 * want to block here.
4486 				 *
4487 				 * Further, if the device is an ALUA device,
4488 				 * then failure to exactly match 'pclass' and
4489 				 * 'svl_active_pclass'(as is the case here)
4490 				 * indicates that the currently active path
4491 				 * is a 'non-optimized' path - which means
4492 				 * that 'svl_active_pclass' needs to be
4493 				 * replaced with opinfo.opinfo_path_state
4494 				 * value.
4495 				 */
4496 
4497 				if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
4498 					char	*tptr;
4499 
4500 					/*
4501 					 * The device is ALUA compliant. The
4502 					 * state need to be changed to online
4503 					 * rather than standby state which is
4504 					 * done typically for a asymmetric
4505 					 * device that is non ALUA compliant.
4506 					 */
4507 					mdi_pi_set_state(pip,
4508 					    MDI_PATHINFO_STATE_ONLINE);
4509 					tptr = kmem_alloc(strlen
4510 					    (opinfo.opinfo_path_attr)+1,
4511 					    KM_SLEEP);
4512 					(void) strlcpy(tptr,
4513 					    opinfo.opinfo_path_attr,
4514 					    (strlen(opinfo.opinfo_path_attr)
4515 					    +1));
4516 					mutex_enter(&vlun->svl_mutex);
4517 					kmem_free(vlun->svl_active_pclass,
4518 					    strlen(vlun->svl_active_pclass)+1);
4519 					vlun->svl_active_pclass = tptr;
4520 					mutex_exit(&vlun->svl_mutex);
4521 				} else {
4522 					/*
4523 					 * Non ALUA device case.
4524 					 */
4525 					mdi_pi_set_state(pip,
4526 					    MDI_PATHINFO_STATE_STANDBY);
4527 				}
4528 				vlun->svl_fo_support = opinfo.opinfo_mode;
4529 				mdi_pi_set_preferred(pip,
4530 				    opinfo.opinfo_preferred);
4531 				return (MDI_SUCCESS);
4532 			}
4533 		} else {
4534 			char	*tptr;
4535 
4536 			/*
4537 			 * lets release the mutex before we try to
4538 			 * allocate since the potential to sleep is
4539 			 * possible.
4540 			 */
4541 			mutex_exit(&vlun->svl_mutex);
4542 			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4543 			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4544 			mutex_enter(&vlun->svl_mutex);
4545 			vlun->svl_active_pclass = tptr;
4546 		}
4547 		mutex_exit(&vlun->svl_mutex);
4548 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4549 		vlun->svl_waiting_for_activepath = 0;
4550 	} else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
4551 		mutex_enter(&vlun->svl_mutex);
4552 		if (vlun->svl_active_pclass == NULL) {
4553 			char	*tptr;
4554 
4555 			mutex_exit(&vlun->svl_mutex);
4556 			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4557 			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4558 			mutex_enter(&vlun->svl_mutex);
4559 			vlun->svl_active_pclass = tptr;
4560 		}
4561 		mutex_exit(&vlun->svl_mutex);
4562 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4563 		vlun->svl_waiting_for_activepath = 0;
4564 	} else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
4565 		mutex_enter(&vlun->svl_mutex);
4566 		if (vlun->svl_active_pclass != NULL) {
4567 			if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
4568 				mutex_exit(&vlun->svl_mutex);
4569 				/*
4570 				 * externally initiated failover has happened;
4571 				 * force state to ONLINE (see comment above)
4572 				 */
4573 				mdi_pi_set_state(pip,
4574 				    MDI_PATHINFO_STATE_ONLINE);
4575 				vlun->svl_fo_support = opinfo.opinfo_mode;
4576 				mdi_pi_set_preferred(pip,
4577 				    opinfo.opinfo_preferred);
4578 				return (MDI_SUCCESS);
4579 			}
4580 		}
4581 		mutex_exit(&vlun->svl_mutex);
4582 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);
4583 
4584 		/*
4585 		 * Initiate auto-failback, if enabled, for path if path-state
4586 		 * is transitioning from OFFLINE->STANDBY and pathclass is the
4587 		 * prefered pathclass for this storage.
4588 		 * NOTE: In case where opinfo_path_state is SCSI_PATH_ACTIVE
4589 		 * (above), where the pi state is set to STANDBY, we don't
4590 		 * initiate auto-failback as the next IO shall take care of.
4591 		 * this. See comment above.
4592 		 */
4593 		(*fo->sfo_pathclass_next)(NULL, &best_pclass,
4594 		    vlun->svl_fops_ctpriv);
4595 		if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4596 		    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4597 		    ((strcmp(pclass, best_pclass) == 0) ||
4598 		    mdi_pi_get_preferred(pip) == 1) &&
4599 		    ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)||
4600 		    (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
4601 			VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
4602 			    " OFFLINE->STANDBY transition for lun %s\n",
4603 			    best_pclass, (void *)pip, vlun->svl_lun_wwn));
4604 			(void) taskq_dispatch(vhci->vhci_taskq,
4605 			    vhci_initiate_auto_failback, (void *) vlun,
4606 			    KM_SLEEP);
4607 		}
4608 	}
4609 	vlun->svl_fo_support = opinfo.opinfo_mode;
4610 	mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);
4611 
4612 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
4613 	    " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
4614 	    opinfo.opinfo_rev, opinfo.opinfo_path_state,
4615 	    opinfo.opinfo_preferred, opinfo.opinfo_mode));
4616 
4617 	return (MDI_SUCCESS);
4618 }
4619 
4620 /*
4621  * Form the kstat name and and call mdi_pi_kstat_create()
4622  */
4623 void
4624 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
4625 {
4626 	dev_info_t	*tgt_dip;
4627 	dev_info_t	*pdip;
4628 	char		*guid;
4629 	char		*target_port, *target_port_dup;
4630 	char		ks_name[KSTAT_STRLEN];
4631 	uint_t		pid;
4632 	int		by_id;
4633 	mod_hash_val_t	hv;
4634 
4635 
4636 	/* return if we have already allocated kstats */
4637 	if (mdi_pi_kstat_exists(pip))
4638 		return;
4639 
4640 	/*
4641 	 * We need instance numbers to create a kstat name, return if we don't
4642 	 * have instance numbers assigned yet.
4643 	 */
4644 	tgt_dip = mdi_pi_get_client(pip);
4645 	pdip = mdi_pi_get_phci(pip);
4646 	if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
4647 		return;
4648 
4649 	/*
4650 	 * A path oriented kstat has a ks_name of the form:
4651 	 *
4652 	 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
4653 	 *
4654 	 * We maintain a bidirectional 'target-port' to <pid> map,
4655 	 * called targetmap. All pathinfo nodes with the same
4656 	 * 'target-port' map to the same <pid>. The iostat(1M) code,
4657 	 * when parsing a path oriented kstat name, uses the <pid> as
4658 	 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
4659 	 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
4660 	 * this ioctl needs to translate a <pid> to a 'target-port'
4661 	 * even after all pathinfo nodes associated with the
4662 	 * 'target-port' have been destroyed. This is needed to support
4663 	 * consistent first-iteration activity-since-boot iostat(1M)
4664 	 * output. Because of this requirement, the mapping can't be
4665 	 * based on pathinfo information in a devinfo snapshot.
4666 	 */
4667 
4668 	/* determine 'target-port' */
4669 	if (mdi_prop_lookup_string(pip,
4670 	    "target-port", &target_port) == MDI_SUCCESS) {
4671 		target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
4672 		(void) mdi_prop_free(target_port);
4673 		by_id = 1;
4674 	} else {
4675 		/*
4676 		 * If the pHCI did not set up 'target-port' on this
4677 		 * pathinfo node, assume that our client is the only
4678 		 * one with paths to the device by using the guid
4679 		 * value as the 'target-port'. Since no other client
4680 		 * will have the same guid, no other client will use
4681 		 * the same <pid>.  NOTE: a client with an instance
4682 		 * number always has a guid.
4683 		 */
4684 		(void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
4685 		    PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
4686 		target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
4687 		ddi_prop_free(guid);
4688 
4689 		/*
4690 		 * For this type of mapping we don't want the
4691 		 * <id> -> 'target-port' mapping to be made.  This
4692 		 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
4693 		 * to fail, and the iostat(1M) long '-n' output will
4694 		 * still use the <pid>.  We do this because we just
4695 		 * made up the 'target-port' using the guid, and we
4696 		 * don't want to expose that fact in iostat output.
4697 		 */
4698 		by_id = 0;
4699 	}
4700 
4701 	/* find/establish <pid> given 'target-port' */
4702 	mutex_enter(&vhci_targetmap_mutex);
4703 	if (mod_hash_find(vhci_targetmap_byport,
4704 	    (mod_hash_key_t)target_port_dup, &hv) == 0) {
4705 		pid = (int)(intptr_t)hv;	/* mapping exists */
4706 	} else {
4707 		pid = vhci_targetmap_pid++;	/* new mapping */
4708 
4709 		(void) mod_hash_insert(vhci_targetmap_byport,
4710 		    (mod_hash_key_t)target_port_dup,
4711 		    (mod_hash_val_t)(intptr_t)pid);
4712 		if (by_id) {
4713 			(void) mod_hash_insert(vhci_targetmap_bypid,
4714 			    (mod_hash_key_t)(uintptr_t)pid,
4715 			    (mod_hash_val_t)(uintptr_t)target_port_dup);
4716 		}
4717 		target_port_dup = NULL;		/* owned by hash */
4718 	}
4719 	mutex_exit(&vhci_targetmap_mutex);
4720 
4721 	/* form kstat name */
4722 	(void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
4723 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
4724 	    pid, ddi_driver_name(pdip), ddi_get_instance(pdip));
4725 
4726 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
4727 	    "kstat %s: pid %x <-> port %s\n", (void *)pip,
4728 	    ks_name, pid, target_port_dup));
4729 	if (target_port_dup)
4730 		kmem_free(target_port_dup, strlen(target_port_dup) + 1);
4731 
4732 	/* call mdi to create kstats with the name we built */
4733 	(void) mdi_pi_kstat_create(pip, ks_name);
4734 }
4735 
4736 /* ARGSUSED */
4737 static int
4738 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4739 {
4740 	scsi_hba_tran_t			*hba = NULL;
4741 	struct scsi_device		*psd = NULL;
4742 	scsi_vhci_lun_t			*vlun = NULL;
4743 	dev_info_t			*pdip = NULL;
4744 	dev_info_t			*tgt_dip;
4745 	struct scsi_vhci		*vhci;
4746 	char				*guid;
4747 	struct scsi_failover		*sf;
4748 	struct scsi_failover_ops	*sfo;
4749 	char				*override;
4750 	scsi_vhci_priv_t		*svp = NULL;
4751 	struct buf			*bp;
4752 	struct scsi_address		*ap;
4753 	struct scsi_pkt			*pkt;
4754 	int				rval = MDI_FAILURE;
4755 	uint_t				inq_size = VHCI_STD_INQ_SIZE;
4756 	mpapi_item_list_t		*list_ptr;
4757 	mpapi_lu_data_t			*ld;
4758 
4759 	ASSERT(vdip != NULL);
4760 	ASSERT(pip != NULL);
4761 
4762 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4763 	ASSERT(vhci != NULL);
4764 
4765 	pdip = mdi_pi_get_phci(pip);
4766 	hba = ddi_get_driver_private(pdip);
4767 	ASSERT(hba != NULL);
4768 
4769 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4770 	ASSERT(svp != NULL);
4771 
4772 	tgt_dip = mdi_pi_get_client(pip);
4773 	ASSERT(tgt_dip != NULL);
4774 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
4775 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
4776 		VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
4777 		    "property failed"));
4778 		goto failure;
4779 	}
4780 
4781 	vlun = vhci_lun_lookup(tgt_dip);
4782 	ASSERT(vlun != NULL);
4783 
4784 	ddi_prop_free(guid);
4785 
4786 	vlun->svl_dip = mdi_pi_get_client(pip);
4787 	ASSERT(vlun->svl_dip != NULL);
4788 
4789 	psd = svp->svp_psd;
4790 	ASSERT(psd != NULL);
4791 
4792 	/*
4793 	 * For INQUIRY response buffer size, we use VHCI_STD_INQ_SIZE(132bytes)
4794 	 * instead of SUN_INQSIZE(48bytes) which is used in sd layer. This is
4795 	 * because we could get the Vendor specific parameters(present 97th
4796 	 * byte onwards) which are required to process Vendor specific data
4797 	 * based on array type.
4798 	 * This INQUIRY buffer is freed in vhci_pathinfo_offline but NEVER
4799 	 * in a different layer like sd/phci transport. In other words, vhci
4800 	 * maintains its own copy of scsi_device and scsi_inquiry data on a
4801 	 * per-path basis.
4802 	 */
4803 	if (psd->sd_inq == NULL) {
4804 		psd->sd_inq = (struct scsi_inquiry *)
4805 		    kmem_zalloc(inq_size, KM_SLEEP);
4806 	}
4807 
4808 	tgt_dip = psd->sd_dev;
4809 	ASSERT(tgt_dip != NULL);
4810 
4811 	/*
4812 	 * do inquiry to pass into probe routine; this
4813 	 * will avoid each probe routine doing scsi inquiry
4814 	 */
4815 	bp = getrbuf(KM_SLEEP);
4816 	bp->b_un.b_addr = (caddr_t)psd->sd_inq;
4817 	bp->b_flags = B_READ;
4818 	bp->b_bcount = inq_size;
4819 	bp->b_resid = 0;
4820 
4821 	ap = &psd->sd_address;
4822 	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP0,
4823 	    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
4824 	if (pkt == NULL) {
4825 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
4826 		    "Inquiry init_pkt failed :%p\n", (void *)pip));
4827 		rval = MDI_FAILURE;
4828 		goto failure;
4829 	}
4830 	pkt->pkt_cdbp[0] = SCMD_INQUIRY;
4831 	pkt->pkt_cdbp[4] = (uchar_t)inq_size;
4832 	pkt->pkt_time = 60;
4833 
4834 	rval = vhci_do_scsi_cmd(pkt);
4835 	scsi_destroy_pkt(pkt);
4836 	freerbuf(bp);
4837 	if (rval == 0) {
4838 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
4839 		    "Failover Inquiry failed path:%p rval:%x\n",
4840 		    (void *)pip, rval));
4841 		rval = MDI_FAILURE;
4842 		goto failure;
4843 	}
4844 
4845 	/*
4846 	 * Determine if device is supported under scsi_vhci, and select
4847 	 * failover module.
4848 	 *
4849 	 * See if there is a scsi_vhci.conf file override for this devices's
4850 	 * VID/PID. The following values can be returned:
4851 	 *
4852 	 * NULL		If the NULL is returned then there is no scsi_vhci.conf
4853 	 *		override.  For NULL, we determine the failover_ops for
4854 	 *		this device by checking the sfo_device_probe entry
4855 	 *		point for each 'fops' module, in order.
4856 	 *
4857 	 *		NOTE: Correct operation may depend on module ordering
4858 	 *		of 'specific' (failover modules that are completely
4859 	 *		VID/PID table based) to 'generic' (failover modules
4860 	 *		that based on T10 standards like TPGS).  Currently,
4861 	 *		the value of 'ddi-forceload' in scsi_vhci.conf is used
4862 	 *		to establish the module list and probe order.
4863 	 *
4864 	 * "NONE"	If value "NONE" is returned then there is a
4865 	 *		scsi_vhci.conf VID/PID override to indicate the device
4866 	 *		should not be supported under scsi_vhci (even if there
4867 	 *		is an 'fops' module supporting the device).
4868 	 *
4869 	 * "<other>"	If another value is returned then that value is the
4870 	 *		name of the 'fops' module that should be used.
4871 	 */
4872 	sfo = NULL;	/* "NONE" */
4873 	override = scsi_get_device_type_string(
4874 	    "scsi-vhci-failover-override", vdip, psd);
4875 
4876 	if (override == NULL) {
4877 		/* NULL: default: select based on sfo_device_probe results */
4878 		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
4879 			if ((sf->sf_sfo == NULL) ||
4880 			    ((*sf->sf_sfo->sfo_device_probe) (psd,
4881 			    psd->sd_inq, &vlun->svl_fops_ctpriv) ==
4882 			    SFO_DEVICE_PROBE_PHCI))
4883 				continue;
4884 
4885 			/* found failover module, supported under scsi_vhci */
4886 			sfo = sf->sf_sfo;
4887 			vlun->svl_fops_name =
4888 			    i_ddi_strdup(sfo->sfo_name, KM_SLEEP);
4889 			break;
4890 		}
4891 	} else if (strcmp(override, "NONE") && strcmp(override, "none")) {
4892 		/* !"NONE": select based on driver.conf specified name */
4893 		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
4894 			if ((sf->sf_sfo == NULL) ||
4895 			    (sf->sf_sfo->sfo_name == NULL) ||
4896 			    strcmp(override, sf->sf_sfo->sfo_name))
4897 				continue;
4898 
4899 			/* found failover module, supported under scsi_vhci */
4900 			sfo = sf->sf_sfo;
4901 			vlun->svl_fops_name = kmem_alloc(strlen("conf ") +
4902 			    strlen(sfo->sfo_name) + 1, KM_SLEEP);
4903 			(void) sprintf(vlun->svl_fops_name, "conf %s",
4904 			    sfo->sfo_name);
4905 			break;
4906 		}
4907 	}
4908 	if (override)
4909 		kmem_free(override, strlen(override) + 1);
4910 
4911 	if (sfo == NULL) {
4912 		/* no failover module - device not supported */
4913 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
4914 		    "!vhci_pathinfo_online: dev (path 0x%p) not "
4915 		    "supported\n", (void *)pip));
4916 		vlun->svl_not_supported = 1;
4917 		rval = MDI_NOT_SUPPORTED;
4918 		goto done;
4919 	}
4920 
4921 	/* failover supported for device - save failover_ops in vlun */
4922 	vlun->svl_fops = sfo;
4923 
4924 	/*
4925 	 * Obtain the device-type based mpxio options as specified in
4926 	 * scsi_vhci.conf file.
4927 	 *
4928 	 * NOTE: currently, the end result is a call to
4929 	 * mdi_set_lb_region_size().
4930 	 */
4931 	vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);
4932 
4933 	/*
4934 	 * if PGR is active, revalidate key and register on this path also,
4935 	 * if key is still valid
4936 	 */
4937 	sema_p(&vlun->svl_pgr_sema);
4938 	if (vlun->svl_pgr_active) {
4939 		rval = vhci_pgr_validate_and_register(svp);
4940 		if (rval != 1) {
4941 			rval = MDI_FAILURE;
4942 			sema_v(&vlun->svl_pgr_sema);
4943 			goto failure;
4944 		}
4945 	}
4946 	sema_v(&vlun->svl_pgr_sema);
4947 
4948 	if (svp->svp_new_path) {
4949 		/*
4950 		 * Last chance to perform any cleanup operations on this
4951 		 * new path before making this path completely online.
4952 		 */
4953 		svp->svp_new_path = 0;
4954 
4955 		/*
4956 		 * If scsi_vhci knows the lun is alread RESERVE'd,
4957 		 * then skip the issue of RELEASE on new path.
4958 		 */
4959 		if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
4960 			/*
4961 			 * Issue SCSI-2 RELEASE only for the first time on
4962 			 * a new path just in case the host rebooted and
4963 			 * a reservation is still pending on this path.
4964 			 * IBM Shark storage does not clear RESERVE upon
4965 			 * host reboot.
4966 			 */
4967 			ap = &psd->sd_address;
4968 			pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
4969 			    sizeof (struct scsi_arq_status), 0, 0,
4970 			    SLEEP_FUNC, NULL);
4971 			if (pkt == NULL) {
4972 				VHCI_DEBUG(1, (CE_NOTE, NULL,
4973 				    "!vhci_pathinfo_online: "
4974 				    "Release init_pkt failed :%p\n",
4975 				    (void *)pip));
4976 				rval = MDI_FAILURE;
4977 				goto failure;
4978 			}
4979 			pkt->pkt_cdbp[0] = SCMD_RELEASE;
4980 			pkt->pkt_time = 60;
4981 
4982 			VHCI_DEBUG(1, (CE_NOTE, NULL,
4983 			    "!vhci_path_online: path:%p "
4984 			    "Issued SCSI-2 RELEASE\n", (void *)pip));
4985 
4986 			/* Ignore the return value */
4987 			(void) vhci_do_scsi_cmd(pkt);
4988 			scsi_destroy_pkt(pkt);
4989 		}
4990 	}
4991 
4992 	rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
4993 	if (rval == MDI_FAILURE) {
4994 		goto failure;
4995 	}
4996 
4997 	/* Initialize MP-API data */
4998 	vhci_update_mpapi_data(vhci, vlun, pip);
4999 
5000 	/*
5001 	 * MP-API also needs the Inquiry data to be maintained in the
5002 	 * mp_vendor_prop_t structure, so find the lun and update its
5003 	 * structure with this data.
5004 	 */
5005 	list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
5006 	    MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
5007 	ld = (mpapi_lu_data_t *)list_ptr->item->idata;
5008 	if (ld != NULL) {
5009 		bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
5010 		bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
5011 		bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
5012 	} else {
5013 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
5014 		    "mpapi_lu_data_t is NULL"));
5015 	}
5016 
5017 	/* create kstats for path */
5018 	vhci_kstat_create_pathinfo(pip);
5019 
5020 done:
5021 	mutex_enter(&vhci_global_mutex);
5022 	cv_broadcast(&vhci_cv);
5023 	mutex_exit(&vhci_global_mutex);
5024 
5025 	if (vlun->svl_setcap_done) {
5026 		(void) vhci_pHCI_cap(ap, "sector-size",
5027 		    vlun->svl_sector_size, 1, pip);
5028 	}
5029 
5030 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
5031 	    (void *)pip));
5032 
5033 failure:
5034 	if ((rval != MDI_SUCCESS) && psd->sd_inq) {
5035 		kmem_free((caddr_t)psd->sd_inq, inq_size);
5036 		psd->sd_inq = (struct scsi_inquiry *)NULL;
5037 	}
5038 	return (rval);
5039 }
5040 
5041 /*
5042  * path offline handler.  Release all bindings that will not be
5043  * released by the normal packet transport/completion code path.
5044  * Since we don't (presently) keep any bindings alive outside of
5045  * the in-transport packets (which will be released on completion)
5046  * there is not much to do here.
5047  */
5048 /* ARGSUSED */
5049 static int
5050 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5051 {
5052 	scsi_hba_tran_t		*hba = NULL;
5053 	struct scsi_device	*psd = NULL;
5054 	dev_info_t		*pdip = NULL;
5055 	dev_info_t		*cdip = NULL;
5056 	scsi_vhci_priv_t	*svp = NULL;
5057 	uint_t			inq_size = VHCI_STD_INQ_SIZE;
5058 
5059 	ASSERT(vdip != NULL);
5060 	ASSERT(pip != NULL);
5061 
5062 	pdip = mdi_pi_get_phci(pip);
5063 	ASSERT(pdip != NULL);
5064 	if (pdip == NULL) {
5065 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5066 		    "phci dip", (void *)pip));
5067 		return (MDI_FAILURE);
5068 	}
5069 
5070 	cdip = mdi_pi_get_client(pip);
5071 	ASSERT(cdip != NULL);
5072 	if (cdip == NULL) {
5073 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5074 		    "client dip", (void *)pip));
5075 		return (MDI_FAILURE);
5076 	}
5077 
5078 	hba = ddi_get_driver_private(pdip);
5079 	ASSERT(hba != NULL);
5080 
5081 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5082 	if (svp == NULL) {
5083 		/*
5084 		 * mdi_pathinfo node in INIT state can have vHCI private
5085 		 * information set to null
5086 		 */
5087 		VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5088 		    "svp is NULL for pip 0x%p\n", (void *)pip));
5089 		return (MDI_SUCCESS);
5090 	}
5091 
5092 	psd = svp->svp_psd;
5093 	ASSERT(psd != NULL);
5094 
5095 	mutex_enter(&svp->svp_mutex);
5096 
5097 	VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5098 	    "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
5099 	while (svp->svp_cmds != 0) {
5100 		if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex,
5101 		    ddi_get_lbolt() +
5102 		    drv_usectohz(vhci_path_quiesce_timeout * 1000000)) == -1) {
5103 			/*
5104 			 * The timeout time reached without the condition
5105 			 * being signaled.
5106 			 */
5107 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5108 			    "Timeout reached on path 0x%p without the cond\n",
5109 			    (void *)pip));
5110 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5111 			    "%d cmds still pending on path: 0x%p\n",
5112 			    svp->svp_cmds, (void *)pip));
5113 			break;
5114 		}
5115 	}
5116 	mutex_exit(&svp->svp_mutex);
5117 
5118 	/*
5119 	 * Check to see if this vlun has an active SCSI-II RESERVE. And this
5120 	 * is the pip for the path that has been reserved.
5121 	 * If so clear the reservation by sending a reset, so the host will not
5122 	 * get a reservation conflict.  Reset the flag VLUN_RESERVE_ACTIVE_FLG
5123 	 * for this lun.  Also a reset notify is sent to the target driver
5124 	 * just in case the POR check condition is cleared by some other layer
5125 	 * in the stack.
5126 	 */
5127 	if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
5128 		if (pip == svp->svp_svl->svl_resrv_pip) {
5129 			if (vhci_recovery_reset(svp->svp_svl,
5130 			    &svp->svp_psd->sd_address, TRUE,
5131 			    VHCI_DEPTH_TARGET) == 0) {
5132 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5133 				    "!vhci_pathinfo_offline (pip:%p):"
5134 				    "reset failed, retrying\n", (void *)pip));
5135 				delay(1*drv_usectohz(1000000));
5136 				if (vhci_recovery_reset(svp->svp_svl,
5137 				    &svp->svp_psd->sd_address, TRUE,
5138 				    VHCI_DEPTH_TARGET) == 0) {
5139 					VHCI_DEBUG(1, (CE_NOTE, NULL,
5140 					    "!vhci_pathinfo_offline "
5141 					    "(pip:%p): reset failed, "
5142 					    "giving up!\n", (void *)pip));
5143 				}
5144 			}
5145 			svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
5146 		}
5147 	}
5148 
5149 	mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
5150 	if (psd->sd_inq) {
5151 		kmem_free((caddr_t)psd->sd_inq, inq_size);
5152 		psd->sd_inq = (struct scsi_inquiry *)NULL;
5153 	}
5154 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
5155 
5156 	VHCI_DEBUG(1, (CE_NOTE, NULL,
5157 	    "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
5158 	return (MDI_SUCCESS);
5159 }
5160 
5161 
5162 /*
5163  * routine for SCSI VHCI IOCTL implementation.
5164  */
5165 /* ARGSUSED */
5166 static int
5167 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
5168 {
5169 	struct scsi_vhci		*vhci;
5170 	dev_info_t			*vdip;
5171 	mdi_pathinfo_t			*pip;
5172 	int				instance, held;
5173 	int				retval = 0;
5174 	caddr_t				phci_path = NULL, client_path = NULL;
5175 	caddr_t				paddr = NULL;
5176 	sv_iocdata_t			ioc;
5177 	sv_iocdata_t			*pioc = &ioc;
5178 	sv_switch_to_cntlr_iocdata_t	iocsc;
5179 	sv_switch_to_cntlr_iocdata_t	*piocsc = &iocsc;
5180 	caddr_t				s;
5181 	scsi_vhci_lun_t			*vlun;
5182 	struct scsi_failover_ops	*fo;
5183 	char				*pclass;
5184 
5185 	/* Check for validity of vhci structure */
5186 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
5187 	if (vhci == NULL) {
5188 		return (ENXIO);
5189 	}
5190 
5191 	mutex_enter(&vhci->vhci_mutex);
5192 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5193 		mutex_exit(&vhci->vhci_mutex);
5194 		return (ENXIO);
5195 	}
5196 	mutex_exit(&vhci->vhci_mutex);
5197 
5198 	/* Get the vhci dip */
5199 	vdip = vhci->vhci_dip;
5200 	ASSERT(vdip != NULL);
5201 	instance = ddi_get_instance(vdip);
5202 
5203 	/* Allocate memory for getting parameters from userland */
5204 	phci_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5205 	client_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5206 	paddr		= kmem_zalloc(MAXNAMELEN, KM_SLEEP);
5207 
5208 	/*
5209 	 * Set a local variable indicating the ioctl name. Used for
5210 	 * printing debug strings.
5211 	 */
5212 	switch (cmd) {
5213 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5214 		s = "GET_CLIENT_MULTIPATH_INFO";
5215 		break;
5216 
5217 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5218 		s = "GET_PHCI_MULTIPATH_INFO";
5219 		break;
5220 
5221 	case SCSI_VHCI_GET_CLIENT_NAME:
5222 		s = "GET_CLIENT_NAME";
5223 		break;
5224 
5225 	case SCSI_VHCI_PATH_ONLINE:
5226 		s = "PATH_ONLINE";
5227 		break;
5228 
5229 	case SCSI_VHCI_PATH_OFFLINE:
5230 		s = "PATH_OFFLINE";
5231 		break;
5232 
5233 	case SCSI_VHCI_PATH_STANDBY:
5234 		s = "PATH_STANDBY";
5235 		break;
5236 
5237 	case SCSI_VHCI_PATH_TEST:
5238 		s = "PATH_TEST";
5239 		break;
5240 
5241 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5242 		s = "SWITCH_TO_CNTLR";
5243 		break;
5244 	case SCSI_VHCI_PATH_DISABLE:
5245 		s = "PATH_DISABLE";
5246 		break;
5247 	case SCSI_VHCI_PATH_ENABLE:
5248 		s = "PATH_ENABLE";
5249 		break;
5250 
5251 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5252 		s = "GET_TARGET_LONGNAME";
5253 		break;
5254 
5255 #ifdef	DEBUG
5256 	case SCSI_VHCI_CONFIGURE_PHCI:
5257 		s = "CONFIGURE_PHCI";
5258 		break;
5259 
5260 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5261 		s = "UNCONFIGURE_PHCI";
5262 		break;
5263 #endif
5264 
5265 	default:
5266 		s = "Unknown";
5267 		vhci_log(CE_NOTE, vdip,
5268 		    "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
5269 		retval = ENOTSUP;
5270 		break;
5271 	}
5272 	if (retval != 0) {
5273 		goto end;
5274 	}
5275 
5276 	VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));
5277 
5278 	/*
5279 	 * Get IOCTL parameters from userland
5280 	 */
5281 	switch (cmd) {
5282 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5283 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5284 	case SCSI_VHCI_GET_CLIENT_NAME:
5285 	case SCSI_VHCI_PATH_ONLINE:
5286 	case SCSI_VHCI_PATH_OFFLINE:
5287 	case SCSI_VHCI_PATH_STANDBY:
5288 	case SCSI_VHCI_PATH_TEST:
5289 	case SCSI_VHCI_PATH_DISABLE:
5290 	case SCSI_VHCI_PATH_ENABLE:
5291 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5292 #ifdef	DEBUG
5293 	case SCSI_VHCI_CONFIGURE_PHCI:
5294 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5295 #endif
5296 		retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
5297 		break;
5298 
5299 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5300 		retval = vhci_get_iocswitchdata((const void *)data, piocsc,
5301 		    mode, s);
5302 		break;
5303 	}
5304 	if (retval != 0) {
5305 		goto end;
5306 	}
5307 
5308 
5309 	/*
5310 	 * Process the IOCTL
5311 	 */
5312 	switch (cmd) {
5313 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5314 	{
5315 		uint_t		num_paths;	/* Num paths to client dev */
5316 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5317 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5318 		dev_info_t	*cdip;		/* Client device dip */
5319 
5320 		if (pioc->ret_elem == NULL) {
5321 			retval = EINVAL;
5322 			break;
5323 		}
5324 
5325 		/* Get client device path from user land */
5326 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5327 			retval = EFAULT;
5328 			break;
5329 		}
5330 
5331 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5332 		    "client <%s>", s, client_path));
5333 
5334 		/* Get number of paths to this client device */
5335 		if ((cdip = mdi_client_path2devinfo(vdip, client_path))
5336 		    == NULL) {
5337 			retval = ENXIO;
5338 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5339 			    "client dip doesn't exist. invalid path <%s>",
5340 			    s, client_path));
5341 			break;
5342 		}
5343 		num_paths = mdi_client_get_path_count(cdip);
5344 
5345 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5346 		    sizeof (num_paths), mode)) {
5347 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5348 			    "num_paths copyout failed", s));
5349 			retval = EFAULT;
5350 			break;
5351 		}
5352 
5353 		/* If  user just wanted num_paths, then return */
5354 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5355 		    num_paths == 0) {
5356 			break;
5357 		}
5358 
5359 		/* Set num_paths to value as much as can be sent to userland */
5360 		if (num_paths > pioc->buf_elem) {
5361 			num_paths = pioc->buf_elem;
5362 		}
5363 
5364 		/* Allocate memory and get userland pointers */
5365 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5366 		    pioc, mode, s) != 0) {
5367 			retval = EFAULT;
5368 			break;
5369 		}
5370 		ASSERT(upibuf != NULL);
5371 		ASSERT(kpibuf != NULL);
5372 
5373 		/*
5374 		 * Get the path information and send it to userland.
5375 		 */
5376 		if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
5377 		    != MDI_SUCCESS) {
5378 			retval = ENXIO;
5379 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5380 			break;
5381 		}
5382 
5383 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5384 		    pioc, mode, s)) {
5385 			retval = EFAULT;
5386 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5387 			break;
5388 		}
5389 
5390 		/* Free the memory allocated for path information */
5391 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5392 		break;
5393 	}
5394 
5395 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5396 	{
5397 		uint_t		num_paths;	/* Num paths to client dev */
5398 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5399 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5400 		dev_info_t	*pdip;		/* PHCI device dip */
5401 
5402 		if (pioc->ret_elem == NULL) {
5403 			retval = EINVAL;
5404 			break;
5405 		}
5406 
5407 		/* Get PHCI device path from user land */
5408 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5409 			retval = EFAULT;
5410 			break;
5411 		}
5412 
5413 		VHCI_DEBUG(6, (CE_WARN, vdip,
5414 		    "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
5415 
5416 		/* Get number of devices associated with this PHCI device */
5417 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5418 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5419 			    "phci dip doesn't exist. invalid path <%s>",
5420 			    s, phci_path));
5421 			retval = ENXIO;
5422 			break;
5423 		}
5424 
5425 		num_paths = mdi_phci_get_path_count(pdip);
5426 
5427 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5428 		    sizeof (num_paths), mode)) {
5429 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5430 			    "num_paths copyout failed", s));
5431 			retval = EFAULT;
5432 			break;
5433 		}
5434 
5435 		/* If  user just wanted num_paths, then return */
5436 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5437 		    num_paths == 0) {
5438 			break;
5439 		}
5440 
5441 		/* Set num_paths to value as much as can be sent to userland */
5442 		if (num_paths > pioc->buf_elem) {
5443 			num_paths = pioc->buf_elem;
5444 		}
5445 
5446 		/* Allocate memory and get userland pointers */
5447 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5448 		    pioc, mode, s) != 0) {
5449 			retval = EFAULT;
5450 			break;
5451 		}
5452 		ASSERT(upibuf != NULL);
5453 		ASSERT(kpibuf != NULL);
5454 
5455 		/*
5456 		 * Get the path information and send it to userland.
5457 		 */
5458 		if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
5459 		    != MDI_SUCCESS) {
5460 			retval = ENXIO;
5461 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5462 			break;
5463 		}
5464 
5465 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5466 		    pioc, mode, s)) {
5467 			retval = EFAULT;
5468 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5469 			break;
5470 		}
5471 
5472 		/* Free the memory allocated for path information */
5473 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5474 		break;
5475 	}
5476 
5477 	case SCSI_VHCI_GET_CLIENT_NAME:
5478 	{
5479 		dev_info_t		*cdip, *pdip;
5480 
5481 		/* Get PHCI path and device address from user land */
5482 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5483 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5484 			retval = EFAULT;
5485 			break;
5486 		}
5487 
5488 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5489 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5490 
5491 		/* Get the PHCI dip */
5492 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5493 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5494 			    "phci dip doesn't exist. invalid path <%s>",
5495 			    s, phci_path));
5496 			retval = ENXIO;
5497 			break;
5498 		}
5499 
5500 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5501 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5502 			    "pathinfo doesn't exist. invalid device addr", s));
5503 			retval = ENXIO;
5504 			break;
5505 		}
5506 
5507 		/* Get the client device pathname and send to userland */
5508 		cdip = mdi_pi_get_client(pip);
5509 		vhci_ioc_devi_to_path(cdip, client_path);
5510 
5511 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5512 		    "client <%s>", s, client_path));
5513 
5514 		if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
5515 			retval = EFAULT;
5516 			break;
5517 		}
5518 		break;
5519 	}
5520 
5521 	case SCSI_VHCI_PATH_ONLINE:
5522 	case SCSI_VHCI_PATH_OFFLINE:
5523 	case SCSI_VHCI_PATH_STANDBY:
5524 	case SCSI_VHCI_PATH_TEST:
5525 	{
5526 		dev_info_t		*pdip;	/* PHCI dip */
5527 
5528 		/* Get PHCI path and device address from user land */
5529 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5530 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5531 			retval = EFAULT;
5532 			break;
5533 		}
5534 
5535 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5536 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5537 
5538 		/* Get the PHCI dip */
5539 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5540 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5541 			    "phci dip doesn't exist. invalid path <%s>",
5542 			    s, phci_path));
5543 			retval = ENXIO;
5544 			break;
5545 		}
5546 
5547 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5548 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5549 			    "pathinfo doesn't exist. invalid device addr", s));
5550 			retval = ENXIO;
5551 			break;
5552 		}
5553 
5554 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5555 		    "Calling MDI function to change device state", s));
5556 
5557 		switch (cmd) {
5558 		case SCSI_VHCI_PATH_ONLINE:
5559 			retval = mdi_pi_online(pip, 0);
5560 			break;
5561 
5562 		case SCSI_VHCI_PATH_OFFLINE:
5563 			retval = mdi_pi_offline(pip, 0);
5564 			break;
5565 
5566 		case SCSI_VHCI_PATH_STANDBY:
5567 			retval = mdi_pi_standby(pip, 0);
5568 			break;
5569 
5570 		case SCSI_VHCI_PATH_TEST:
5571 			break;
5572 		}
5573 		break;
5574 	}
5575 
5576 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5577 	{
5578 		dev_info_t *cdip;
5579 		struct scsi_device *devp;
5580 
5581 		/* Get the client device pathname */
5582 		if (ddi_copyin(piocsc->client, client_path,
5583 		    MAXPATHLEN, mode)) {
5584 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5585 			    "client_path copyin failed", s));
5586 			retval = EFAULT;
5587 			break;
5588 		}
5589 
5590 		/* Get the path class to which user wants to switch */
5591 		if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
5592 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5593 			    "controller_class copyin failed", s));
5594 			retval = EFAULT;
5595 			break;
5596 		}
5597 
5598 		/* Perform validity checks */
5599 		if ((cdip = mdi_client_path2devinfo(vdip,
5600 		    client_path)) == NULL) {
5601 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5602 			    "client dip doesn't exist. invalid path <%s>",
5603 			    s, client_path));
5604 			retval = ENXIO;
5605 			break;
5606 		}
5607 
5608 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
5609 		    "to switch controller"));
5610 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
5611 		    "class <%s>", client_path, paddr));
5612 
5613 		if (strcmp(paddr, PCLASS_PRIMARY) &&
5614 		    strcmp(paddr, PCLASS_SECONDARY)) {
5615 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5616 			    "invalid path class <%s>", s, paddr));
5617 			retval = ENXIO;
5618 			break;
5619 		}
5620 
5621 		devp = ddi_get_driver_private(cdip);
5622 		if (devp == NULL) {
5623 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5624 			    "invalid scsi device <%s>", s, client_path));
5625 			retval = ENXIO;
5626 			break;
5627 		}
5628 		vlun = ADDR2VLUN(&devp->sd_address);
5629 		ASSERT(vlun);
5630 
5631 		/*
5632 		 * Checking to see if device has only one pclass, PRIMARY.
5633 		 * If so this device doesn't support failovers.  Assumed
5634 		 * that the devices with one pclass is PRIMARY, as thats the
5635 		 * case today.  If this is not true and in future other
5636 		 * symmetric devices are supported with other pclass, this
5637 		 * IOCTL shall have to be overhauled anyways as now the only
5638 		 * arguments it accepts are PRIMARY and SECONDARY.
5639 		 */
5640 		fo = vlun->svl_fops;
5641 		if ((*fo->sfo_pathclass_next)(PCLASS_PRIMARY, &pclass,
5642 		    vlun->svl_fops_ctpriv)) {
5643 			retval = ENOTSUP;
5644 			break;
5645 		}
5646 
5647 		VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
5648 		mutex_enter(&vlun->svl_mutex);
5649 		if (vlun->svl_active_pclass != NULL) {
5650 			if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
5651 				mutex_exit(&vlun->svl_mutex);
5652 				retval = EALREADY;
5653 				VHCI_RELEASE_LUN(vlun);
5654 				break;
5655 			}
5656 		}
5657 		mutex_exit(&vlun->svl_mutex);
5658 		/* Call mdi function to cause  a switch over */
5659 		retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
5660 		if (retval == MDI_SUCCESS) {
5661 			retval = 0;
5662 		} else if (retval == MDI_BUSY) {
5663 			retval = EBUSY;
5664 		} else {
5665 			retval = EIO;
5666 		}
5667 		VHCI_RELEASE_LUN(vlun);
5668 		break;
5669 	}
5670 
5671 	case SCSI_VHCI_PATH_ENABLE:
5672 	case SCSI_VHCI_PATH_DISABLE:
5673 	{
5674 		dev_info_t	*cdip, *pdip;
5675 
5676 		/*
5677 		 * Get client device path from user land
5678 		 */
5679 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5680 			retval = EFAULT;
5681 			break;
5682 		}
5683 
5684 		/*
5685 		 * Get Phci device path from user land
5686 		 */
5687 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5688 			retval = EFAULT;
5689 			break;
5690 		}
5691 
5692 		/*
5693 		 * Get the devinfo for the Phci.
5694 		 */
5695 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5696 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5697 			    "phci dip doesn't exist. invalid path <%s>",
5698 			    s, phci_path));
5699 			retval = ENXIO;
5700 			break;
5701 		}
5702 
5703 		/*
5704 		 * If the client path is set to /scsi_vhci then we need
5705 		 * to do the operation on all the clients so set cdip to NULL.
5706 		 * Else, try to get the client dip.
5707 		 */
5708 		if (strcmp(client_path, "/scsi_vhci") == 0) {
5709 			cdip = NULL;
5710 		} else {
5711 			if ((cdip = mdi_client_path2devinfo(vdip,
5712 			    client_path)) == NULL) {
5713 				retval = ENXIO;
5714 				VHCI_DEBUG(1, (CE_WARN, NULL,
5715 				    "!vhci_ioctl: ioctl <%s> client dip "
5716 				    "doesn't exist. invalid path <%s>",
5717 				    s, client_path));
5718 				break;
5719 			}
5720 		}
5721 
5722 		if (cmd == SCSI_VHCI_PATH_ENABLE)
5723 			retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
5724 		else
5725 			retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);
5726 
5727 		break;
5728 	}
5729 
5730 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5731 	{
5732 		uint_t		pid = pioc->buf_elem;
5733 		char		*target_port;
5734 		mod_hash_val_t	hv;
5735 
5736 		/* targetmap lookup of 'target-port' by <pid> */
5737 		if (mod_hash_find(vhci_targetmap_bypid,
5738 		    (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
5739 			/*
5740 			 * NOTE: failure to find the mapping is OK for guid
5741 			 * based 'target-port' values.
5742 			 */
5743 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5744 			    "targetport mapping doesn't exist: pid %d",
5745 			    s, pid));
5746 			retval = ENXIO;
5747 			break;
5748 		}
5749 
5750 		/* copyout 'target-port' result */
5751 		target_port = (char *)hv;
5752 		if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
5753 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5754 			    "targetport copyout failed: len: %d",
5755 			    s, (int)strlen(target_port)));
5756 			retval = EFAULT;
5757 		}
5758 		break;
5759 	}
5760 
5761 #ifdef	DEBUG
5762 	case SCSI_VHCI_CONFIGURE_PHCI:
5763 	{
5764 		dev_info_t		*pdip;
5765 
5766 		/* Get PHCI path and device address from user land */
5767 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5768 			retval = EFAULT;
5769 			break;
5770 		}
5771 
5772 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5773 		    "phci <%s>", s, phci_path));
5774 
5775 		/* Get the PHCI dip */
5776 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
5777 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5778 			    "phci dip doesn't exist. invalid path <%s>",
5779 			    s, phci_path));
5780 			retval = ENXIO;
5781 			break;
5782 		}
5783 
5784 		if (ndi_devi_config(pdip,
5785 		    NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) {
5786 			retval = EIO;
5787 		}
5788 
5789 		ddi_release_devi(pdip);
5790 		break;
5791 	}
5792 
5793 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5794 	{
5795 		dev_info_t		*pdip;
5796 
5797 		/* Get PHCI path and device address from user land */
5798 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5799 			retval = EFAULT;
5800 			break;
5801 		}
5802 
5803 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5804 		    "phci <%s>", s, phci_path));
5805 
5806 		/* Get the PHCI dip */
5807 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
5808 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5809 			    "phci dip doesn't exist. invalid path <%s>",
5810 			    s, phci_path));
5811 			retval = ENXIO;
5812 			break;
5813 		}
5814 
5815 		if (ndi_devi_unconfig(pdip,
5816 		    NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
5817 			retval = EBUSY;
5818 		}
5819 
5820 		ddi_release_devi(pdip);
5821 		break;
5822 	}
5823 #endif
5824 	}
5825 
5826 end:
5827 	/* Free the memory allocated above */
5828 	if (phci_path != NULL) {
5829 		kmem_free(phci_path, MAXPATHLEN);
5830 	}
5831 	if (client_path != NULL) {
5832 		kmem_free(client_path, MAXPATHLEN);
5833 	}
5834 	if (paddr != NULL) {
5835 		kmem_free(paddr, MAXNAMELEN);
5836 	}
5837 	return (retval);
5838 }
5839 
5840 /*
5841  * devctl IOCTL support for client device DR
5842  */
5843 /* ARGSUSED */
5844 int
5845 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
5846     int *rvalp)
5847 {
5848 	dev_info_t *self;
5849 	dev_info_t *child;
5850 	scsi_hba_tran_t *hba;
5851 	struct devctl_iocdata *dcp;
5852 	struct scsi_vhci *vhci;
5853 	int rv = 0;
5854 	int retval = 0;
5855 	scsi_vhci_priv_t *svp;
5856 	mdi_pathinfo_t  *pip;
5857 
5858 	if ((vhci = ddi_get_soft_state(vhci_softstate,
5859 	    MINOR2INST(getminor(dev)))) == NULL)
5860 		return (ENXIO);
5861 
5862 	/*
5863 	 * check if :devctl minor device has been opened
5864 	 */
5865 	mutex_enter(&vhci->vhci_mutex);
5866 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5867 		mutex_exit(&vhci->vhci_mutex);
5868 		return (ENXIO);
5869 	}
5870 	mutex_exit(&vhci->vhci_mutex);
5871 
5872 	self = vhci->vhci_dip;
5873 	hba = ddi_get_driver_private(self);
5874 	if (hba == NULL)
5875 		return (ENXIO);
5876 
5877 	/*
5878 	 * We can use the generic implementation for these ioctls
5879 	 */
5880 	switch (cmd) {
5881 	case DEVCTL_DEVICE_GETSTATE:
5882 	case DEVCTL_DEVICE_ONLINE:
5883 	case DEVCTL_DEVICE_OFFLINE:
5884 	case DEVCTL_DEVICE_REMOVE:
5885 	case DEVCTL_BUS_GETSTATE:
5886 		return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
5887 	}
5888 
5889 	/*
5890 	 * read devctl ioctl data
5891 	 */
5892 	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
5893 		return (EFAULT);
5894 
5895 	switch (cmd) {
5896 
5897 	case DEVCTL_DEVICE_RESET:
5898 		/*
5899 		 * lookup and hold child device
5900 		 */
5901 		if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
5902 		    ndi_dc_getaddr(dcp))) == NULL) {
5903 			rv = ENXIO;
5904 			break;
5905 		}
5906 		retval = mdi_select_path(child, NULL,
5907 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
5908 		    NULL, &pip);
5909 		if ((retval != MDI_SUCCESS) || (pip == NULL)) {
5910 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:"
5911 			    "Unable to get a path, dip 0x%p", (void *)child));
5912 			rv = ENXIO;
5913 			break;
5914 		}
5915 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5916 		if (vhci_recovery_reset(svp->svp_svl,
5917 		    &svp->svp_psd->sd_address, TRUE,
5918 		    VHCI_DEPTH_TARGET) == 0) {
5919 			VHCI_DEBUG(1, (CE_NOTE, NULL,
5920 			    "!vhci_ioctl(pip:%p): "
5921 			    "reset failed\n", (void *)pip));
5922 			rv = ENXIO;
5923 		}
5924 		mdi_rele_path(pip);
5925 		break;
5926 
5927 	case DEVCTL_BUS_QUIESCE:
5928 	case DEVCTL_BUS_UNQUIESCE:
5929 	case DEVCTL_BUS_RESET:
5930 	case DEVCTL_BUS_RESETALL:
5931 #ifdef	DEBUG
5932 	case DEVCTL_BUS_CONFIGURE:
5933 	case DEVCTL_BUS_UNCONFIGURE:
5934 #endif
5935 		rv = ENOTSUP;
5936 		break;
5937 
5938 	default:
5939 		rv = ENOTTY;
5940 	} /* end of outer switch */
5941 
5942 	ndi_dc_freehdl(dcp);
5943 	return (rv);
5944 }
5945 
5946 /*
5947  * Routine to get the PHCI pathname from ioctl structures in userland
5948  */
5949 /* ARGSUSED */
5950 static int
5951 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
5952 	int mode, caddr_t s)
5953 {
5954 	int retval = 0;
5955 
5956 	if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
5957 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
5958 		    "phci_path copyin failed", s));
5959 		retval = EFAULT;
5960 	}
5961 	return (retval);
5962 
5963 }
5964 
5965 
5966 /*
5967  * Routine to get the Client device pathname from ioctl structures in userland
5968  */
5969 /* ARGSUSED */
5970 static int
5971 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
5972 	int mode, caddr_t s)
5973 {
5974 	int retval = 0;
5975 
5976 	if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
5977 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
5978 		    "ioctl <%s> client_path copyin failed", s));
5979 		retval = EFAULT;
5980 	}
5981 	return (retval);
5982 }
5983 
5984 
5985 /*
5986  * Routine to get physical device address from ioctl structure in userland
5987  */
5988 /* ARGSUSED */
5989 static int
5990 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
5991 {
5992 	int retval = 0;
5993 
5994 	if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
5995 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
5996 		    "ioctl <%s> device addr copyin failed", s));
5997 		retval = EFAULT;
5998 	}
5999 	return (retval);
6000 }
6001 
6002 
6003 /*
6004  * Routine to send client device pathname to userland.
6005  */
6006 /* ARGSUSED */
6007 static int
6008 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
6009 	int mode, caddr_t s)
6010 {
6011 	int retval = 0;
6012 
6013 	if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
6014 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
6015 		    "ioctl <%s> client_path copyout failed", s));
6016 		retval = EFAULT;
6017 	}
6018 	return (retval);
6019 }
6020 
6021 
6022 /*
6023  * Routine to translated dev_info pointer (dip) to device pathname.
6024  */
6025 static void
6026 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
6027 {
6028 	(void) ddi_pathname(dip, path);
6029 }
6030 
6031 
6032 /*
6033  * vhci_get_phci_path_list:
6034  *		get information about devices associated with a
6035  *		given PHCI device.
6036  *
6037  * Return Values:
6038  *		path information elements
6039  */
6040 int
6041 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
6042 	uint_t num_elems)
6043 {
6044 	uint_t			count, done;
6045 	mdi_pathinfo_t		*pip;
6046 	sv_path_info_t		*ret_pip;
6047 	int			status;
6048 	size_t			prop_size;
6049 	int			circular;
6050 
6051 	/*
6052 	 * Get the PHCI structure and retrieve the path information
6053 	 * from the GUID hash table.
6054 	 */
6055 
6056 	ret_pip = pibuf;
6057 	count = 0;
6058 
6059 	ndi_devi_enter(pdip, &circular);
6060 
6061 	done = (count >= num_elems);
6062 	pip = mdi_get_next_client_path(pdip, NULL);
6063 	while (pip && !done) {
6064 		mdi_pi_lock(pip);
6065 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6066 		    ret_pip->device.ret_phci);
6067 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6068 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6069 		    &ret_pip->ret_ext_state);
6070 
6071 		status = mdi_prop_size(pip, &prop_size);
6072 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6073 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6074 		}
6075 
6076 #ifdef DEBUG
6077 		if (status != MDI_SUCCESS) {
6078 			VHCI_DEBUG(2, (CE_WARN, NULL,
6079 			    "!vhci_get_phci_path_list: "
6080 			    "phci <%s>, prop size failure 0x%x",
6081 			    ret_pip->device.ret_phci, status));
6082 		}
6083 #endif /* DEBUG */
6084 
6085 
6086 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6087 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6088 			status = mdi_prop_pack(pip,
6089 			    &ret_pip->ret_prop.buf,
6090 			    ret_pip->ret_prop.buf_size);
6091 
6092 #ifdef DEBUG
6093 			if (status != MDI_SUCCESS) {
6094 				VHCI_DEBUG(2, (CE_WARN, NULL,
6095 				    "!vhci_get_phci_path_list: "
6096 				    "phci <%s>, prop pack failure 0x%x",
6097 				    ret_pip->device.ret_phci, status));
6098 			}
6099 #endif /* DEBUG */
6100 		}
6101 
6102 		mdi_pi_unlock(pip);
6103 		pip = mdi_get_next_client_path(pdip, pip);
6104 		ret_pip++;
6105 		count++;
6106 		done = (count >= num_elems);
6107 	}
6108 
6109 	ndi_devi_exit(pdip, circular);
6110 
6111 	return (MDI_SUCCESS);
6112 }
6113 
6114 
6115 /*
6116  * vhci_get_client_path_list:
6117  *		get information about various paths associated with a
6118  *		given client device.
6119  *
6120  * Return Values:
6121  *		path information elements
6122  */
6123 int
6124 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
6125 	uint_t num_elems)
6126 {
6127 	uint_t			count, done;
6128 	mdi_pathinfo_t		*pip;
6129 	sv_path_info_t		*ret_pip;
6130 	int			status;
6131 	size_t			prop_size;
6132 	int			circular;
6133 
6134 	ret_pip = pibuf;
6135 	count = 0;
6136 
6137 	ndi_devi_enter(cdip, &circular);
6138 
6139 	done = (count >= num_elems);
6140 	pip = mdi_get_next_phci_path(cdip, NULL);
6141 	while (pip && !done) {
6142 		mdi_pi_lock(pip);
6143 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6144 		    ret_pip->device.ret_phci);
6145 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6146 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6147 		    &ret_pip->ret_ext_state);
6148 
6149 		status = mdi_prop_size(pip, &prop_size);
6150 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6151 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6152 		}
6153 
6154 #ifdef DEBUG
6155 		if (status != MDI_SUCCESS) {
6156 			VHCI_DEBUG(2, (CE_WARN, NULL,
6157 			    "!vhci_get_client_path_list: "
6158 			    "phci <%s>, prop size failure 0x%x",
6159 			    ret_pip->device.ret_phci, status));
6160 		}
6161 #endif /* DEBUG */
6162 
6163 
6164 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6165 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6166 			status = mdi_prop_pack(pip,
6167 			    &ret_pip->ret_prop.buf,
6168 			    ret_pip->ret_prop.buf_size);
6169 
6170 #ifdef DEBUG
6171 			if (status != MDI_SUCCESS) {
6172 				VHCI_DEBUG(2, (CE_WARN, NULL,
6173 				    "!vhci_get_client_path_list: "
6174 				    "phci <%s>, prop pack failure 0x%x",
6175 				    ret_pip->device.ret_phci, status));
6176 			}
6177 #endif /* DEBUG */
6178 		}
6179 
6180 		mdi_pi_unlock(pip);
6181 		pip = mdi_get_next_phci_path(cdip, pip);
6182 		ret_pip++;
6183 		count++;
6184 		done = (count >= num_elems);
6185 	}
6186 
6187 	ndi_devi_exit(cdip, circular);
6188 
6189 	return (MDI_SUCCESS);
6190 }
6191 
6192 
6193 /*
6194  * Routine to get ioctl argument structure from userland.
6195  */
6196 /* ARGSUSED */
6197 static int
6198 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
6199 {
6200 	int	retval = 0;
6201 
6202 #ifdef  _MULTI_DATAMODEL
6203 	switch (ddi_model_convert_from(mode & FMODELS)) {
6204 	case DDI_MODEL_ILP32:
6205 	{
6206 		sv_iocdata32_t	ioc32;
6207 
6208 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6209 			retval = EFAULT;
6210 			break;
6211 		}
6212 		pioc->client	= (caddr_t)(uintptr_t)ioc32.client;
6213 		pioc->phci	= (caddr_t)(uintptr_t)ioc32.phci;
6214 		pioc->addr	= (caddr_t)(uintptr_t)ioc32.addr;
6215 		pioc->buf_elem	= (uint_t)ioc32.buf_elem;
6216 		pioc->ret_buf	= (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
6217 		pioc->ret_elem	= (uint_t *)(uintptr_t)ioc32.ret_elem;
6218 		break;
6219 	}
6220 
6221 	case DDI_MODEL_NONE:
6222 		if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6223 			retval = EFAULT;
6224 			break;
6225 		}
6226 		break;
6227 	}
6228 #else   /* _MULTI_DATAMODEL */
6229 	if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6230 		retval = EFAULT;
6231 	}
6232 #endif  /* _MULTI_DATAMODEL */
6233 
6234 #ifdef DEBUG
6235 	if (retval) {
6236 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6237 		    "iocdata copyin failed", s));
6238 	}
6239 #endif
6240 
6241 	return (retval);
6242 }
6243 
6244 
6245 /*
6246  * Routine to get the ioctl argument for ioctl causing controller switchover.
6247  */
6248 /* ARGSUSED */
6249 static int
6250 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
6251     int mode, caddr_t s)
6252 {
6253 	int	retval = 0;
6254 
6255 #ifdef  _MULTI_DATAMODEL
6256 	switch (ddi_model_convert_from(mode & FMODELS)) {
6257 	case DDI_MODEL_ILP32:
6258 	{
6259 		sv_switch_to_cntlr_iocdata32_t	ioc32;
6260 
6261 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6262 			retval = EFAULT;
6263 			break;
6264 		}
6265 		piocsc->client	= (caddr_t)(uintptr_t)ioc32.client;
6266 		piocsc->class	= (caddr_t)(uintptr_t)ioc32.class;
6267 		break;
6268 	}
6269 
6270 	case DDI_MODEL_NONE:
6271 		if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6272 			retval = EFAULT;
6273 		}
6274 		break;
6275 	}
6276 #else   /* _MULTI_DATAMODEL */
6277 	if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6278 		retval = EFAULT;
6279 	}
6280 #endif  /* _MULTI_DATAMODEL */
6281 
6282 #ifdef DEBUG
6283 	if (retval) {
6284 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6285 		    "switch_to_cntlr_iocdata copyin failed", s));
6286 	}
6287 #endif
6288 
6289 	return (retval);
6290 }
6291 
6292 
6293 /*
6294  * Routine to allocate memory for the path information structures.
6295  * It allocates two chunks of memory - one for keeping userland
6296  * pointers/values for path information and path properties, second for
6297  * keeping allocating kernel memory for path properties. These path
6298  * properties are finally copied to userland.
6299  */
6300 /* ARGSUSED */
6301 static int
6302 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
6303     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6304 {
6305 	sv_path_info_t	*pi;
6306 	uint_t		bufsize;
6307 	int		retval = 0;
6308 	int		index;
6309 
6310 	/* Allocate memory */
6311 	*upibuf = (sv_path_info_t *)
6312 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6313 	ASSERT(*upibuf != NULL);
6314 	*kpibuf = (sv_path_info_t *)
6315 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6316 	ASSERT(*kpibuf != NULL);
6317 
6318 	/*
6319 	 * Get the path info structure from the user space.
6320 	 * We are interested in the following fields:
6321 	 *	- user size of buffer for per path properties.
6322 	 *	- user address of buffer for path info properties.
6323 	 *	- user pointer for returning actual buffer size
6324 	 * Keep these fields in the 'upibuf' structures.
6325 	 * Allocate buffer for per path info properties in kernel
6326 	 * structure ('kpibuf').
6327 	 * Size of these buffers will be equal to the size of buffers
6328 	 * in the user space.
6329 	 */
6330 #ifdef  _MULTI_DATAMODEL
6331 	switch (ddi_model_convert_from(mode & FMODELS)) {
6332 	case DDI_MODEL_ILP32:
6333 	{
6334 		sv_path_info32_t	*src;
6335 		sv_path_info32_t	pi32;
6336 
6337 		src  = (sv_path_info32_t *)pioc->ret_buf;
6338 		pi = (sv_path_info_t *)*upibuf;
6339 		for (index = 0; index < num_paths; index++, src++, pi++) {
6340 			if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
6341 				retval = EFAULT;
6342 				break;
6343 			}
6344 
6345 			pi->ret_prop.buf_size	=
6346 			    (uint_t)pi32.ret_prop.buf_size;
6347 			pi->ret_prop.ret_buf_size =
6348 			    (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
6349 			pi->ret_prop.buf	=
6350 			    (caddr_t)(uintptr_t)pi32.ret_prop.buf;
6351 		}
6352 		break;
6353 	}
6354 
6355 	case DDI_MODEL_NONE:
6356 		if (ddi_copyin(pioc->ret_buf, *upibuf,
6357 		    sizeof (sv_path_info_t) * num_paths, mode)) {
6358 			retval = EFAULT;
6359 		}
6360 		break;
6361 	}
6362 #else   /* _MULTI_DATAMODEL */
6363 	if (ddi_copyin(pioc->ret_buf, *upibuf,
6364 	    sizeof (sv_path_info_t) * num_paths, mode)) {
6365 		retval = EFAULT;
6366 	}
6367 #endif  /* _MULTI_DATAMODEL */
6368 
6369 	if (retval != 0) {
6370 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
6371 		    "ioctl <%s> normal: path_info copyin failed", s));
6372 		kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
6373 		kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
6374 		*upibuf = NULL;
6375 		*kpibuf = NULL;
6376 		return (retval);
6377 	}
6378 
6379 	/*
6380 	 * Allocate memory for per path properties.
6381 	 */
6382 	for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
6383 		bufsize = (*upibuf)[index].ret_prop.buf_size;
6384 
6385 		if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
6386 			pi->ret_prop.buf_size = bufsize;
6387 			pi->ret_prop.buf = (caddr_t)
6388 			    kmem_zalloc(bufsize, KM_SLEEP);
6389 			ASSERT(pi->ret_prop.buf != NULL);
6390 		} else {
6391 			pi->ret_prop.buf_size = 0;
6392 			pi->ret_prop.buf = NULL;
6393 		}
6394 
6395 		if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
6396 			pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
6397 			    sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
6398 			ASSERT(pi->ret_prop.ret_buf_size != NULL);
6399 		} else {
6400 			pi->ret_prop.ret_buf_size = NULL;
6401 		}
6402 	}
6403 
6404 	return (0);
6405 }
6406 
6407 
6408 /*
6409  * Routine to free memory for the path information structures.
6410  * This is the memory which was allocated earlier.
6411  */
6412 /* ARGSUSED */
6413 static void
6414 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6415     uint_t num_paths)
6416 {
6417 	sv_path_info_t	*pi;
6418 	int		index;
6419 
6420 	/* Free memory for per path properties */
6421 	for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
6422 		if (pi->ret_prop.ret_buf_size != NULL) {
6423 			kmem_free(pi->ret_prop.ret_buf_size,
6424 			    sizeof (*pi->ret_prop.ret_buf_size));
6425 		}
6426 
6427 		if (pi->ret_prop.buf != NULL) {
6428 			kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
6429 		}
6430 	}
6431 
6432 	/* Free memory for path info structures */
6433 	kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
6434 	kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
6435 }
6436 
6437 
6438 /*
6439  * Routine to copy path information and path properties to userland.
6440  */
6441 /* ARGSUSED */
6442 static int
6443 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6444     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6445 {
6446 	int			retval = 0, index;
6447 	sv_path_info_t		*upi_ptr;
6448 	sv_path_info32_t	*upi32_ptr;
6449 
6450 #ifdef  _MULTI_DATAMODEL
6451 	switch (ddi_model_convert_from(mode & FMODELS)) {
6452 	case DDI_MODEL_ILP32:
6453 		goto copy_32bit;
6454 
6455 	case DDI_MODEL_NONE:
6456 		goto copy_normal;
6457 	}
6458 #else   /* _MULTI_DATAMODEL */
6459 
6460 	goto copy_normal;
6461 
6462 #endif  /* _MULTI_DATAMODEL */
6463 
6464 copy_normal:
6465 
6466 	/*
6467 	 * Copy path information and path properties to user land.
6468 	 * Pointer fields inside the path property structure were
6469 	 * saved in the 'upibuf' structure earlier.
6470 	 */
6471 	upi_ptr = pioc->ret_buf;
6472 	for (index = 0; index < num_paths; index++) {
6473 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6474 		    upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6475 			retval = EFAULT;
6476 			break;
6477 		}
6478 
6479 		if (ddi_copyout(kpibuf[index].ret_addr,
6480 		    upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6481 			retval = EFAULT;
6482 			break;
6483 		}
6484 
6485 		if (ddi_copyout(&kpibuf[index].ret_state,
6486 		    &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state),
6487 		    mode)) {
6488 			retval = EFAULT;
6489 			break;
6490 		}
6491 
6492 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6493 		    &upi_ptr[index].ret_ext_state,
6494 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6495 			retval = EFAULT;
6496 			break;
6497 		}
6498 
6499 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6500 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6501 		    upibuf[index].ret_prop.ret_buf_size,
6502 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6503 			retval = EFAULT;
6504 			break;
6505 		}
6506 
6507 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6508 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6509 		    upibuf[index].ret_prop.buf,
6510 		    upibuf[index].ret_prop.buf_size, mode)) {
6511 			retval = EFAULT;
6512 			break;
6513 		}
6514 	}
6515 
6516 #ifdef DEBUG
6517 	if (retval) {
6518 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6519 		    "normal: path_info copyout failed", s));
6520 	}
6521 #endif
6522 
6523 	return (retval);
6524 
6525 copy_32bit:
6526 	/*
6527 	 * Copy path information and path properties to user land.
6528 	 * Pointer fields inside the path property structure were
6529 	 * saved in the 'upibuf' structure earlier.
6530 	 */
6531 	upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
6532 	for (index = 0; index < num_paths; index++) {
6533 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6534 		    upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6535 			retval = EFAULT;
6536 			break;
6537 		}
6538 
6539 		if (ddi_copyout(kpibuf[index].ret_addr,
6540 		    upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6541 			retval = EFAULT;
6542 			break;
6543 		}
6544 
6545 		if (ddi_copyout(&kpibuf[index].ret_state,
6546 		    &upi32_ptr[index].ret_state,
6547 		    sizeof (kpibuf[index].ret_state), mode)) {
6548 			retval = EFAULT;
6549 			break;
6550 		}
6551 
6552 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6553 		    &upi32_ptr[index].ret_ext_state,
6554 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6555 			retval = EFAULT;
6556 			break;
6557 		}
6558 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6559 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6560 		    upibuf[index].ret_prop.ret_buf_size,
6561 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6562 			retval = EFAULT;
6563 			break;
6564 		}
6565 
6566 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6567 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6568 		    upibuf[index].ret_prop.buf,
6569 		    upibuf[index].ret_prop.buf_size, mode)) {
6570 			retval = EFAULT;
6571 			break;
6572 		}
6573 	}
6574 
6575 #ifdef DEBUG
6576 	if (retval) {
6577 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6578 		    "normal: path_info copyout failed", s));
6579 	}
6580 #endif
6581 
6582 	return (retval);
6583 }
6584 
6585 
6586 /*
6587  * vhci_failover()
6588  * This routine expects VHCI_HOLD_LUN before being invoked.  It can be invoked
6589  * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC.  For Asynchronous failovers
6590  * this routine shall VHCI_RELEASE_LUN on exiting.  For synchronous failovers
6591  * it is the callers responsibility to release lun.
6592  */
6593 
6594 /* ARGSUSED */
6595 static int
6596 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
6597 {
6598 	char			*guid;
6599 	scsi_vhci_lun_t		*vlun = NULL;
6600 	struct scsi_vhci	*vhci;
6601 	mdi_pathinfo_t		*pip, *npip;
6602 	char			*s_pclass, *pclass1, *pclass2, *pclass;
6603 	char			active_pclass_copy[255], *active_pclass_ptr;
6604 	char			*ptr1, *ptr2;
6605 	mdi_pathinfo_state_t	pi_state;
6606 	uint32_t		pi_ext_state;
6607 	scsi_vhci_priv_t	*svp;
6608 	struct scsi_device	*sd;
6609 	struct scsi_failover_ops	*sfo;
6610 	int			sps; /* mdi_select_path() status */
6611 	int			activation_done = 0;
6612 	int			rval, retval = MDI_FAILURE;
6613 	int			reserve_pending, check_condition, UA_condition;
6614 	struct scsi_pkt		*pkt;
6615 	struct buf		*bp;
6616 
6617 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
6618 	sd = ddi_get_driver_private(cdip);
6619 	vlun = ADDR2VLUN(&sd->sd_address);
6620 	ASSERT(vlun != 0);
6621 	ASSERT(VHCI_LUN_IS_HELD(vlun));
6622 	guid = vlun->svl_lun_wwn;
6623 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
6624 	vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
6625 	    "(GUID %s)", ddi_node_name(cdip), guid);
6626 
6627 	/*
6628 	 * Lets maintain a local copy of the vlun->svl_active_pclass
6629 	 * for the rest of the processing. Accessing the field
6630 	 * directly in the loop below causes loop logic to break
6631 	 * especially when the field gets updated by other threads
6632 	 * update path status etc and causes 'paths are not currently
6633 	 * available' condition to be declared prematurely.
6634 	 */
6635 	mutex_enter(&vlun->svl_mutex);
6636 	if (vlun->svl_active_pclass != NULL) {
6637 		(void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6638 		    sizeof (active_pclass_copy));
6639 		active_pclass_ptr = &active_pclass_copy[0];
6640 		mutex_exit(&vlun->svl_mutex);
6641 		if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6642 		    active_pclass_ptr) != 0) {
6643 			retval = MDI_FAILURE;
6644 		}
6645 	} else {
6646 		/*
6647 		 * can happen only when the available path to device
6648 		 * discovered is a STANDBY path.
6649 		 */
6650 		mutex_exit(&vlun->svl_mutex);
6651 		active_pclass_copy[0] = '\0';
6652 		active_pclass_ptr = NULL;
6653 	}
6654 
6655 	sfo = vlun->svl_fops;
6656 	ASSERT(sfo != NULL);
6657 	pclass1 = s_pclass = active_pclass_ptr;
6658 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6659 	    (s_pclass == NULL ? "<none>" : s_pclass)));
6660 
6661 next_pathclass:
6662 
6663 	rval = (*sfo->sfo_pathclass_next)(pclass1, &pclass2,
6664 	    vlun->svl_fops_ctpriv);
6665 	if (rval == ENOENT) {
6666 		if (s_pclass == NULL) {
6667 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6668 			    "failed, no more pathclasses\n", guid));
6669 			goto done;
6670 		} else {
6671 			(*sfo->sfo_pathclass_next)(NULL, &pclass2,
6672 			    vlun->svl_fops_ctpriv);
6673 		}
6674 	} else if (rval == EINVAL) {
6675 		vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6676 		    "device %s (GUID %s): Invalid path-class %s",
6677 		    ddi_node_name(cdip), guid,
6678 		    ((pclass1 == NULL) ? "<none>" : pclass1));
6679 		goto done;
6680 	}
6681 	if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6682 		/*
6683 		 * paths are not currently available
6684 		 */
6685 		vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6686 		    " for device %s (GUID %s)",
6687 		    ddi_node_name(cdip), guid);
6688 		goto done;
6689 	}
6690 	pip = npip = NULL;
6691 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6692 	    "%s as failover destination\n", guid, pclass2));
6693 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6694 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6695 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6696 		    "STANDBY paths found (status:%x)!\n", guid, sps));
6697 		pclass1 = pclass2;
6698 		goto next_pathclass;
6699 	}
6700 	do {
6701 		pclass = NULL;
6702 		if ((mdi_prop_lookup_string(npip, "path-class",
6703 		    &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6704 		    pclass) != 0)) {
6705 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6706 			    "!vhci_failover(5.5)(%s): skipping path "
6707 			    "%p(%s)...\n", guid, (void *)npip, pclass));
6708 			pip = npip;
6709 			sps = mdi_select_path(cdip, NULL,
6710 			    MDI_SELECT_STANDBY_PATH, pip, &npip);
6711 			mdi_rele_path(pip);
6712 			(void) mdi_prop_free(pclass);
6713 			continue;
6714 		}
6715 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6716 
6717 		/*
6718 		 * Issue READ at non-zer block on this STANDBY path.
6719 		 * Purple returns
6720 		 * 1. RESERVATION_CONFLICT if reservation is pending
6721 		 * 2. POR check condition if it reset happened.
6722 		 * 2. failover Check Conditions if one is already in progress.
6723 		 */
6724 		reserve_pending = 0;
6725 		check_condition = 0;
6726 		UA_condition = 0;
6727 
6728 		bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
6729 		    (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL);
6730 		if (!bp) {
6731 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6732 			    "vhci_failover !No resources (buf)\n"));
6733 			mdi_rele_path(npip);
6734 			goto done;
6735 		}
6736 		pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
6737 		    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
6738 		    PKT_CONSISTENT, NULL, NULL);
6739 		if (pkt) {
6740 			(void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
6741 			    pkt->pkt_cdbp, SCMD_READ, 1, 1, 0);
6742 			pkt->pkt_flags = FLAG_NOINTR;
6743 check_path_again:
6744 			pkt->pkt_time = 3*30;
6745 			if (scsi_transport(pkt) == TRAN_ACCEPT) {
6746 				switch (pkt->pkt_reason) {
6747 				case CMD_CMPLT:
6748 					switch (SCBP_C(pkt)) {
6749 					case STATUS_GOOD:
6750 						/* Already failed over */
6751 						activation_done = 1;
6752 						break;
6753 					case STATUS_RESERVATION_CONFLICT:
6754 						reserve_pending = 1;
6755 						break;
6756 					case STATUS_CHECK:
6757 						check_condition = 1;
6758 						break;
6759 					}
6760 				}
6761 			}
6762 			if (check_condition &&
6763 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
6764 				struct scsi_extended_sense *sns =
6765 				    &(((struct scsi_arq_status *)(uintptr_t)
6766 				    (pkt->pkt_scbp))->sts_sensedata);
6767 				if (sns->es_key == KEY_UNIT_ATTENTION &&
6768 				    sns->es_add_code == 0x29) {
6769 					/* Already failed over */
6770 					VHCI_DEBUG(1, (CE_NOTE, NULL,
6771 					    "!vhci_failover(7)(%s): "
6772 					    "path 0x%p POR UA condition\n",
6773 					    guid, (void *)npip));
6774 					if (UA_condition == 0) {
6775 						UA_condition = 1;
6776 						goto check_path_again;
6777 					}
6778 				} else {
6779 					activation_done = 0;
6780 					VHCI_DEBUG(1, (CE_NOTE, NULL,
6781 					    "!vhci_failover(%s): path 0x%p "
6782 					    "unhandled chkcond %x %x %x\n",
6783 					    guid, (void *)npip, sns->es_key,
6784 					    sns->es_add_code,
6785 					    sns->es_qual_code));
6786 				}
6787 			}
6788 			scsi_destroy_pkt(pkt);
6789 		}
6790 		scsi_free_consistent_buf(bp);
6791 
6792 		if (activation_done) {
6793 			mdi_rele_path(npip);
6794 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
6795 			    "path 0x%p already failedover\n", guid,
6796 			    (void *)npip));
6797 			break;
6798 		}
6799 		if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
6800 			(void) vhci_recovery_reset(vlun,
6801 			    &svp->svp_psd->sd_address,
6802 			    FALSE, VHCI_DEPTH_ALL);
6803 		}
6804 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
6805 		    "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
6806 		    (void *)svp->svp_psd));
6807 		if ((*sfo->sfo_path_activate)(svp->svp_psd, pclass2,
6808 		    vlun->svl_fops_ctpriv) == 0) {
6809 			activation_done = 1;
6810 			mdi_rele_path(npip);
6811 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
6812 			    "path 0x%p successfully activated\n", guid,
6813 			    (void *)npip));
6814 			break;
6815 		}
6816 		pip = npip;
6817 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
6818 		    pip, &npip);
6819 		mdi_rele_path(pip);
6820 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
6821 	if (activation_done == 0) {
6822 		pclass1 = pclass2;
6823 		goto next_pathclass;
6824 	}
6825 
6826 	/*
6827 	 * if we are here, we have succeeded in activating path npip of
6828 	 * pathclass pclass2; let us validate all paths of pclass2 by
6829 	 * "ping"-ing each one and mark the good ones ONLINE
6830 	 * Also, set the state of the paths belonging to the previously
6831 	 * active pathclass to STANDBY
6832 	 */
6833 	pip = npip = NULL;
6834 	sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
6835 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
6836 	    NULL, &npip);
6837 	if (npip == NULL || sps != MDI_SUCCESS) {
6838 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
6839 		    "device %s (GUID %s): paths may be busy\n",
6840 		    ddi_node_name(cdip), guid));
6841 		goto done;
6842 	}
6843 	do {
6844 		(void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
6845 		if (mdi_prop_lookup_string(npip, "path-class", &pclass)
6846 		    != MDI_SUCCESS) {
6847 			pip = npip;
6848 			sps = mdi_select_path(cdip, NULL,
6849 			    (MDI_SELECT_ONLINE_PATH |
6850 			    MDI_SELECT_STANDBY_PATH |
6851 			    MDI_SELECT_USER_DISABLE_PATH),
6852 			    pip, &npip);
6853 			mdi_rele_path(pip);
6854 			continue;
6855 		}
6856 		if (strcmp(pclass, pclass2) == 0) {
6857 			if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
6858 				svp = (scsi_vhci_priv_t *)
6859 				    mdi_pi_get_vhci_private(npip);
6860 				VHCI_DEBUG(1, (CE_NOTE, NULL,
6861 				    "!vhci_failover(8)(%s): "
6862 				    "pinging path 0x%p\n",
6863 				    guid, (void *)npip));
6864 				if ((*sfo->sfo_path_ping)(svp->svp_psd,
6865 				    vlun->svl_fops_ctpriv) == 1) {
6866 					mdi_pi_set_state(npip,
6867 					    MDI_PATHINFO_STATE_ONLINE);
6868 					VHCI_DEBUG(1, (CE_NOTE, NULL,
6869 					    "!vhci_failover(9)(%s): "
6870 					    "path 0x%p ping successful, "
6871 					    "marked online\n", guid,
6872 					    (void *)npip));
6873 					MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
6874 				}
6875 			}
6876 		} else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
6877 		    == 0)) {
6878 			if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
6879 				mdi_pi_set_state(npip,
6880 				    MDI_PATHINFO_STATE_STANDBY);
6881 				VHCI_DEBUG(1, (CE_NOTE, NULL,
6882 				    "!vhci_failover(10)(%s): path 0x%p marked "
6883 				    "STANDBY\n", guid, (void *)npip));
6884 				MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
6885 			}
6886 		}
6887 		(void) mdi_prop_free(pclass);
6888 		pip = npip;
6889 		sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
6890 		    MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH),
6891 		    pip, &npip);
6892 		mdi_rele_path(pip);
6893 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
6894 
6895 	/*
6896 	 * Update the AccessState of related MP-API TPGs
6897 	 */
6898 	(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
6899 
6900 	vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
6901 	    "for device %s (GUID %s): failed over from %s to %s",
6902 	    ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
6903 	    s_pclass), pclass2);
6904 	ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP);
6905 	(void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1));
6906 	mutex_enter(&vlun->svl_mutex);
6907 	ptr2 = vlun->svl_active_pclass;
6908 	vlun->svl_active_pclass = ptr1;
6909 	mutex_exit(&vlun->svl_mutex);
6910 	if (ptr2) {
6911 		kmem_free(ptr2, strlen(ptr2)+1);
6912 	}
6913 	mutex_enter(&vhci->vhci_mutex);
6914 	scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
6915 	    &vhci->vhci_reset_notify_listf);
6916 	/* All reservations are cleared upon these resets. */
6917 	vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
6918 	mutex_exit(&vhci->vhci_mutex);
6919 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
6920 	    "pathclass for %s is now %s\n", guid, pclass2));
6921 	retval = MDI_SUCCESS;
6922 
6923 done:
6924 	if (flags == MDI_FAILOVER_ASYNC) {
6925 		VHCI_RELEASE_LUN(vlun);
6926 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
6927 		    "releasing lun, as failover was ASYNC\n"));
6928 	} else {
6929 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
6930 		    "NOT releasing lun, as failover was SYNC\n"));
6931 	}
6932 	return (retval);
6933 }
6934 
6935 /*
6936  * vhci_client_attached is called after the successful attach of a
6937  * client devinfo node.
6938  */
6939 static void
6940 vhci_client_attached(dev_info_t *cdip)
6941 {
6942 	mdi_pathinfo_t	*pip;
6943 	int		circular;
6944 
6945 	/*
6946 	 * At this point the client has attached and it's instance number is
6947 	 * valid, so we can set up kstats.  We need to do this here because it
6948 	 * is possible for paths to go online prior to client attach, in which
6949 	 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
6950 	 * was a noop.
6951 	 */
6952 	ndi_devi_enter(cdip, &circular);
6953 	for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
6954 	    pip = mdi_get_next_phci_path(cdip, pip))
6955 		vhci_kstat_create_pathinfo(pip);
6956 	ndi_devi_exit(cdip, circular);
6957 }
6958 
6959 /*
6960  * quiesce all of the online paths
6961  */
6962 static int
6963 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
6964 	char *guid, char *active_pclass_ptr)
6965 {
6966 	scsi_vhci_priv_t	*svp;
6967 	char			*s_pclass = NULL;
6968 	mdi_pathinfo_t		*npip, *pip;
6969 	int			sps;
6970 
6971 	/* quiesce currently active paths */
6972 	s_pclass = NULL;
6973 	pip = npip = NULL;
6974 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
6975 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6976 		return (1);
6977 	}
6978 	do {
6979 		if (mdi_prop_lookup_string(npip, "path-class",
6980 		    &s_pclass) != MDI_SUCCESS) {
6981 			mdi_rele_path(npip);
6982 			vhci_log(CE_NOTE, vdip, "!Failover operation failed "
6983 			    "for device %s (GUID %s) due to an internal "
6984 			    "error", ddi_node_name(cdip), guid);
6985 			return (1);
6986 		}
6987 		if (strcmp(s_pclass, active_pclass_ptr) == 0) {
6988 			/*
6989 			 * quiesce path. Free s_pclass since
6990 			 * we don't need it anymore
6991 			 */
6992 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6993 			    "!vhci_failover(2)(%s): failing over "
6994 			    "from %s; quiescing path %p\n",
6995 			    guid, s_pclass, (void *)npip));
6996 			(void) mdi_prop_free(s_pclass);
6997 			svp = (scsi_vhci_priv_t *)
6998 			    mdi_pi_get_vhci_private(npip);
6999 			if (svp == NULL) {
7000 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7001 				    "!vhci_failover(2.5)(%s): no "
7002 				    "client priv! %p offlined?\n",
7003 				    guid, (void *)npip));
7004 				pip = npip;
7005 				sps = mdi_select_path(cdip, NULL,
7006 				    MDI_SELECT_ONLINE_PATH, pip, &npip);
7007 				mdi_rele_path(pip);
7008 				continue;
7009 			}
7010 			if (scsi_abort(&svp->svp_psd->sd_address, NULL)
7011 			    == 0) {
7012 				(void) vhci_recovery_reset(vlun,
7013 				    &svp->svp_psd->sd_address, FALSE,
7014 				    VHCI_DEPTH_TARGET);
7015 			}
7016 			mutex_enter(&svp->svp_mutex);
7017 			if (svp->svp_cmds == 0) {
7018 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7019 				    "!vhci_failover(3)(%s):"
7020 				    "quiesced path %p\n", guid, (void *)npip));
7021 			} else {
7022 				while (svp->svp_cmds != 0) {
7023 					cv_wait(&svp->svp_cv, &svp->svp_mutex);
7024 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7025 					    "!vhci_failover(3.cv)(%s):"
7026 					    "quiesced path %p\n", guid,
7027 					    (void *)npip));
7028 				}
7029 			}
7030 			mutex_exit(&svp->svp_mutex);
7031 		} else {
7032 			/*
7033 			 * make sure we freeup the memory
7034 			 */
7035 			(void) mdi_prop_free(s_pclass);
7036 		}
7037 		pip = npip;
7038 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
7039 		    pip, &npip);
7040 		mdi_rele_path(pip);
7041 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7042 	return (0);
7043 }
7044 
7045 static struct scsi_vhci_lun *
7046 vhci_lun_lookup(dev_info_t *tgt_dip)
7047 {
7048 	return ((struct scsi_vhci_lun *)
7049 	    mdi_client_get_vhci_private(tgt_dip));
7050 }
7051 
7052 static struct scsi_vhci_lun *
7053 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
7054 {
7055 	struct scsi_vhci_lun *svl;
7056 
7057 	if (svl = vhci_lun_lookup(tgt_dip)) {
7058 		return (svl);
7059 	}
7060 
7061 	svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
7062 	svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP);
7063 	(void) strcpy(svl->svl_lun_wwn,  guid);
7064 	mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
7065 	cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
7066 	sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
7067 	svl->svl_waiting_for_activepath = 1;
7068 	svl->svl_sector_size = 1;
7069 	mdi_client_set_vhci_private(tgt_dip, svl);
7070 	*didalloc = 1;
7071 	VHCI_DEBUG(1, (CE_NOTE, NULL,
7072 	    "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
7073 	    guid, (void *)svl));
7074 	return (svl);
7075 }
7076 
7077 static void
7078 vhci_lun_free(dev_info_t *tgt_dip)
7079 {
7080 	struct scsi_vhci_lun *dvlp;
7081 	char *guid;
7082 	struct scsi_device *sd;
7083 
7084 	/*
7085 	 * The scsi_device was set to driver private during child node
7086 	 * initialization in the scsi_hba_bus_ctl().
7087 	 */
7088 	sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip);
7089 
7090 	dvlp = (struct scsi_vhci_lun *)
7091 	    mdi_client_get_vhci_private(tgt_dip);
7092 	ASSERT(dvlp != NULL);
7093 
7094 	mdi_client_set_vhci_private(tgt_dip, NULL);
7095 
7096 	guid = dvlp->svl_lun_wwn;
7097 	ASSERT(guid != NULL);
7098 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));
7099 
7100 	mutex_enter(&dvlp->svl_mutex);
7101 	if (dvlp->svl_active_pclass != NULL) {
7102 		kmem_free(dvlp->svl_active_pclass,
7103 		    strlen(dvlp->svl_active_pclass)+1);
7104 	}
7105 	dvlp->svl_active_pclass = NULL;
7106 	mutex_exit(&dvlp->svl_mutex);
7107 
7108 	if (dvlp->svl_lun_wwn != NULL) {
7109 		kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1);
7110 	}
7111 	dvlp->svl_lun_wwn = NULL;
7112 
7113 
7114 	if (dvlp->svl_fops_name) {
7115 		kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1);
7116 	}
7117 	dvlp->svl_fops_name = NULL;
7118 
7119 	if (dvlp->svl_fops_ctpriv != NULL) {
7120 		dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
7121 	}
7122 
7123 	if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
7124 		taskq_destroy(dvlp->svl_taskq);
7125 
7126 	mutex_destroy(&dvlp->svl_mutex);
7127 	cv_destroy(&dvlp->svl_cv);
7128 	sema_destroy(&dvlp->svl_pgr_sema);
7129 	kmem_free(dvlp, sizeof (*dvlp));
7130 	/*
7131 	 * vhci_lun_free may be called before the tgt_dip
7132 	 * initialization so check if the sd is NULL.
7133 	 */
7134 	if (sd != NULL)
7135 		sd->sd_address.a_hba_tran->tran_tgt_private = NULL;
7136 }
7137 
7138 
7139 int
7140 vhci_do_scsi_cmd(struct scsi_pkt *pkt)
7141 {
7142 	int	err = 0;
7143 	int	retry_cnt = 0;
7144 	struct scsi_extended_sense	*sns;
7145 
7146 retry:
7147 	err = scsi_poll(pkt);
7148 	if (err) {
7149 		if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
7150 			if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
7151 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7152 				    "!v_s_do_s_c: RELEASE conflict\n"));
7153 				return (0);
7154 			}
7155 		}
7156 		if (retry_cnt++ < 3) {
7157 			VHCI_DEBUG(1, (CE_WARN, NULL,
7158 			    "!v_s_do_s_c:retry packet 0x%p "
7159 			    "status 0x%x reason %s",
7160 			    (void *)pkt, SCBP_C(pkt),
7161 			    scsi_rname(pkt->pkt_reason)));
7162 			if ((pkt->pkt_reason == CMD_CMPLT) &&
7163 			    (SCBP_C(pkt) == STATUS_CHECK) &&
7164 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
7165 				sns = &(((struct scsi_arq_status *)(uintptr_t)
7166 				    (pkt->pkt_scbp))->sts_sensedata);
7167 				VHCI_DEBUG(1, (CE_WARN, NULL,
7168 				    "!v_s_do_s_c:retry "
7169 				    "packet 0x%p  sense data %s", (void *)pkt,
7170 				    scsi_sname(sns->es_key)));
7171 			}
7172 			goto retry;
7173 		}
7174 		VHCI_DEBUG(1, (CE_WARN, NULL,
7175 		    "!v_s_do_s_c: failed transport 0x%p 0x%x",
7176 		    (void *)pkt, SCBP_C(pkt)));
7177 		return (0);
7178 	}
7179 
7180 	switch (pkt->pkt_reason) {
7181 		case CMD_TIMEOUT:
7182 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
7183 			    "out (pkt 0x%p)", (void *)pkt));
7184 			return (0);
7185 		case CMD_CMPLT:
7186 			switch (SCBP_C(pkt)) {
7187 				case STATUS_GOOD:
7188 					break;
7189 				case STATUS_CHECK:
7190 					if (pkt->pkt_state & STATE_ARQ_DONE) {
7191 						sns = &(((
7192 						    struct scsi_arq_status *)
7193 						    (uintptr_t)
7194 						    (pkt->pkt_scbp))->
7195 						    sts_sensedata);
7196 						if ((sns->es_key ==
7197 						    KEY_UNIT_ATTENTION) ||
7198 						    (sns->es_key ==
7199 						    KEY_NOT_READY)) {
7200 							/*
7201 							 * clear unit attn.
7202 							 */
7203 
7204 							VHCI_DEBUG(1,
7205 							    (CE_WARN, NULL,
7206 							    "!v_s_do_s_c: "
7207 							    "retry "
7208 							    "packet 0x%p sense "
7209 							    "data %s",
7210 							    (void *)pkt,
7211 							    scsi_sname
7212 							    (sns->es_key)));
7213 							goto retry;
7214 						}
7215 						VHCI_DEBUG(4, (CE_WARN, NULL,
7216 						    "!ARQ while "
7217 						    "transporting "
7218 						    "(pkt 0x%p)",
7219 						    (void *)pkt));
7220 						return (0);
7221 					}
7222 					return (0);
7223 				default:
7224 					VHCI_DEBUG(1, (CE_WARN, NULL,
7225 					    "!Bad status returned "
7226 					    "(pkt 0x%p, status %x)",
7227 					    (void *)pkt, SCBP_C(pkt)));
7228 					return (0);
7229 			}
7230 			break;
7231 		case CMD_INCOMPLETE:
7232 		case CMD_RESET:
7233 		case CMD_ABORTED:
7234 		case CMD_TRAN_ERR:
7235 			if (retry_cnt++ < 1) {
7236 				VHCI_DEBUG(1, (CE_WARN, NULL,
7237 				    "!v_s_do_s_c: retry packet 0x%p %s",
7238 				    (void *)pkt, scsi_rname(pkt->pkt_reason)));
7239 				goto retry;
7240 			}
7241 			/* FALLTHROUGH */
7242 		default:
7243 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
7244 			    "complete successfully (pkt 0x%p,"
7245 			    "reason %x)", (void *)pkt, pkt->pkt_reason));
7246 			return (0);
7247 	}
7248 	return (1);
7249 }
7250 
7251 static int
7252 vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
7253 {
7254 	mdi_pathinfo_t		*pip, *spip;
7255 	dev_info_t		*cdip;
7256 	struct scsi_vhci_priv	*svp;
7257 	mdi_pathinfo_state_t	pstate;
7258 	uint32_t		p_ext_state;
7259 	int			circular;
7260 
7261 	cdip = vlun->svl_dip;
7262 	pip = spip = NULL;
7263 	ndi_devi_enter(cdip, &circular);
7264 	pip = mdi_get_next_phci_path(cdip, NULL);
7265 	while (pip != NULL) {
7266 		(void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
7267 		if (pstate != MDI_PATHINFO_STATE_ONLINE) {
7268 			spip = pip;
7269 			pip = mdi_get_next_phci_path(cdip, spip);
7270 			continue;
7271 		}
7272 		mdi_hold_path(pip);
7273 		ndi_devi_exit(cdip, circular);
7274 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7275 		mutex_enter(&svp->svp_mutex);
7276 		while (svp->svp_cmds != 0) {
7277 			if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex,
7278 			    ddi_get_lbolt() + drv_usectohz
7279 			    (vhci_path_quiesce_timeout * 1000000)) == -1) {
7280 				mutex_exit(&svp->svp_mutex);
7281 				mdi_rele_path(pip);
7282 				VHCI_DEBUG(1, (CE_WARN, NULL,
7283 				    "Quiesce of lun is not successful "
7284 				    "vlun: 0x%p.", (void *)vlun));
7285 				return (0);
7286 			}
7287 		}
7288 		mutex_exit(&svp->svp_mutex);
7289 		ndi_devi_enter(cdip, &circular);
7290 		spip = pip;
7291 		pip = mdi_get_next_phci_path(cdip, spip);
7292 		mdi_rele_path(spip);
7293 	}
7294 	ndi_devi_exit(cdip, circular);
7295 	return (1);
7296 }
7297 
7298 static int
7299 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
7300 {
7301 	scsi_vhci_lun_t		*vlun;
7302 	vhci_prout_t		*prout;
7303 	int			rval, success;
7304 	mdi_pathinfo_t		*pip, *npip;
7305 	scsi_vhci_priv_t	*osvp;
7306 	dev_info_t		*cdip;
7307 	uchar_t			cdb_1;
7308 	uchar_t			temp_res_key[MHIOC_RESV_KEY_SIZE];
7309 
7310 
7311 	/*
7312 	 * see if there are any other paths available; if none,
7313 	 * then there is nothing to do.
7314 	 */
7315 	cdip = svp->svp_svl->svl_dip;
7316 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7317 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7318 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7319 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7320 		    "%s%d: vhci_pgr_validate_and_register: first path\n",
7321 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7322 		return (1);
7323 	}
7324 
7325 	vlun = svp->svp_svl;
7326 	prout = &vlun->svl_prout;
7327 	ASSERT(vlun->svl_pgr_active != 0);
7328 
7329 	/*
7330 	 * When the path was busy/offlined, some other host might have
7331 	 * cleared this key. Validate key on some other path first.
7332 	 * If it fails, return failure.
7333 	 */
7334 
7335 	npip = pip;
7336 	pip = NULL;
7337 	success = 0;
7338 
7339 	/* Save the res key */
7340 	bcopy((const void *)prout->res_key,
7341 	    (void *)temp_res_key, MHIOC_RESV_KEY_SIZE);
7342 
7343 	/*
7344 	 * Sometimes CDB from application can be a Register_And_Ignore.
7345 	 * Instead of validation, this cdb would result in force registration.
7346 	 * Convert it to normal cdb for validation.
7347 	 * After that be sure to restore the cdb.
7348 	 */
7349 	cdb_1 = vlun->svl_cdb[1];
7350 	vlun->svl_cdb[1] &= 0xe0;
7351 
7352 	do {
7353 		osvp = (scsi_vhci_priv_t *)
7354 		    mdi_pi_get_vhci_private(npip);
7355 		if (osvp == NULL) {
7356 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7357 			    "vhci_pgr_validate_and_register: no "
7358 			    "client priv! 0x%p offlined?\n",
7359 			    (void *)npip));
7360 			goto next_path_1;
7361 		}
7362 
7363 		if (osvp == svp) {
7364 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7365 			    "vhci_pgr_validate_and_register: same svp 0x%p"
7366 			    " npip 0x%p vlun 0x%p\n",
7367 			    (void *)svp, (void *)npip, (void *)vlun));
7368 			goto next_path_1;
7369 		}
7370 
7371 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7372 		    "vhci_pgr_validate_and_register: First validate on"
7373 		    " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
7374 		    " cdb1 %x\n", (void *)osvp, (void *)vlun,
7375 		    (void *)curthread, vlun->svl_cdb[1]));
7376 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");
7377 
7378 		bcopy((const void *)prout->service_key,
7379 		    (void *)prout->res_key, MHIOC_RESV_KEY_SIZE);
7380 
7381 		VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
7382 		    (void *)vlun));
7383 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7384 
7385 		rval = vhci_do_prout(osvp);
7386 		if (rval == 1) {
7387 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7388 			    "%s%d: vhci_pgr_validate_and_register: key"
7389 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7390 			    ddi_get_instance(cdip), (void *)curthread));
7391 			pip = npip;
7392 			success = 1;
7393 			break;
7394 		} else {
7395 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7396 			    "vhci_pgr_validate_and_register: First validation"
7397 			    " on osvp 0x%p failed %x\n", (void *)osvp, rval));
7398 			vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
7399 		}
7400 
7401 		/*
7402 		 * Try other paths
7403 		 */
7404 next_path_1:
7405 		pip = npip;
7406 		rval = mdi_select_path(cdip, NULL,
7407 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7408 		    pip, &npip);
7409 		mdi_rele_path(pip);
7410 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7411 
7412 
7413 	/* Be sure to restore original cdb */
7414 	vlun->svl_cdb[1] = cdb_1;
7415 
7416 	/* Restore the res_key */
7417 	bcopy((const void *)temp_res_key,
7418 	    (void *)prout->res_key, MHIOC_RESV_KEY_SIZE);
7419 
7420 	/*
7421 	 * If key could not be registered on any path for the first time,
7422 	 * return success as online should still continue.
7423 	 */
7424 	if (success == 0) {
7425 		return (1);
7426 	}
7427 
7428 	ASSERT(pip != NULL);
7429 
7430 	/*
7431 	 * Force register on new path
7432 	 */
7433 	cdb_1 = vlun->svl_cdb[1];		/* store the cdb */
7434 
7435 	vlun->svl_cdb[1] &= 0xe0;
7436 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
7437 
7438 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");
7439 
7440 	bcopy((const void *)prout->active_service_key,
7441 	    (void *)prout->service_key, MHIOC_RESV_KEY_SIZE);
7442 	bcopy((const void *)prout->active_res_key,
7443 	    (void *)prout->res_key, MHIOC_RESV_KEY_SIZE);
7444 
7445 	vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: ");
7446 
7447 	rval = vhci_do_prout(svp);
7448 	vlun->svl_cdb[1] = cdb_1;		/* restore the cdb */
7449 	if (rval != 1) {
7450 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7451 		    "vhci_pgr_validate_and_register: register on new"
7452 		    " path 0x%p svp 0x%p failed %x\n",
7453 		    (void *)pip, (void *)svp, rval));
7454 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
7455 		mdi_rele_path(pip);
7456 		return (0);
7457 	}
7458 
7459 	if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
7460 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7461 		    "vhci_pgr_validate_and_register: zero service key\n"));
7462 		mdi_rele_path(pip);
7463 		return (rval);
7464 	}
7465 
7466 	/*
7467 	 * While the key was force registered, some other host might have
7468 	 * cleared the key. Re-validate key on another pre-existing path
7469 	 * before declaring success.
7470 	 */
7471 	npip = pip;
7472 	pip = NULL;
7473 
7474 	/*
7475 	 * Sometimes CDB from application can be Register and Ignore.
7476 	 * Instead of validation, it would result in force registration.
7477 	 * Convert it to normal cdb for validation.
7478 	 * After that be sure to restore the cdb.
7479 	 */
7480 	cdb_1 = vlun->svl_cdb[1];
7481 	vlun->svl_cdb[1] &= 0xe0;
7482 	success = 0;
7483 
7484 	do {
7485 		osvp = (scsi_vhci_priv_t *)
7486 		    mdi_pi_get_vhci_private(npip);
7487 		if (osvp == NULL) {
7488 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7489 			    "vhci_pgr_validate_and_register: no "
7490 			    "client priv! 0x%p offlined?\n",
7491 			    (void *)npip));
7492 			goto next_path_2;
7493 		}
7494 
7495 		if (osvp == svp) {
7496 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7497 			    "vhci_pgr_validate_and_register: same osvp 0x%p"
7498 			    " npip 0x%p vlun 0x%p\n",
7499 			    (void *)svp, (void *)npip, (void *)vlun));
7500 			goto next_path_2;
7501 		}
7502 
7503 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7504 		    "vhci_pgr_validate_and_register: Re-validation on"
7505 		    " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
7506 		    (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
7507 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7508 
7509 		bcopy((const void *)prout->service_key,
7510 		    (void *)prout->res_key, MHIOC_RESV_KEY_SIZE);
7511 
7512 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7513 
7514 		rval = vhci_do_prout(osvp);
7515 		if (rval == 1) {
7516 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7517 			    "%s%d: vhci_pgr_validate_and_register: key"
7518 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7519 			    ddi_get_instance(cdip), (void *)curthread));
7520 			pip = npip;
7521 			success = 1;
7522 			break;
7523 		} else {
7524 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7525 			    "vhci_pgr_validate_and_register: Re-validation on"
7526 			    " osvp 0x%p failed %x\n", (void *)osvp, rval));
7527 			vhci_print_prout_keys(vlun,
7528 			    "v_pgr_val_reg: reval failed: ");
7529 		}
7530 
7531 		/*
7532 		 * Try other paths
7533 		 */
7534 next_path_2:
7535 		pip = npip;
7536 		rval = mdi_select_path(cdip, NULL,
7537 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7538 		    pip, &npip);
7539 		mdi_rele_path(pip);
7540 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7541 
7542 	/* Be sure to restore original cdb */
7543 	vlun->svl_cdb[1] = cdb_1;
7544 
7545 	if (success == 1) {
7546 		/* Successfully validated registration */
7547 		mdi_rele_path(pip);
7548 		return (1);
7549 	}
7550 
7551 	VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));
7552 
7553 	/*
7554 	 * key invalid, back out by registering key value of 0
7555 	 */
7556 	VHCI_DEBUG(4, (CE_NOTE, NULL,
7557 	    "vhci_pgr_validate_and_register: backout on"
7558 	    " svp 0x%p being done\n", (void *)svp));
7559 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7560 
7561 	bcopy((const void *)prout->service_key, (void *)prout->res_key,
7562 	    MHIOC_RESV_KEY_SIZE);
7563 	bzero((void *)prout->service_key, MHIOC_RESV_KEY_SIZE);
7564 
7565 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7566 
7567 	/*
7568 	 * Get a new path
7569 	 */
7570 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7571 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7572 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7573 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7574 		    "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
7575 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7576 		return (0);
7577 	}
7578 
7579 	if ((rval = vhci_do_prout(svp)) != 1) {
7580 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7581 		    "vhci_pgr_validate_and_register: backout on"
7582 		    " svp 0x%p failed\n", (void *)svp));
7583 		vhci_print_prout_keys(vlun, "backout failed");
7584 
7585 		VHCI_DEBUG(4, (CE_WARN, NULL,
7586 		    "%s%d: vhci_pgr_validate_and_register: key"
7587 		    " validation and backout failed", ddi_driver_name(cdip),
7588 		    ddi_get_instance(cdip)));
7589 		if (rval == VHCI_PGR_ILLEGALOP) {
7590 			VHCI_DEBUG(4, (CE_WARN, NULL,
7591 			    "%s%d: vhci_pgr_validate_and_register: key"
7592 			    " already cleared", ddi_driver_name(cdip),
7593 			    ddi_get_instance(cdip)));
7594 			rval = 1;
7595 		} else
7596 			rval = 0;
7597 	} else {
7598 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7599 		    "%s%d: vhci_pgr_validate_and_register: key"
7600 		    " validation failed, key backed out\n",
7601 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7602 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
7603 	}
7604 	mdi_rele_path(pip);
7605 
7606 	return (rval);
7607 }
7608 
7609 /*
7610  * taskq routine to dispatch a scsi cmd to vhci_scsi_start.  This ensures
7611  * that vhci_scsi_start is not called in interrupt context.
7612  * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we
7613  * need to complete the command if something goes wrong.
7614  */
7615 static void
7616 vhci_dispatch_scsi_start(void *arg)
7617 {
7618 	struct vhci_pkt *vpkt = (struct vhci_pkt *)arg;
7619 	struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
7620 	int			rval = TRAN_BUSY;
7621 
7622 	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
7623 	    " scsi-2 reserve for 0x%p\n",
7624 	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7625 
7626 	/*
7627 	 * To prevent the taskq from being called recursively we set the
7628 	 * the VHCI_PKT_THRU_TASKQ bit in the vhci_pkt_states.
7629 	 */
7630 	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;
7631 
7632 	/*
7633 	 * Wait for the transport to get ready to send packets
7634 	 * and if it times out, it will return something other than
7635 	 * TRAN_BUSY. The vhci_reserve_delay may want to
7636 	 * get tuned for other transports and is therefore a global.
7637 	 * Using delay since this routine is called by taskq dispatch
7638 	 * and not called during interrupt context.
7639 	 */
7640 	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
7641 	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
7642 		delay(drv_usectohz(vhci_reserve_delay));
7643 	}
7644 
7645 	switch (rval) {
7646 	case TRAN_ACCEPT:
7647 		return;
7648 
7649 	default:
7650 		/*
7651 		 * This pkt shall be retried, and to ensure another taskq
7652 		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
7653 		 * flag.
7654 		 */
7655 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
7656 
7657 		/* Ensure that the pkt is retried without a reset */
7658 		tpkt->pkt_reason = CMD_ABORTED;
7659 		tpkt->pkt_statistics |= STAT_ABORTED;
7660 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
7661 		    "TRAN_rval %d returned for dip 0x%p", rval,
7662 		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7663 		break;
7664 	}
7665 
7666 	/*
7667 	 * vpkt_org_vpkt should always be NULL here if the retry command
7668 	 * has been successfully dispatched.  If vpkt_org_vpkt != NULL at
7669 	 * this point, it is an error so restore the original vpkt and
7670 	 * return an error to the target driver so it can retry the
7671 	 * command as appropriate.
7672 	 */
7673 	if (vpkt->vpkt_org_vpkt != NULL) {
7674 		struct vhci_pkt		*new_vpkt = vpkt;
7675 		scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
7676 		    mdi_pi_get_vhci_private(vpkt->vpkt_path);
7677 
7678 		vpkt = vpkt->vpkt_org_vpkt;
7679 
7680 		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
7681 		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;
7682 
7683 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
7684 		    new_vpkt->vpkt_tgt_pkt);
7685 
7686 		tpkt = vpkt->vpkt_tgt_pkt;
7687 	}
7688 
7689 	if (tpkt->pkt_comp) {
7690 		(*tpkt->pkt_comp)(tpkt);
7691 	}
7692 }
7693 
7694 static void
7695 vhci_initiate_auto_failback(void *arg)
7696 {
7697 	struct scsi_vhci_lun	*vlun = (struct scsi_vhci_lun *)arg;
7698 	dev_info_t		*vdip, *cdip;
7699 	int			held;
7700 
7701 	cdip = vlun->svl_dip;
7702 	vdip = ddi_get_parent(cdip);
7703 
7704 	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
7705 
7706 	/*
7707 	 * Perform a final check to see if the active path class is indeed
7708 	 * not the prefered path class.  As in the time the auto failback
7709 	 * was dispatched, an external failover could have been detected.
7710 	 * [Some other host could have detected this condition and triggered
7711 	 *  the auto failback before].
7712 	 * In such a case if we go ahead with failover we will be negating the
7713 	 * whole purpose of auto failback.
7714 	 */
7715 	mutex_enter(&vlun->svl_mutex);
7716 	if (vlun->svl_active_pclass != NULL) {
7717 		char				*best_pclass;
7718 		struct scsi_failover_ops	*fo;
7719 
7720 		fo = vlun->svl_fops;
7721 
7722 		(*fo->sfo_pathclass_next)(NULL, &best_pclass,
7723 		    vlun->svl_fops_ctpriv);
7724 		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
7725 			mutex_exit(&vlun->svl_mutex);
7726 			VHCI_RELEASE_LUN(vlun);
7727 			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
7728 			    "auto failback for %s as %s pathclass already "
7729 			    "active.\n", vlun->svl_lun_wwn, best_pclass));
7730 			return;
7731 		}
7732 	}
7733 	mutex_exit(&vlun->svl_mutex);
7734 	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
7735 	    == MDI_SUCCESS) {
7736 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7737 		    "succeeded for device %s (GUID %s)",
7738 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7739 	} else {
7740 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7741 		    "failed for device %s (GUID %s)",
7742 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7743 	}
7744 	VHCI_RELEASE_LUN(vlun);
7745 }
7746 
7747 #ifdef DEBUG
7748 static void
7749 vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
7750 {
7751 	uchar_t index = 0;
7752 	char buf[100];
7753 
7754 	VHCI_DEBUG(5, (CE_NOTE, NULL, "num keys %d\n", numkeys));
7755 
7756 	while (index < numkeys) {
7757 		bcopy(&prin->keylist[index], buf, MHIOC_RESV_KEY_SIZE);
7758 		VHCI_DEBUG(5, (CE_NOTE, NULL,
7759 		    "%02x%02x%02x%02x%02x%02x%02x%02x\t",
7760 		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7761 		    buf[7]));
7762 		index++;
7763 	}
7764 }
7765 #endif
7766 
7767 static void
7768 vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
7769 {
7770 	int			i;
7771 	vhci_prout_t		*prout;
7772 	char			buf1[4*MHIOC_RESV_KEY_SIZE + 1];
7773 	char			buf2[4*MHIOC_RESV_KEY_SIZE + 1];
7774 	char			buf3[4*MHIOC_RESV_KEY_SIZE + 1];
7775 	char			buf4[4*MHIOC_RESV_KEY_SIZE + 1];
7776 
7777 	prout = &vlun->svl_prout;
7778 
7779 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7780 		(void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
7781 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7782 		(void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]);
7783 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7784 		(void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
7785 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7786 		(void) sprintf(&buf4[4*i], "[%02x]",
7787 		    prout->active_service_key[i]);
7788 
7789 	/* Printing all in one go. Otherwise it will jumble up */
7790 	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
7791 	    "res_key:          : %s\n"
7792 	    "service_key       : %s\n"
7793 	    "active_res_key    : %s\n"
7794 	    "active_service_key: %s\n",
7795 	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
7796 }
7797 
7798 /*
7799  * Called from vhci_scsi_start to update the pHCI pkt with target packet.
7800  */
7801 static void
7802 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
7803 {
7804 
7805 	ASSERT(vpkt->vpkt_hba_pkt);
7806 
7807 	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
7808 	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;
7809 
7810 	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
7811 	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
7812 		/*
7813 		 * Polled Command is requested or HBA is in
7814 		 * suspended state
7815 		 */
7816 		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
7817 		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
7818 	} else {
7819 		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
7820 	}
7821 	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
7822 	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
7823 	    vpkt->vpkt_tgt_init_cdblen);
7824 	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;
7825 
7826 	/* Re-initialize the following pHCI packet state information */
7827 	vpkt->vpkt_hba_pkt->pkt_state = 0;
7828 	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
7829 	vpkt->vpkt_hba_pkt->pkt_reason = 0;
7830 }
7831 
7832 static int
7833 vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
7834     void *arg, void *result)
7835 {
7836 	int ret = DDI_SUCCESS;
7837 
7838 	/*
7839 	 * Generic processing in MPxIO framework
7840 	 */
7841 	ret = mdi_bus_power(parent, impl_arg, op, arg, result);
7842 
7843 	switch (ret) {
7844 	case MDI_SUCCESS:
7845 		ret = DDI_SUCCESS;
7846 		break;
7847 	case MDI_FAILURE:
7848 		ret = DDI_FAILURE;
7849 		break;
7850 	default:
7851 		break;
7852 	}
7853 
7854 	return (ret);
7855 }
7856 
7857 static int
7858 vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
7859     mdi_pathinfo_t *pip)
7860 {
7861 	dev_info_t		*cdip;
7862 	mdi_pathinfo_t		*npip = NULL;
7863 	scsi_vhci_priv_t	*svp = NULL;
7864 	struct scsi_address	*pap = NULL;
7865 	scsi_hba_tran_t		*hba = NULL;
7866 	int			sps;
7867 	int			mps_flag;
7868 	int			rval = 0;
7869 
7870 	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
7871 	if (pip) {
7872 		/*
7873 		 * If the call is from vhci_pathinfo_state_change,
7874 		 * then this path was busy and is becoming ready to accept IO.
7875 		 */
7876 		ASSERT(ap != NULL);
7877 		hba = ap->a_hba_tran;
7878 		ASSERT(hba != NULL);
7879 		rval = scsi_ifsetcap(ap, cap, val, whom);
7880 
7881 		VHCI_DEBUG(2, (CE_NOTE, NULL,
7882 		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
7883 		    (void *)pip, (void *)ap, rval));
7884 
7885 		return (rval);
7886 	}
7887 
7888 	/*
7889 	 * Set capability on all the pHCIs.
7890 	 * If any path is busy, then the capability would be set by
7891 	 * vhci_pathinfo_state_change.
7892 	 */
7893 
7894 	cdip = ADDR2DIP(ap);
7895 	ASSERT(cdip != NULL);
7896 	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
7897 	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
7898 		VHCI_DEBUG(2, (CE_WARN, NULL,
7899 		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
7900 		    (void *)cdip));
7901 		return (0);
7902 	}
7903 
7904 again:
7905 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7906 	if (svp == NULL) {
7907 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
7908 		    "priv is NULL, pip 0x%p", (void *)pip));
7909 		mdi_rele_path(pip);
7910 		return (rval);
7911 	}
7912 
7913 	if (svp->svp_psd == NULL) {
7914 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
7915 		    "psd is NULL, pip 0x%p, svp 0x%p",
7916 		    (void *)pip, (void *)svp));
7917 		mdi_rele_path(pip);
7918 		return (rval);
7919 	}
7920 
7921 	pap = &svp->svp_psd->sd_address;
7922 	ASSERT(pap != NULL);
7923 	hba = pap->a_hba_tran;
7924 	ASSERT(hba != NULL);
7925 
7926 	if (hba->tran_setcap != NULL) {
7927 		rval = scsi_ifsetcap(pap, cap, val, whom);
7928 
7929 		VHCI_DEBUG(2, (CE_NOTE, NULL,
7930 		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
7931 		    (void *)pip, (void *)ap, rval));
7932 
7933 		/*
7934 		 * Select next path and issue the setcap, repeat
7935 		 * until all paths are exhausted
7936 		 */
7937 		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
7938 		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
7939 			mdi_rele_path(pip);
7940 			return (1);
7941 		}
7942 		mdi_rele_path(pip);
7943 		pip = npip;
7944 		goto again;
7945 	}
7946 	mdi_rele_path(pip);
7947 	return (rval);
7948 }
7949 
7950 static int
7951 vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
7952     void *arg, dev_info_t **child)
7953 {
7954 	char *guid;
7955 
7956 	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
7957 		guid = vhci_devnm_to_guid((char *)arg);
7958 	else
7959 		guid = NULL;
7960 
7961 	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
7962 	    == MDI_SUCCESS)
7963 		return (NDI_SUCCESS);
7964 	else
7965 		return (NDI_FAILURE);
7966 }
7967 
7968 /*
7969  * Take the original vhci_pkt, create a duplicate of the pkt for resending
7970  * as though it originated in ssd.
7971  */
7972 static struct scsi_pkt *
7973 vhci_create_retry_pkt(struct vhci_pkt *vpkt)
7974 {
7975 	struct vhci_pkt *new_vpkt = NULL;
7976 	struct scsi_pkt	*pkt = NULL;
7977 
7978 	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
7979 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
7980 
7981 	/*
7982 	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
7983 	 */
7984 	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
7985 	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
7986 	    vpkt->vpkt_tgt_init_scblen, 0,
7987 	    PKT_CONSISTENT,
7988 	    NULL_FUNC, NULL);
7989 	if (pkt != NULL) {
7990 		new_vpkt = TGTPKT2VHCIPKT(pkt);
7991 
7992 		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
7993 		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
7994 		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
7995 		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;
7996 
7997 		pkt->pkt_resid = 0;
7998 		pkt->pkt_statistics = 0;
7999 		pkt->pkt_reason = 0;
8000 
8001 		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
8002 		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);
8003 
8004 		/*
8005 		 * Save a pointer to the original vhci_pkt
8006 		 */
8007 		new_vpkt->vpkt_org_vpkt = vpkt;
8008 	}
8009 
8010 	return (pkt);
8011 }
8012 
8013 /*
8014  * Copy the successful completion information from the hba packet into
8015  * the original target pkt from the upper layer.  Returns the original
8016  * vpkt and destroys the new vpkt from the internal retry.
8017  */
8018 static struct vhci_pkt *
8019 vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
8020 {
8021 	struct vhci_pkt		*ret_vpkt = NULL;
8022 	struct scsi_pkt		*tpkt = NULL;
8023 	struct scsi_pkt		*hba_pkt = NULL;
8024 	scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
8025 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8026 
8027 	ASSERT(vpkt->vpkt_org_vpkt != NULL);
8028 	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
8029 	    "completed successfully!\n"));
8030 
8031 	ret_vpkt = vpkt->vpkt_org_vpkt;
8032 	tpkt = ret_vpkt->vpkt_tgt_pkt;
8033 	hba_pkt = vpkt->vpkt_hba_pkt;
8034 
8035 	/*
8036 	 * Copy the good status into the target driver's packet
8037 	 */
8038 	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
8039 	tpkt->pkt_resid = hba_pkt->pkt_resid;
8040 	tpkt->pkt_state = hba_pkt->pkt_state;
8041 	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
8042 	tpkt->pkt_reason = hba_pkt->pkt_reason;
8043 
8044 	/*
8045 	 * Destroy the internally created vpkt for the retry
8046 	 */
8047 	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
8048 	    vpkt->vpkt_tgt_pkt);
8049 
8050 	return (ret_vpkt);
8051 }
8052 
8053 /* restart the request sense request */
8054 static void
8055 vhci_uscsi_restart_sense(void *arg)
8056 {
8057 	struct buf 	*rqbp;
8058 	struct buf 	*bp;
8059 	struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
8060 	mp_uscsi_cmd_t 	*mp_uscmdp;
8061 
8062 	VHCI_DEBUG(4, (CE_WARN, NULL,
8063 	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));
8064 
8065 	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
8066 		/* if it fails - need to wakeup the original command */
8067 		mp_uscmdp = rqpkt->pkt_private;
8068 		bp = mp_uscmdp->cmdbp;
8069 		rqbp = mp_uscmdp->rqbp;
8070 		ASSERT(mp_uscmdp && bp && rqbp);
8071 		scsi_free_consistent_buf(rqbp);
8072 		scsi_destroy_pkt(rqpkt);
8073 		bp->b_resid = bp->b_bcount;
8074 		bioerror(bp, EIO);
8075 		biodone(bp);
8076 	}
8077 }
8078 
8079 /*
8080  * auto-rqsense is not enabled so we have to retrieve the request sense
8081  * manually.
8082  */
8083 static int
8084 vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
8085 {
8086 	struct buf 		*rqbp, *cmdbp;
8087 	struct scsi_pkt 	*rqpkt;
8088 	int			rval = 0;
8089 
8090 	cmdbp = mp_uscmdp->cmdbp;
8091 	ASSERT(cmdbp != NULL);
8092 
8093 	VHCI_DEBUG(4, (CE_WARN, NULL,
8094 	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
8095 	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
8096 	/* set up the packet information and cdb */
8097 	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
8098 	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
8099 		return (-1);
8100 	}
8101 
8102 	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
8103 	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
8104 		scsi_free_consistent_buf(rqbp);
8105 		return (-1);
8106 	}
8107 
8108 	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
8109 	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
8110 
8111 	mp_uscmdp->rqbp = rqbp;
8112 	rqbp->b_private = mp_uscmdp;
8113 	rqpkt->pkt_flags |= FLAG_SENSING;
8114 	rqpkt->pkt_time = 60;
8115 	rqpkt->pkt_comp = vhci_uscsi_iodone;
8116 	rqpkt->pkt_private = mp_uscmdp;
8117 
8118 	/* get her done */
8119 	switch (scsi_transport(rqpkt)) {
8120 	case TRAN_ACCEPT:
8121 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8122 		    "transport accepted."));
8123 		break;
8124 	case TRAN_BUSY:
8125 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8126 		    "transport busy, setting timeout."));
8127 		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
8128 		    (drv_usectohz(5 * 1000000)));
8129 		break;
8130 	default:
8131 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8132 		    "transport failed"));
8133 		scsi_free_consistent_buf(rqbp);
8134 		scsi_destroy_pkt(rqpkt);
8135 		rval = -1;
8136 	}
8137 
8138 	return (rval);
8139 }
8140 
8141 /*
8142  * done routine for the mpapi uscsi command - this is behaving as though
8143  * FLAG_DIAGNOSE is set meaning there are no retries except for a manual
8144  * request sense.
8145  */
8146 void
8147 vhci_uscsi_iodone(struct scsi_pkt *pkt)
8148 {
8149 	struct buf 			*bp;
8150 	mp_uscsi_cmd_t 			*mp_uscmdp;
8151 	struct uscsi_cmd 		*uscmdp;
8152 	struct scsi_arq_status 		*arqstat;
8153 	int 				err;
8154 
8155 	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
8156 	uscmdp = mp_uscmdp->uscmdp;
8157 	bp = mp_uscmdp->cmdbp;
8158 	ASSERT(bp != NULL);
8159 	VHCI_DEBUG(4, (CE_WARN, NULL,
8160 	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
8161 	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
8162 	/* Save the status and the residual into the uscsi_cmd struct */
8163 	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
8164 	uscmdp->uscsi_resid = bp->b_resid;
8165 
8166 	/* return on a very successful command */
8167 	if (pkt->pkt_reason == CMD_CMPLT &&
8168 	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
8169 	    pkt->pkt_resid == 0) {
8170 		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8171 		scsi_destroy_pkt(pkt);
8172 		biodone(bp);
8173 		return;
8174 	}
8175 	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
8176 	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
8177 	    pkt->pkt_reason, pkt->pkt_resid,
8178 	    pkt->pkt_state, bp->b_bcount, bp->b_resid));
8179 
8180 	err = EIO;
8181 
8182 	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
8183 	if (pkt->pkt_reason != CMD_CMPLT) {
8184 		/*
8185 		 * The command did not complete.
8186 		 */
8187 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8188 		    "vhci_uscsi_iodone: command did not complete."
8189 		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
8190 		if (pkt->pkt_flags & FLAG_SENSING) {
8191 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8192 		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
8193 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
8194 			err = ETIMEDOUT;
8195 		}
8196 	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
8197 		/*
8198 		 * The auto-rqsense happened, and the packet has a filled-in
8199 		 * scsi_arq_status structure, pointed to by pkt_scbp.
8200 		 */
8201 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8202 		    "vhci_uscsi_iodone: received auto-requested sense"));
8203 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8204 			/* get the amount of data to copy into rqbuf */
8205 			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
8206 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8207 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8208 			uscmdp->uscsi_rqstatus =
8209 			    *((char *)&arqstat->sts_rqpkt_status);
8210 			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
8211 			    rqlen != 0) {
8212 				bcopy(&(arqstat->sts_sensedata),
8213 				    uscmdp->uscsi_rqbuf, rqlen);
8214 			}
8215 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8216 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8217 			    "vhci_uscsi_iodone: ARQ "
8218 			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
8219 			    "xfer: %d rqpkt_resid: %d\n",
8220 			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
8221 			    uscmdp->uscsi_rqlen, rqlen,
8222 			    arqstat->sts_rqpkt_resid));
8223 		}
8224 	} else if (pkt->pkt_flags & FLAG_SENSING) {
8225 		struct buf *rqbp;
8226 		struct scsi_status *rqstatus;
8227 
8228 		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
8229 		/* a manual request sense was done - get the information */
8230 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8231 			int rqlen = SENSE_LENGTH - pkt->pkt_resid;
8232 
8233 			rqbp = mp_uscmdp->rqbp;
8234 			/* get the amount of data to copy into rqbuf */
8235 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8236 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8237 			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
8238 			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
8239 				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
8240 				    rqlen);
8241 			}
8242 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8243 			scsi_free_consistent_buf(rqbp);
8244 		}
8245 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING"
8246 		    "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
8247 		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
8248 	} else {
8249 		struct scsi_status *status =
8250 		    (struct scsi_status *)pkt->pkt_scbp;
8251 		/*
8252 		 * Command completed and we're not getting sense. Check for
8253 		 * errors and decide what to do next.
8254 		 */
8255 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8256 		    "vhci_uscsi_iodone: command appears complete: reason: %x",
8257 		    pkt->pkt_reason));
8258 		if (status->sts_chk) {
8259 			/* need to manually get the request sense */
8260 			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
8261 				scsi_destroy_pkt(pkt);
8262 				return;
8263 			}
8264 		} else {
8265 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8266 			    "vhci_chk_err: appears complete"));
8267 			err = 0;
8268 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8269 			if (pkt->pkt_resid) {
8270 				bp->b_resid += pkt->pkt_resid;
8271 			}
8272 		}
8273 	}
8274 
8275 	if (err) {
8276 		if (bp->b_resid == 0)
8277 			bp->b_resid = bp->b_bcount;
8278 		bioerror(bp, err);
8279 		bp->b_flags |= B_ERROR;
8280 	}
8281 
8282 	scsi_destroy_pkt(pkt);
8283 	biodone(bp);
8284 
8285 	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
8286 }
8287 
8288 /*
8289  * start routine for the mpapi uscsi command
8290  */
8291 int
8292 vhci_uscsi_iostart(struct buf *bp)
8293 {
8294 	struct scsi_pkt 	*pkt;
8295 	struct uscsi_cmd	*uscmdp;
8296 	mp_uscsi_cmd_t 		*mp_uscmdp;
8297 	int			stat_size, rval;
8298 	int			retry = 0;
8299 
8300 	ASSERT(bp->b_private != NULL);
8301 
8302 	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
8303 	uscmdp = mp_uscmdp->uscmdp;
8304 	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8305 		stat_size = SENSE_LENGTH;
8306 	} else {
8307 		stat_size = 1;
8308 	}
8309 
8310 	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
8311 	    stat_size, 0, 0, SLEEP_FUNC, NULL);
8312 	if (pkt == NULL) {
8313 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8314 		    "vhci_uscsi_iostart: rval: EINVAL"));
8315 		bp->b_resid = bp->b_bcount;
8316 		uscmdp->uscsi_resid = bp->b_bcount;
8317 		bioerror(bp, EINVAL);
8318 		biodone(bp);
8319 		return (EINVAL);
8320 	}
8321 
8322 	pkt->pkt_time = uscmdp->uscsi_timeout;
8323 	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
8324 	pkt->pkt_comp = vhci_uscsi_iodone;
8325 	pkt->pkt_private = mp_uscmdp;
8326 	if (uscmdp->uscsi_flags & USCSI_SILENT)
8327 		pkt->pkt_flags |= FLAG_SILENT;
8328 	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
8329 		pkt->pkt_flags |= FLAG_ISOLATE;
8330 	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
8331 		pkt->pkt_flags |= FLAG_DIAGNOSE;
8332 	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
8333 		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
8334 	}
8335 	VHCI_DEBUG(4, (CE_WARN, NULL,
8336 	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
8337 	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
8338 	    " stat_size: %d",
8339 	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
8340 	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
8341 	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));
8342 
8343 	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
8344 	    retry < vhci_uscsi_retry_count) {
8345 		delay(drv_usectohz(vhci_uscsi_delay));
8346 		retry++;
8347 	}
8348 	if (retry >= vhci_uscsi_retry_count) {
8349 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8350 		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
8351 	}
8352 	switch (rval) {
8353 	case TRAN_ACCEPT:
8354 		rval =  0;
8355 		break;
8356 
8357 	default:
8358 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8359 		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
8360 		    rval, bp->b_bcount, bp->b_resid));
8361 		bp->b_resid = bp->b_bcount;
8362 		uscmdp->uscsi_resid = bp->b_bcount;
8363 		bioerror(bp, EIO);
8364 		scsi_destroy_pkt(pkt);
8365 		biodone(bp);
8366 		rval = EIO;
8367 		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8368 		break;
8369 	}
8370 	VHCI_DEBUG(4, (CE_NOTE, NULL,
8371 	    "vhci_uscsi_iostart: exit: rval: %d", rval));
8372 	return (rval);
8373 }
8374