xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c (revision af28f636873b7156cfd73ceffa927658cca33fd0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Multiplexed I/O SCSI vHCI implementation
27  */
28 
29 #include <sys/conf.h>
30 #include <sys/file.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/scsi/scsi.h>
34 #include <sys/scsi/impl/scsi_reset_notify.h>
35 #include <sys/scsi/impl/services.h>
36 #include <sys/sunmdi.h>
37 #include <sys/mdi_impldefs.h>
38 #include <sys/scsi/adapters/scsi_vhci.h>
39 #include <sys/disp.h>
40 #include <sys/byteorder.h>
41 
42 extern uintptr_t scsi_callback_id;
43 extern ddi_dma_attr_t scsi_alloc_attr;
44 
45 #ifdef	DEBUG
46 int	vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
47 #endif
48 
49 /* retry count for the vhci_do_prout command when NOT READY is returned */
50 int vhci_prout_not_ready_retry = 180;
51 
52 /*
53  * These values are defined to support the internal retry of
54  * SCSI packets for better sense code handling.
55  */
56 #define	VHCI_CMD_CMPLT	0
57 #define	VHCI_CMD_RETRY	1
58 #define	VHCI_CMD_ERROR	-1
59 
60 #define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
61 #define	VHCI_SCSI_PERR		0x47
62 #define	VHCI_PGR_ILLEGALOP	-2
63 #define	VHCI_NUM_UPDATE_TASKQ	8
64 /* changed to 132 to accommodate HDS */
65 
66 /*
67  * Version Macros
68  */
69 #define	VHCI_NAME_VERSION	"SCSI VHCI Driver"
70 char		vhci_version_name[] = VHCI_NAME_VERSION;
71 
72 int		vhci_first_time = 0;
73 clock_t		vhci_to_ticks = 0;
74 int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
75 kcondvar_t	vhci_cv;
76 kmutex_t	vhci_global_mutex;
77 void		*vhci_softstate = NULL; /* for soft state */
78 
79 /*
80  * Delay (in microseconds) before retrying the reserve command
81  */
82 int		vhci_reserve_delay = 100000;
83 static int	vhci_path_quiesce_timeout = 60;
84 static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];
85 
86 /* uscsi delay for a TRAN_BUSY */
87 static int vhci_uscsi_delay = 100000;
88 static int vhci_uscsi_retry_count = 180;
89 /* uscsi_restart_sense timeout id in case it needs to get canceled */
90 static timeout_id_t vhci_restart_timeid = 0;
91 
92 static int	vhci_bus_config_debug = 0;
93 
94 /*
95  * Bidirectional map of 'target-port' to port id <pid> for support of
96  * iostat(1M) '-Xx' and '-Yx' output.
97  */
98 static kmutex_t		vhci_targetmap_mutex;
99 static uint_t		vhci_targetmap_pid = 1;
100 static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
101 static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */
102 
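/*
 * Illustrative sketch (compiled out): how the two hashes above pair up
 * to form the bidirectional map.  The hash names, mutex, and pid counter
 * are the real ones; the helper itself is hypothetical, and a real
 * caller would insert a durable copy of the 'target-port' string rather
 * than the caller's buffer.
 */
#ifdef VHCI_TARGETMAP_EXAMPLE
static uint_t
vhci_targetmap_port2pid(char *target_port)
{
	mod_hash_val_t	hv;
	uint_t		pid;

	mutex_enter(&vhci_targetmap_mutex);
	if (mod_hash_find(vhci_targetmap_byport,
	    (mod_hash_key_t)target_port, &hv) == 0) {
		pid = (uint_t)(uintptr_t)hv;	/* already mapped */
	} else {
		pid = vhci_targetmap_pid++;	/* assign next pid */
		(void) mod_hash_insert(vhci_targetmap_byport,
		    (mod_hash_key_t)target_port,
		    (mod_hash_val_t)(uintptr_t)pid);
		(void) mod_hash_insert(vhci_targetmap_bypid,
		    (mod_hash_key_t)(uintptr_t)pid,
		    (mod_hash_val_t)target_port);
	}
	mutex_exit(&vhci_targetmap_mutex);
	return (pid);
}
#endif	/* VHCI_TARGETMAP_EXAMPLE */
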
103 /*
104  * functions exported by scsi_vhci struct cb_ops
105  */
106 static int vhci_open(dev_t *, int, int, cred_t *);
107 static int vhci_close(dev_t, int, int, cred_t *);
108 static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
109 
110 /*
111  * functions exported by scsi_vhci struct dev_ops
112  */
113 static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
114 static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
115 static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);
116 
117 /*
118  * functions exported by scsi_vhci scsi_hba_tran_t transport table
119  */
120 static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
121     scsi_hba_tran_t *, struct scsi_device *);
122 static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
123     struct scsi_device *);
124 static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
125 static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
126 static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
127 static int vhci_scsi_reset(struct scsi_address *, int);
128 static int vhci_scsi_reset_target(struct scsi_address *, int level,
129     uint8_t select_path);
130 static int vhci_scsi_reset_bus(struct scsi_address *);
131 static int vhci_scsi_getcap(struct scsi_address *, char *, int);
132 static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
133 static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
134 static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
135     mdi_pathinfo_t *pip);
136 static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
137     struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
138 static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
139 static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
140 static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
141 static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
142     caddr_t);
143 static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
144 static int vhci_scsi_get_name(struct scsi_device *, char *, int);
145 static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
146     void *, void *);
147 static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
148     void *, dev_info_t **);
149 static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t,
150     void *);
151 static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *,
152     void **, char **);
153 
154 /*
155  * functions registered with the mpxio framework via mdi_vhci_ops_t
156  */
157 static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
158 static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
159 static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
160 		mdi_pathinfo_state_t, uint32_t, int);
161 static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
162 static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
163 static int vhci_failover(dev_info_t *, dev_info_t *, int);
164 static void vhci_client_attached(dev_info_t *);
165 static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *);
166 
167 static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
168 static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
169 static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
170 static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
171 static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
172 static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
173 static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
174 static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
175 static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
176 static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
177 static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
178     int, caddr_t);
179 static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
180     uint_t, sv_iocdata_t *, int, caddr_t);
181 static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
182 static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
183     sv_iocdata_t *, int, caddr_t);
184 static int vhci_handle_ext_fo(struct scsi_pkt *, int);
185 static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
186 static int vhci_quiesce_lun(struct scsi_vhci_lun *);
187 static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
188 static void vhci_dispatch_scsi_start(void *);
189 static void vhci_efo_done(void *);
190 static void vhci_initiate_auto_failback(void *);
191 static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
192 static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
193     struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
194 static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
195 static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
196     scsi_vhci_lun_t *, char *, char *);
197 
198 static char *vhci_devnm_to_guid(char *);
199 static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
200     int, int (*func)(caddr_t));
201 static void vhci_intr(struct scsi_pkt *);
202 static int vhci_do_prout(scsi_vhci_priv_t *);
203 static void vhci_run_cmd(void *);
204 static int vhci_do_prin(struct vhci_pkt **);
205 static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
206 static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
207 static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
208 static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
209 static void vhci_lun_free(dev_info_t *);
210 static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
211     uint8_t, uint8_t);
212 void vhci_update_pathstates(void *);
213 
214 #ifdef DEBUG
215 static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
216 static void vhci_print_cdb(dev_info_t *dip, uint_t level,
217     char *title, uchar_t *cdb);
218 static void vhci_clean_print(dev_info_t *dev, uint_t level,
219     char *title, uchar_t *data, int len);
220 #endif
221 static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
222 static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
223 static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *);
224 
225 /*
226  * MP-API related functions
227  */
228 extern int vhci_mpapi_init(struct scsi_vhci *);
229 extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
230 extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
231 extern void vhci_update_mpapi_data(struct scsi_vhci *,
232     scsi_vhci_lun_t *, mdi_pathinfo_t *);
233 extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
234     uint8_t, void*);
235 extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
236 extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
237     scsi_vhci_lun_t *);
238 
239 #define	VHCI_DMA_MAX_XFER_CAP	INT_MAX
240 
241 #define	VHCI_MAX_PGR_RETRIES	3
242 
243 /*
244  * Macros for the device-type mpxio options
245  */
246 #define	LOAD_BALANCE_OPTIONS		"load-balance-options"
247 #define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
248 #define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
249 #define	DEVICE_TYPE_STR			"device-type"
250 #define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')
251 
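/*
 * A hedged example of the scsi_vhci.conf syntax the macros above refer
 * to; the vendor/product string and region size below are made up:
 *
 *	device-type-mpxio-options-list =
 *		"device-type=VENDOR  PRODUCT",
 *		"load-balance-options=logical-block-options";
 *	logical-block-options = "load-balance=logical-block",
 *		"region-size=18";
 */
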
252 static struct cb_ops vhci_cb_ops = {
253 	vhci_open,			/* open */
254 	vhci_close,			/* close */
255 	nodev,				/* strategy */
256 	nodev,				/* print */
257 	nodev,				/* dump */
258 	nodev,				/* read */
259 	nodev,				/* write */
260 	vhci_ioctl,			/* ioctl */
261 	nodev,				/* devmap */
262 	nodev,				/* mmap */
263 	nodev,				/* segmap */
264 	nochpoll,			/* chpoll */
265 	ddi_prop_op,			/* cb_prop_op */
266 	0,				/* streamtab */
267 	D_NEW | D_MP,			/* cb_flag */
268 	CB_REV,				/* rev */
269 	nodev,				/* aread */
270 	nodev				/* awrite */
271 };
272 
273 static struct dev_ops vhci_ops = {
274 	DEVO_REV,
275 	0,
276 	vhci_getinfo,
277 	nulldev,		/* identify */
278 	nulldev,		/* probe */
279 	vhci_attach,		/* attach and detach are mandatory */
280 	vhci_detach,
281 	nodev,			/* reset */
282 	&vhci_cb_ops,		/* cb_ops */
283 	NULL,			/* bus_ops */
284 	NULL,			/* power */
285 	ddi_quiesce_not_needed,	/* quiesce */
286 };
287 
288 extern struct mod_ops mod_driverops;
289 
290 static struct modldrv modldrv = {
291 	&mod_driverops,
292 	vhci_version_name,	/* module name */
293 	&vhci_ops
294 };
295 
296 static struct modlinkage modlinkage = {
297 	MODREV_1,
298 	&modldrv,
299 	NULL
300 };
301 
302 static mdi_vhci_ops_t vhci_opinfo = {
303 	MDI_VHCI_OPS_REV,
304 	vhci_pathinfo_init,		/* Pathinfo node init callback */
305 	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
306 	vhci_pathinfo_state_change,	/* Pathinfo node state change */
307 	vhci_failover,			/* failover callback */
308 	vhci_client_attached,		/* client attached callback	*/
309 	vhci_is_dev_supported		/* is device supported by mdi */
310 };
311 
312 /*
313  * The scsi_failover table defines an ordered set of 'fops' modules supported
314  * by scsi_vhci.  Currently, this table is initialized from the
315  * 'ddi-forceload' property specified in scsi_vhci.conf.
316  */
317 static struct scsi_failover {
318 	ddi_modhandle_t			sf_mod;
319 	struct scsi_failover_ops	*sf_sfo;
320 } *scsi_failover_table;
321 static uint_t	scsi_nfailover;
322 
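/*
 * For illustration, a 'ddi-forceload' entry as vhci_failover_modopen()
 * expects to find it in scsi_vhci.conf (module paths are examples; the
 * shipped file may list more or different fops modules):
 *
 *	ddi-forceload =
 *		"misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *		"misc/scsi_vhci/scsi_vhci_f_sym",
 *		"misc/scsi_vhci/scsi_vhci_f_tpgs";
 */
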
323 int
324 _init(void)
325 {
326 	int	rval;
327 
328 	/*
329 	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
330 	 * before registering with the transport.
331 	 */
332 	if ((rval = ddi_soft_state_init(&vhci_softstate,
333 	    sizeof (struct scsi_vhci), 1)) != 0) {
334 		VHCI_DEBUG(1, (CE_NOTE, NULL,
335 		    "!_init:soft state init failed\n"));
336 		return (rval);
337 	}
338 
339 	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
340 		VHCI_DEBUG(1, (CE_NOTE, NULL,
341 		    "!_init: scsi hba init failed\n"));
342 		ddi_soft_state_fini(&vhci_softstate);
343 		return (rval);
344 	}
345 
346 	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
347 	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);
348 
349 	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
350 	vhci_targetmap_byport = mod_hash_create_strhash(
351 	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
352 	vhci_targetmap_bypid = mod_hash_create_idhash(
353 	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);
354 
355 	if ((rval = mod_install(&modlinkage)) != 0) {
356 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
357 		if (vhci_targetmap_bypid)
358 			mod_hash_destroy_idhash(vhci_targetmap_bypid);
359 		if (vhci_targetmap_byport)
360 			mod_hash_destroy_strhash(vhci_targetmap_byport);
361 		mutex_destroy(&vhci_targetmap_mutex);
362 		cv_destroy(&vhci_cv);
363 		mutex_destroy(&vhci_global_mutex);
364 		scsi_hba_fini(&modlinkage);
365 		ddi_soft_state_fini(&vhci_softstate);
366 	}
367 	return (rval);
368 }
369 
370 
371 /*
372  * the system is done with us as a driver, so clean up
373  */
374 int
375 _fini(void)
376 {
377 	int rval;
378 
379 	/*
380 	 * don't start cleaning up until we know that the module remove
381 	 * has worked  -- if this works, then we know that each instance
382 	 * has successfully been DDI_DETACHed
383 	 */
384 	if ((rval = mod_remove(&modlinkage)) != 0) {
385 		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
386 		return (rval);
387 	}
388 
389 	if (vhci_targetmap_bypid)
390 		mod_hash_destroy_idhash(vhci_targetmap_bypid);
391 	if (vhci_targetmap_byport)
392 		mod_hash_destroy_strhash(vhci_targetmap_byport);
393 	mutex_destroy(&vhci_targetmap_mutex);
394 	cv_destroy(&vhci_cv);
395 	mutex_destroy(&vhci_global_mutex);
396 	scsi_hba_fini(&modlinkage);
397 	ddi_soft_state_fini(&vhci_softstate);
398 
399 	return (rval);
400 }
401 
402 int
403 _info(struct modinfo *modinfop)
404 {
405 	return (mod_info(&modlinkage, modinfop));
406 }
407 
408 /*
409  * Lookup scsi_failover by "short name" of failover module.
410  */
411 struct scsi_failover_ops *
412 vhci_failover_ops_by_name(char *name)
413 {
414 	struct scsi_failover	*sf;
415 
416 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
417 		if (sf->sf_sfo == NULL)
418 			continue;
419 		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
420 			return (sf->sf_sfo);
421 	}
422 	return (NULL);
423 }
424 
425 /*
426  * Load all scsi_failover_ops 'fops' modules.
427  */
428 static void
429 vhci_failover_modopen(struct scsi_vhci *vhci)
430 {
431 	char			**module;
432 	int			i;
433 	struct scsi_failover	*sf;
434 	char			**dt;
435 	int			e;
436 
437 	if (scsi_failover_table)
438 		return;
439 
440 	/* Get the list of modules from scsi_vhci.conf */
441 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
442 	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
443 	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
444 		cmn_err(CE_WARN, "scsi_vhci: "
445 		    "scsi_vhci.conf is missing 'ddi-forceload'");
446 		return;
447 	}
448 	if (scsi_nfailover == 0) {
449 		cmn_err(CE_WARN, "scsi_vhci: "
450 		    "scsi_vhci.conf has empty 'ddi-forceload'");
451 		ddi_prop_free(module);
452 		return;
453 	}
454 
455 	/* allocate failover table based on number of modules */
456 	scsi_failover_table = (struct scsi_failover *)
457 	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
458 	    KM_SLEEP);
459 
460 	/* loop over modules in scsi_vhci.conf and open each one */
461 	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
462 		if (module[i] == NULL)
463 			continue;
464 
465 		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
466 		if (sf->sf_mod == NULL) {
467 			/*
468 			 * A module returns EEXIST if other software is
469 			 * supporting the intended function: for example
470 			 * the scsi_vhci_f_sum_emc module returns EEXIST
471 			 * from _init if EMC powerpath software is installed.
472 			 */
473 			if (e != EEXIST)
474 				cmn_err(CE_WARN, "scsi_vhci: unable to open "
475 				    "module '%s', error %d", module[i], e);
476 			continue;
477 		}
478 		sf->sf_sfo = ddi_modsym(sf->sf_mod,
479 		    "scsi_vhci_failover_ops", &e);
480 		if (sf->sf_sfo == NULL) {
481 			cmn_err(CE_WARN, "scsi_vhci: "
482 			    "unable to import 'scsi_failover_ops' from '%s', "
483 			    "error %d", module[i], e);
484 			(void) ddi_modclose(sf->sf_mod);
485 			sf->sf_mod = NULL;
486 			continue;
487 		}
488 
489 		/* register vid/pid of devices supported with mpapi */
490 		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
491 			vhci_mpapi_add_dev_prod(vhci, *dt);
492 		sf++;
493 	}
494 
495 	/* verify that at least the "well-known" modules were there */
496 	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
497 		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
498 		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
499 		    "'ddi-forceload'");
500 	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
501 		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
502 		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
503 		    "'ddi-forceload'");
504 
505 	/* call sfo_init for modules that need it */
506 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
507 		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
508 			sf->sf_sfo->sfo_init();
509 	}
510 
511 	ddi_prop_free(module);
512 }
513 
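/*
 * For reference, the shape of a failover module as the loop above
 * consumes it.  Only the scsi_failover_ops members this file actually
 * touches are shown, field order is elided, and the "xyz" names are
 * hypothetical:
 *
 *	static char *xyz_dev_table[] = { "VENDOR  PRODUCT", NULL };
 *
 *	struct scsi_failover_ops scsi_vhci_failover_ops = {
 *		...
 *		.sfo_name	= "f_xyz",
 *		.sfo_devices	= xyz_dev_table,
 *		.sfo_init	= xyz_init,	(may be NULL)
 *		...
 *	};
 *
 * ddi_modopen() loads the module named in 'ddi-forceload' and
 * ddi_modsym() then resolves this "scsi_vhci_failover_ops" symbol.
 */
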
514 /*
515  * unload all loaded scsi_failover_ops modules
516  */
517 static void
518 vhci_failover_modclose()
519 {
520 	struct scsi_failover	*sf;
521 
522 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
523 		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
524 			continue;
525 		(void) ddi_modclose(sf->sf_mod);
526 		sf->sf_mod = NULL;
527 		sf->sf_sfo = NULL;
528 	}
529 
530 	if (scsi_failover_table && scsi_nfailover)
531 		kmem_free(scsi_failover_table,
532 		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
533 	scsi_failover_table = NULL;
534 	scsi_nfailover = 0;
535 }
536 
537 /* ARGSUSED */
538 static int
539 vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
540 {
541 	struct scsi_vhci	*vhci;
542 
543 	if (otype != OTYP_CHR) {
544 		return (EINVAL);
545 	}
546 
547 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
548 	if (vhci == NULL) {
549 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
550 		return (ENXIO);
551 	}
552 
553 	mutex_enter(&vhci->vhci_mutex);
554 	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
555 		mutex_exit(&vhci->vhci_mutex);
556 		vhci_log(CE_NOTE, vhci->vhci_dip,
557 		    "!vhci%d: Already open\n", getminor(*devp));
558 		return (EBUSY);
559 	}
560 
561 	vhci->vhci_state |= VHCI_STATE_OPEN;
562 	mutex_exit(&vhci->vhci_mutex);
563 	return (0);
564 }
565 
566 
567 /* ARGSUSED */
568 static int
569 vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
570 {
571 	struct scsi_vhci	*vhci;
572 
573 	if (otype != OTYP_CHR) {
574 		return (EINVAL);
575 	}
576 
577 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
578 	if (vhci == NULL) {
579 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
580 		return (ENXIO);
581 	}
582 
583 	mutex_enter(&vhci->vhci_mutex);
584 	vhci->vhci_state &= ~VHCI_STATE_OPEN;
585 	mutex_exit(&vhci->vhci_mutex);
586 
587 	return (0);
588 }
589 
590 /* ARGSUSED */
591 static int
592 vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
593 	cred_t *credp, int *rval)
594 {
595 	if (IS_DEVCTL(cmd)) {
596 		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
597 	} else if (cmd == MP_CMD) {
598 		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
599 	} else {
600 		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
601 	}
602 }
603 
604 /*
605  * attach the module
606  */
607 static int
608 vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
609 {
610 	int			rval = DDI_FAILURE;
611 	int			scsi_hba_attached = 0;
612 	int			vhci_attached = 0;
613 	int			mutex_initted = 0;
614 	int			instance;
615 	struct scsi_vhci	*vhci;
616 	scsi_hba_tran_t		*tran;
617 	char			cache_name_buf[64];
618 	char			*data;
619 
620 	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));
621 
622 	instance = ddi_get_instance(dip);
623 
624 	switch (cmd) {
625 	case DDI_ATTACH:
626 		break;
627 
628 	case DDI_RESUME:
629 	case DDI_PM_RESUME:
630 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
631 		    "implemented\n"));
632 		return (rval);
633 
634 	default:
635 		VHCI_DEBUG(1, (CE_NOTE, NULL,
636 		    "!vhci_attach: unknown ddi command\n"));
637 		return (rval);
638 	}
639 
640 	/*
641 	 * Allocate vhci data structure.
642 	 */
643 	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
644 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
645 		    "soft state alloc failed\n"));
646 		return (DDI_FAILURE);
647 	}
648 
649 	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
650 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
651 		    "bad soft state\n"));
652 		ddi_soft_state_free(vhci_softstate, instance);
653 		return (DDI_FAILURE);
654 	}
655 
656 	/* Allocate packet cache */
657 	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
658 	    "vhci%d_cache", instance);
659 
660 	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
661 	mutex_initted++;
662 
663 	/*
664 	 * Allocate a transport structure
665 	 */
666 	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
667 	ASSERT(tran != NULL);
668 
669 	vhci->vhci_tran		= tran;
670 	vhci->vhci_dip		= dip;
671 	vhci->vhci_instance	= instance;
672 
673 	tran->tran_hba_private	= vhci;
674 	tran->tran_tgt_init	= vhci_scsi_tgt_init;
675 	tran->tran_tgt_probe	= NULL;
676 	tran->tran_tgt_free	= vhci_scsi_tgt_free;
677 
678 	tran->tran_start	= vhci_scsi_start;
679 	tran->tran_abort	= vhci_scsi_abort;
680 	tran->tran_reset	= vhci_scsi_reset;
681 	tran->tran_getcap	= vhci_scsi_getcap;
682 	tran->tran_setcap	= vhci_scsi_setcap;
683 	tran->tran_init_pkt	= vhci_scsi_init_pkt;
684 	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
685 	tran->tran_dmafree	= vhci_scsi_dmafree;
686 	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
687 	tran->tran_reset_notify = vhci_scsi_reset_notify;
688 
689 	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
690 	tran->tran_get_name	= vhci_scsi_get_name;
691 	tran->tran_bus_reset	= NULL;
692 	tran->tran_quiesce	= NULL;
693 	tran->tran_unquiesce	= NULL;
694 
695 	/*
696 	 * register event notification routines with scsa
697 	 */
698 	tran->tran_get_eventcookie = NULL;
699 	tran->tran_add_eventcall = NULL;
700 	tran->tran_remove_eventcall = NULL;
701 	tran->tran_post_event	= NULL;
702 
703 	tran->tran_bus_power	= vhci_scsi_bus_power;
704 
705 	tran->tran_bus_config	= vhci_scsi_bus_config;
706 	tran->tran_bus_unconfig	= vhci_scsi_bus_unconfig;
707 
708 	/*
709 	 * Attach this instance with the mpxio framework
710 	 */
711 	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
712 	    != MDI_SUCCESS) {
713 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
714 		    "mdi_vhci_register failed\n"));
715 		goto attach_fail;
716 	}
717 	vhci_attached++;
718 
719 	/*
720 	 * Attach this instance of the hba.
721 	 *
722 	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
723 	 * driver, it has nothing to do with DMA. However, when calling
724 	 * scsi_hba_attach_setup() we need to pass something valid in the
725 	 * dma attributes parameter. So we just use scsi_alloc_attr.
726 	 * SCSA itself seems to care only about the dma_attr_minxfer and
727 	 * dma_attr_burstsizes fields of the dma attributes structure;
728 	 * it expects those fields to be non-zero.
729 	 */
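	/*
	 * A minimal sketch of an attribute structure that would satisfy
	 * the two constraints noted above (purely illustrative; the
	 * driver really passes scsi_alloc_attr, and the remaining
	 * ddi_dma_attr_t fields are omitted here):
	 *
	 *	static ddi_dma_attr_t example_dma_attr = {
	 *		DMA_ATTR_V0,		dma_attr_version
	 *		...
	 *	};
	 *	example_dma_attr.dma_attr_minxfer = 1;		non-zero
	 *	example_dma_attr.dma_attr_burstsizes = 1;	non-zero
	 */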
730 	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
731 	    SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
732 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
733 		    "hba attach failed\n"));
734 		goto attach_fail;
735 	}
736 	scsi_hba_attached++;
737 
738 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
739 	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
740 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
741 		    " ddi_create_minor_node failed\n"));
742 		goto attach_fail;
743 	}
744 
745 	/*
746 	 * Set pm-want-child-notification property for
747 	 * power management of the phci and client
748 	 */
749 	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
750 	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
751 		cmn_err(CE_WARN,
752 		    "%s%d fail to create pm-want-child-notification? prop",
753 		    ddi_driver_name(dip), ddi_get_instance(dip));
754 		goto attach_fail;
755 	}
756 
757 	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
758 	vhci->vhci_update_pathstates_taskq =
759 	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
760 	    MINCLSYSPRI, 1, 4, 0);
761 	ASSERT(vhci->vhci_taskq);
762 	ASSERT(vhci->vhci_update_pathstates_taskq);
763 
764 	/*
765 	 * Set appropriate configuration flags based on options set in
766 	 * conf file.
767 	 */
768 	vhci->vhci_conf_flags = 0;
769 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
770 	    "auto-failback", &data) == DDI_SUCCESS) {
771 		if (strcmp(data, "enable") == 0)
772 			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
773 		ddi_prop_free(data);
774 	}
775 
776 	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
777 		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
778 		    "disabled through scsi_vhci.conf file.");
779 
780 	/*
781 	 * Allocate an mpapi private structure
782 	 */
783 	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
784 	if (vhci_mpapi_init(vhci) != 0) {
785 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
786 		    "vhci_mpapi_init() failed"));
787 	}
788 
789 	vhci_failover_modopen(vhci);		/* load failover modules */
790 
791 	ddi_report_dev(dip);
792 	return (DDI_SUCCESS);
793 
794 attach_fail:
795 	if (vhci_attached)
796 		(void) mdi_vhci_unregister(dip, 0);
797 
798 	if (scsi_hba_attached)
799 		(void) scsi_hba_detach(dip);
800 
801 	if (vhci->vhci_tran)
802 		scsi_hba_tran_free(vhci->vhci_tran);
803 
804 	if (mutex_initted) {
805 		mutex_destroy(&vhci->vhci_mutex);
806 	}
807 
808 	ddi_soft_state_free(vhci_softstate, instance);
809 	return (DDI_FAILURE);
810 }
811 
812 
813 /*ARGSUSED*/
814 static int
815 vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
816 {
817 	int			instance = ddi_get_instance(dip);
818 	scsi_hba_tran_t		*tran;
819 	struct scsi_vhci	*vhci;
820 
821 	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));
822 
823 	if ((tran = ddi_get_driver_private(dip)) == NULL)
824 		return (DDI_FAILURE);
825 
826 	vhci = TRAN2HBAPRIVATE(tran);
827 	if (!vhci) {
828 		return (DDI_FAILURE);
829 	}
830 
831 	switch (cmd) {
832 	case DDI_DETACH:
833 		break;
834 
835 	case DDI_SUSPEND:
836 	case DDI_PM_SUSPEND:
837 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not "
838 		    "yet implemented\n"));
839 		return (DDI_FAILURE);
840 
841 	default:
842 		VHCI_DEBUG(1, (CE_NOTE, NULL,
843 		    "!vhci_detach: unknown ddi command\n"));
844 		return (DDI_FAILURE);
845 	}
846 
847 	(void) mdi_vhci_unregister(dip, 0);
848 	(void) scsi_hba_detach(dip);
849 	scsi_hba_tran_free(tran);
850 
851 	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
852 	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
853 		cmn_err(CE_WARN,
854 		    "%s%d unable to remove prop pm-want_child_notification?",
855 		    ddi_driver_name(dip), ddi_get_instance(dip));
856 	}
857 	if (vhci_restart_timeid != 0) {
858 		(void) untimeout(vhci_restart_timeid);
859 	}
860 	vhci_restart_timeid = 0;
861 
862 	mutex_destroy(&vhci->vhci_mutex);
863 	vhci->vhci_dip = NULL;
864 	vhci->vhci_tran = NULL;
865 	taskq_destroy(vhci->vhci_taskq);
866 	taskq_destroy(vhci->vhci_update_pathstates_taskq);
867 	ddi_remove_minor_node(dip, NULL);
868 	ddi_soft_state_free(vhci_softstate, instance);
869 
870 	vhci_failover_modclose();		/* unload failover modules */
871 	return (DDI_SUCCESS);
872 }
873 
874 /*
875  * vhci_getinfo()
876  * Given the device number, return the devinfo pointer or the
877  * instance number.
878  * Note: DDI_INFO_DEVT2INSTANCE always succeeds, even before attach.
879  */
880 
881 /*ARGSUSED*/
882 static int
883 vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
884 {
885 	struct scsi_vhci	*vhcip;
886 	int			instance = MINOR2INST(getminor((dev_t)arg));
887 
888 	switch (cmd) {
889 	case DDI_INFO_DEVT2DEVINFO:
890 		vhcip = ddi_get_soft_state(vhci_softstate, instance);
891 		if (vhcip != NULL)
892 			*result = vhcip->vhci_dip;
893 		else {
894 			*result = NULL;
895 			return (DDI_FAILURE);
896 		}
897 		break;
898 
899 	case DDI_INFO_DEVT2INSTANCE:
900 		*result = (void *)(uintptr_t)instance;
901 		break;
902 
903 	default:
904 		return (DDI_FAILURE);
905 	}
906 
907 	return (DDI_SUCCESS);
908 }
909 
910 /*ARGSUSED*/
911 static int
912 vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
913 	scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
914 {
915 	char			*guid;
916 	scsi_vhci_lun_t		*vlun;
917 	struct scsi_vhci	*vhci;
918 	clock_t			from_ticks;
919 	mdi_pathinfo_t		*pip;
920 	int			rval;
921 
922 	ASSERT(hba_dip != NULL);
923 	ASSERT(tgt_dip != NULL);
924 
925 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
926 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
927 		/*
928 		 * This must be the .conf node without a GUID property.
929 		 * The node under fp already inserts a delay, so we
930 		 * just return from here. We rely on this delay to have
931 		 * all dips be posted to the ndi hotplug thread's newdev
932 		 * list. This is necessary for the deferred attach
933 		 * mechanism to work and for opens() done soon after boot to
934 		 * succeed.
935 		 */
936 		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
937 		    "property failed"));
938 		return (DDI_NOT_WELL_FORMED);
939 	}
940 
941 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
942 		/*
943 		 * This must be a .conf node with the GUID property. We don't
944 		 * merge properties via ndi_merge_node() here because the
945 		 * devi_addr_buf of a .conf node is always "" according to the
946 		 * implementation of vhci_scsi_get_name_bus_addr().
947 		 */
948 		ddi_set_name_addr(tgt_dip, NULL);
949 		return (DDI_FAILURE);
950 	}
951 
952 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
953 	ASSERT(vhci != NULL);
954 
955 	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
956 	    "!tgt_init: called for %s (instance %d)\n",
957 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));
958 
959 	vlun = vhci_lun_lookup(tgt_dip);
960 
961 	mutex_enter(&vhci_global_mutex);
962 
963 	from_ticks = ddi_get_lbolt();
964 	if (vhci_to_ticks == 0) {
965 		vhci_to_ticks = from_ticks +
966 		    drv_usectohz(vhci_init_wait_timeout);
967 	}
968 
969 #ifdef DEBUG
970 	if (vlun) {
971 		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
972 		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
973 		    "from_ticks %lx to_ticks %lx",
974 		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
975 	} else {
976 		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
977 		    "vhci_scsi_tgt_init: guid %s : vlun not found "
978 		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
979 		    vhci_to_ticks));
980 	}
981 #endif
982 
983 	rval = mdi_select_path(tgt_dip, NULL,
984 	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
985 	if (rval == MDI_SUCCESS) {
986 		mdi_rele_path(pip);
987 	}
988 
989 	/*
990 	 * Wait while any of the following conditions holds:
991 	 *	1. no vlun available yet
992 	 *	2. no path established
993 	 *	3. timer did not expire
994 	 */
995 	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
996 	    (rval != MDI_SUCCESS)) {
997 		if (vlun && vlun->svl_not_supported) {
998 			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
999 			    "vlun 0x%p lun guid %s not supported!",
1000 			    (void *)vlun, guid));
1001 			mutex_exit(&vhci_global_mutex);
1002 			ddi_prop_free(guid);
1003 			return (DDI_NOT_WELL_FORMED);
1004 		}
1005 		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
1006 			vhci_first_time = 1;
1007 		}
1008 		if (vhci_first_time == 1) {
1009 			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
1010 			    "no wait for %s. from_tick %lx, to_tick %lx",
1011 			    guid, from_ticks, vhci_to_ticks));
1012 			mutex_exit(&vhci_global_mutex);
1013 			ddi_prop_free(guid);
1014 			return (DDI_NOT_WELL_FORMED);
1015 		}
1016 
1017 		if (cv_timedwait(&vhci_cv,
1018 		    &vhci_global_mutex, vhci_to_ticks) == -1) {
1019 			/* Timed out */
1020 #ifdef DEBUG
1021 			if (vlun == NULL) {
1022 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1023 				    "tgt_init: no vlun for %s!", guid));
1024 			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
1025 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1026 				    "tgt_init: client path count is "
1027 				    "zero for %s!", guid));
1028 			} else {
1029 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1030 				    "tgt_init: client path not "
1031 				    "available yet for %s!", guid));
1032 			}
1033 #endif /* DEBUG */
1034 			mutex_exit(&vhci_global_mutex);
1035 			ddi_prop_free(guid);
1036 			return (DDI_NOT_WELL_FORMED);
1037 		}
1038 		vlun = vhci_lun_lookup(tgt_dip);
1039 		rval = mdi_select_path(tgt_dip, NULL,
1040 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
1041 		    NULL, &pip);
1042 		if (rval == MDI_SUCCESS) {
1043 			mdi_rele_path(pip);
1044 		}
1045 		from_ticks = ddi_get_lbolt();
1046 	}
1047 	mutex_exit(&vhci_global_mutex);
1048 
1049 	ASSERT(vlun != NULL);
1050 	ddi_prop_free(guid);
1051 
1052 	scsi_device_hba_private_set(sd, vlun);
1053 
1054 	return (DDI_SUCCESS);
1055 }
1056 
1057 /*ARGSUSED*/
1058 static void
1059 vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1060 	scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1061 {
1062 }
1063 
1064 /*
1065  * a PGR register command has started; copy the info we need
1066  */
1067 int
1068 vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
1069 {
1070 	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
1071 	void			*addr;
1072 
1073 	if (!vpkt->vpkt_tgt_init_bp)
1074 		return (TRAN_BADPKT);
1075 
1076 	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
1077 	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
1078 	if (addr == NULL)
1079 		return (TRAN_BUSY);
1080 
1081 	mutex_enter(&vlun->svl_mutex);
1082 
1083 	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");
1084 
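	/*
	 * Copy only the caller-supplied PROUT parameters.  The size
	 * arithmetic below assumes the last two members of vhci_prout_t
	 * are the two MHIOC_RESV_KEY_SIZE-byte keys the driver maintains
	 * itself, so they are deliberately left out of the copy.
	 */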
1085 	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
1086 	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
1087 	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));
1088 
1089 	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");
1090 
1091 	vlun->svl_time = pkt->pkt_time;
1092 	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
1093 	vlun->svl_first_path = vpkt->vpkt_path;
1094 	mutex_exit(&vlun->svl_mutex);
1095 	return (0);
1096 }
1097 
1098 /*
1099  * Function name : vhci_scsi_start()
1100  *
1101  * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
1102  *					  or other fatal failure
1103  *					  preventing packet transportation
1104  *		   TRAN_BUSY		- request queue is full
1105  *		   TRAN_ACCEPT		- pkt has been submitted to phci
1106  *					  (or is held in the waitQ)
1107  * Description	 : Implements SCSA's tran_start() entry point for
1108  *		   packet transport
1109  *
1110  */
1111 static int
1112 vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1113 {
1114 	int			rval = TRAN_ACCEPT;
1115 	int			instance, held;
1116 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1117 	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
1118 	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
1119 	int			flags = 0;
1120 	scsi_vhci_priv_t	*svp, *svp_resrv;
1121 	dev_info_t 		*cdip;
1122 	client_lb_t		lbp;
1123 	int			restore_lbp = 0;
1124 	/* set if pkt is SCSI-II RESERVE cmd */
1125 	int			pkt_reserve_cmd = 0;
1126 	int			reserve_failed = 0;
1127 	int			resrv_instance = 0;
1128 	mdi_pathinfo_t		*pip;
1129 	struct scsi_pkt		*rel_pkt;
1130 
1131 	ASSERT(vhci != NULL);
1132 	ASSERT(vpkt != NULL);
1133 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
1134 	cdip = ADDR2DIP(ap);
1135 
1136 	/*
1137 	 * Block IOs if LUN is held or QUIESCED for IOs.
1138 	 */
1139 	if ((VHCI_LUN_IS_HELD(vlun)) ||
1140 	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1141 		return (TRAN_BUSY);
1142 	}
1143 
1144 	/*
1145 	 * vhci_lun needs to be quiesced before SCSI-II RESERVE command
1146 	 * can be issued.  This may require a cv_timedwait, which is
1147 	 * dangerous to perform in an interrupt context.  So if this
1148 	 * is a RESERVE command, a taskq is dispatched to service it.
1149 	 * This taskq shall again call vhci_scsi_start, but we shall be
1150 	 * sure it's not in an interrupt context.
1151 	 */
1152 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
1153 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
1154 		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
1155 			if (taskq_dispatch(vhci->vhci_taskq,
1156 			    vhci_dispatch_scsi_start, (void *) vpkt,
1157 			    KM_NOSLEEP)) {
1158 				return (TRAN_ACCEPT);
1159 			} else {
1160 				return (TRAN_BUSY);
1161 			}
1162 		}
1163 
1164 		/*
1165 		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
1166 		 * get serviced for a lun.
1167 		 */
1168 		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
1169 		if (!held) {
1170 			return (TRAN_BUSY);
1171 		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
1172 		    VLUN_QUIESCED_FLG) {
1173 			VHCI_RELEASE_LUN(vlun);
1174 			return (TRAN_BUSY);
1175 		}
1176 
1177 		/*
1178 		 * To ensure that no IOs occur for this LUN for the duration
1179 		 * of this pkt set the VLUN_QUIESCED_FLG.
1180 		 * In case this routine needs to exit on error make sure that
1181 		 * this flag is cleared.
1182 		 */
1183 		vlun->svl_flags |= VLUN_QUIESCED_FLG;
1184 		pkt_reserve_cmd = 1;
1185 
1186 		/*
1187 		 * if this is a SCSI-II RESERVE command, set load balancing
1188 		 * policy to be ALTERNATE PATH to ensure that all subsequent
1189 		 * IOs are routed on the same path.  This is because if commands
1190 		 * are routed across multiple paths then IOs on paths other than
1191 		 * the one on which the RESERVE was executed will get a
1192 		 * RESERVATION CONFLICT.
1193 		 */
1194 		lbp = mdi_get_lb_policy(cdip);
1195 		if (lbp != LOAD_BALANCE_NONE) {
1196 			if (vhci_quiesce_lun(vlun) != 1) {
1197 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1198 				VHCI_RELEASE_LUN(vlun);
1199 				return (TRAN_FATAL_ERROR);
1200 			}
1201 			vlun->svl_lb_policy_save = lbp;
1202 			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
1203 			    MDI_SUCCESS) {
1204 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1205 				VHCI_RELEASE_LUN(vlun);
1206 				return (TRAN_FATAL_ERROR);
1207 			}
1208 			restore_lbp = 1;
1209 		}
1210 
1211 		VHCI_DEBUG(2, (CE_NOTE, vhci->vhci_dip,
1212 		    "!vhci_scsi_start: sending SCSI-2 RESERVE, vlun 0x%p, "
1213 		    "svl_resrv_pip 0x%p, svl_flags: %x, lb_policy %x",
1214 		    (void *)vlun, (void *)vlun->svl_resrv_pip, vlun->svl_flags,
1215 		    mdi_get_lb_policy(cdip)));
1216 
1217 		/*
1218 		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h.
1219 		 * To narrow the window in which a reserve command may be sent
1220 		 * down an inactive path, the path states first need to be
1221 		 * updated.  Before calling vhci_update_pathstates, reset
1222 		 * VLUN_RESERVE_ACTIVE_FLG in case it was already set for
1223 		 * this lun; this prevents an unnecessary reset from being
1224 		 * sent out.  Also remember the currently reserved path in
1225 		 * case the new reservation goes to another path.
1226 		 */
1227 		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
1228 			resrv_instance = mdi_pi_get_path_instance(
1229 			    vlun->svl_resrv_pip);
1230 		}
1231 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
1232 		vhci_update_pathstates((void *)vlun);
1233 	}
1234 
1235 	instance = ddi_get_instance(vhci->vhci_dip);
1236 
1237 	/*
1238 	 * If the command is PRIN with action of zero, then the cmd
1239 	 * is reading PR keys which requires filtering on completion.
1240 	 * Data cache sync must be guaranteed.
1241 	 */
1242 	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
1243 	    (vpkt->vpkt_org_vpkt == NULL)) {
1244 		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
1245 	}
1246 
1247 	/*
1248 	 * Do not defer bind for PKT_DMA_PARTIAL
1249 	 */
1250 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1251 
1252 		/* This is a non pkt_dma_partial case */
1253 		if ((rval = vhci_bind_transport(
1254 		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
1255 		    != TRAN_ACCEPT) {
1256 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1257 			    "!vhci%d %x: failed to bind transport: "
1258 			    "vlun 0x%p pkt_reserved %x restore_lbp %x,"
1259 			    "lbp %x", instance, rval, (void *)vlun,
1260 			    pkt_reserve_cmd, restore_lbp, lbp));
1261 			if (restore_lbp)
1262 				(void) mdi_set_lb_policy(cdip, lbp);
1263 			if (pkt_reserve_cmd)
1264 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1265 			return (rval);
1266 		}
1267 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1268 		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
1269 	}
1270 	ASSERT(vpkt->vpkt_hba_pkt != NULL);
1271 	ASSERT(vpkt->vpkt_path != NULL);
1272 
1273 	/*
1274 	 * This is the chance to adjust the pHCI's pkt and other information
1275 	 * from the target driver's pkt.
1276 	 */
1277 	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
1278 	    (void *)vpkt));
1279 	vhci_update_pHCI_pkt(vpkt, pkt);
1280 
1281 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
1282 		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
1283 			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1284 			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
1285 			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
1286 			    (void *)vlun, (void *)vpkt->vpkt_path,
1287 			    (void *)vlun->svl_resrv_pip,
1288 			    mdi_get_lb_policy(cdip)));
1289 			reserve_failed = 1;
1290 		}
1291 	}
1292 
1293 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
1294 	if (svp == NULL || reserve_failed) {
1295 		if (pkt_reserve_cmd) {
1296 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1297 			    "!vhci_bind returned null svp vlun 0x%p",
1298 			    (void *)vlun));
1299 			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1300 			if (restore_lbp)
1301 				(void) mdi_set_lb_policy(cdip, lbp);
1302 		}
1303 pkt_cleanup:
1304 		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1305 			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1306 			vpkt->vpkt_hba_pkt = NULL;
1307 			if (vpkt->vpkt_path) {
1308 				mdi_rele_path(vpkt->vpkt_path);
1309 				vpkt->vpkt_path = NULL;
1310 			}
1311 		}
1312 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1313 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1314 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1315 			sema_v(&vlun->svl_pgr_sema);
1316 		}
1317 		return (TRAN_BUSY);
1318 	}
1319 
1320 	if ((resrv_instance != 0) && (resrv_instance !=
1321 	    mdi_pi_get_path_instance(vpkt->vpkt_path))) {
1322 		/*
1323 		 * This is an attempt to reserve vpkt->vpkt_path.  But the
1324 		 * previously reserved path referred to by resrv_instance might
1325 		 * still be reserved.  Hence we will send a release command
1326 		 * there in order to avoid a reservation conflict.
1327 		 */
1328 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, "!vhci_scsi_start: "
1329 		    "conflicting reservation on another path, vlun 0x%p, "
1330 		    "reserved instance %d, new instance: %d, pip: 0x%p",
1331 		    (void *)vlun, resrv_instance,
1332 		    mdi_pi_get_path_instance(vpkt->vpkt_path),
1333 		    (void *)vpkt->vpkt_path));
1334 
1335 		/*
1336 		 * In rare cases, the path referred to by resrv_instance could
1337 		 * disappear in the meantime. Calling mdi_select_path() below
1338 		 * is an attempt to find out if the path still exists. It also
1339 		 * ensures that the path will be held when the release is sent.
1340 		 */
1341 		rval = mdi_select_path(cdip, NULL, MDI_SELECT_PATH_INSTANCE,
1342 		    (void *)(intptr_t)resrv_instance, &pip);
1343 
1344 		if ((rval == MDI_SUCCESS) && (pip != NULL)) {
1345 			svp_resrv = (scsi_vhci_priv_t *)
1346 			    mdi_pi_get_vhci_private(pip);
1347 			rel_pkt = scsi_init_pkt(&svp_resrv->svp_psd->sd_address,
1348 			    NULL, NULL, CDB_GROUP0,
1349 			    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC,
1350 			    NULL);
1351 
1352 			if (rel_pkt == NULL) {
1353 				char	*p_path;
1354 
1355 				/*
1356 				 * This is very unlikely.
1357 				 * scsi_init_pkt(SLEEP_FUNC) does not fail
1358 				 * because of resources. But in theory it could
1359 				 * fail for some other reason. There is no easy
1360 				 * way to recover, though. Log a warning
1361 				 * and return.
1362 				 */
1363 				p_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1364 				vhci_log(CE_WARN, vhci->vhci_dip, "!Sending "
1365 				    "RELEASE(6) to %s failed, a potential "
1366 				    "reservation conflict ahead.",
1367 				    ddi_pathname(mdi_pi_get_phci(pip), p_path));
1368 				kmem_free(p_path, MAXPATHLEN);
1369 
1370 				if (restore_lbp)
1371 					(void) mdi_set_lb_policy(cdip, lbp);
1372 
1373 				/* no need to check pkt_reserve_cmd here */
1374 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1375 				return (TRAN_FATAL_ERROR);
1376 			}
1377 
1378 			rel_pkt->pkt_cdbp[0] = SCMD_RELEASE;
1379 			rel_pkt->pkt_time = 60;
1380 
1381 			/*
1382 			 * Ignore the return value.  If it fails, then
1383 			 * most likely the path is no longer reserved
1384 			 * anyway.
1385 			 */
1386 			(void) vhci_do_scsi_cmd(rel_pkt);
1387 			VHCI_DEBUG(1, (CE_NOTE, NULL,
1388 			    "!vhci_scsi_start: path 0x%p, issued SCSI-2"
1389 			    " RELEASE\n", (void *)pip));
1390 			scsi_destroy_pkt(rel_pkt);
1391 			mdi_rele_path(pip);
1392 		}
1393 	}
1394 
1395 	VHCI_INCR_PATH_CMDCOUNT(svp);
1396 
1397 	/*
1398 	 * Ensure that no other IOs raced ahead while a RESERVE cmd was
1399 	 * QUIESCING the same lun.
1400 	 */
1401 	if ((!pkt_reserve_cmd) &&
1402 	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1403 		VHCI_DECR_PATH_CMDCOUNT(svp);
1404 		goto pkt_cleanup;
1405 	}
1406 
1407 	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
1408 	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1409 		/*
1410 		 * currently this thread only handles running PGR
1411 		 * commands, so don't bother creating it unless
1412 		 * something interesting is going to happen (like
1413 		 * either a PGR out, or a PGR in with enough space
1414 		 * to hold the keys that are getting returned)
1415 		 */
1416 		mutex_enter(&vlun->svl_mutex);
1417 		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
1418 		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1419 			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
1420 			    1, MINCLSYSPRI, 1, 4, 0);
1421 			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
1422 		}
1423 		mutex_exit(&vlun->svl_mutex);
1424 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1425 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1426 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1427 			if (rval = vhci_pgr_register_start(vlun, pkt)) {
1428 				/* an error */
1429 				sema_v(&vlun->svl_pgr_sema);
1430 				return (rval);
1431 			}
1432 		}
1433 	}
1434 
1435 	/*
1436 	 * SCSI-II RESERVE cmd is not expected in polled mode.
1437 	 * If this changes, it needs to be handled for the polled scenario.
1438 	 */
1439 	flags = vpkt->vpkt_hba_pkt->pkt_flags;
1440 
1441 	/*
1442 	 * Set the path_instance *before* sending the scsi_pkt down the path
1443 	 * to mpxio's pHCI so that additional path abstractions at a pHCI
1444 	 * level (like maybe iSCSI at some point in the future) can update
1445 	 * the path_instance.
1446 	 */
1447 	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
1448 		vpkt->vpkt_hba_pkt->pkt_path_instance =
1449 		    mdi_pi_get_path_instance(vpkt->vpkt_path);
1450 
1451 	rval = scsi_transport(vpkt->vpkt_hba_pkt);
1452 	if (rval == TRAN_ACCEPT) {
1453 		if (flags & FLAG_NOINTR) {
1454 			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
1455 			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;
1456 
1457 			ASSERT(tpkt != NULL);
1458 			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
1459 			tpkt->pkt_resid = pkt->pkt_resid;
1460 			tpkt->pkt_state = pkt->pkt_state;
1461 			tpkt->pkt_statistics = pkt->pkt_statistics;
1462 			tpkt->pkt_reason = pkt->pkt_reason;
1463 
1464 			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
1465 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
1466 				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
1467 				    vpkt->vpkt_tgt_init_scblen);
1468 			}
1469 
1470 			VHCI_DECR_PATH_CMDCOUNT(svp);
1471 			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1472 				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1473 				vpkt->vpkt_hba_pkt = NULL;
1474 				if (vpkt->vpkt_path) {
1475 					mdi_rele_path(vpkt->vpkt_path);
1476 					vpkt->vpkt_path = NULL;
1477 				}
1478 			}
1479 			/*
1480 			 * This path will not automatically retry pkts
1481 			 * internally, therefore, vpkt_org_vpkt should
1482 			 * never be set.
1483 			 */
1484 			ASSERT(vpkt->vpkt_org_vpkt == NULL);
1485 			scsi_hba_pkt_comp(tpkt);
1486 		}
1487 		return (rval);
1488 	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1489 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1490 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1491 		/* the command exited with bad status */
1492 		sema_v(&vlun->svl_pgr_sema);
1493 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
1494 		/* the command exited with bad status */
1495 		sema_v(&vlun->svl_pgr_sema);
1496 	} else if (pkt_reserve_cmd) {
1497 		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1498 		    "!vhci_scsi_start: reserve failed vlun 0x%p",
1499 		    (void *)vlun));
1500 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1501 		if (restore_lbp)
1502 			(void) mdi_set_lb_policy(cdip, lbp);
1503 	}
1504 
1505 	ASSERT(vpkt->vpkt_hba_pkt != NULL);
1506 	VHCI_DECR_PATH_CMDCOUNT(svp);
1507 
1508 	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
1509 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1510 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1511 		vpkt->vpkt_hba_pkt = NULL;
1512 		if (vpkt->vpkt_path) {
1513 			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
1514 			mdi_rele_path(vpkt->vpkt_path);
1515 			vpkt->vpkt_path = NULL;
1516 		}
1517 	}
1518 	return (TRAN_BUSY);
1519 }
1520 
1521 /*
1522  * Function name : vhci_scsi_reset()
1523  *
1524  * Return Values : 0 - reset failed
1525  *		   1 - reset succeeded
1526  */
1527 
1528 /* ARGSUSED */
1529 static int
1530 vhci_scsi_reset(struct scsi_address *ap, int level)
1531 {
1532 	int rval = 0;
1533 
1534 	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
1535 	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
1536 		return (vhci_scsi_reset_target(ap, level, TRUE));
1537 	} else if (level == RESET_ALL) {
1538 		return (vhci_scsi_reset_bus(ap));
1539 	}
1540 
1541 	return (rval);
1542 }
1543 
1544 /*
1545  * vhci_recovery_reset:
1546  *	Issues a reset to the device
1547  * Input:
1548  *	vlun - vhci lun pointer of the device
1549  *	ap - address of the device
1550  *	select_path:
1551  *		If select_path is FALSE, then the address specified in ap is
1552  *		the path on which reset will be issued.
1553  *		If select_path is TRUE, then path is obtained by calling
1554  *		mdi_select_path.
1555  *
1556  *	recovery_depth:
1557  *		Caller can specify the level of reset.
1558  *		VHCI_DEPTH_LUN -
1559  *			Issues LUN RESET if the device supports LUN reset.
1560  *		VHCI_DEPTH_TARGET -
1561  *			If LUN RESET fails or the device does not support
1562  *			LUN RESET, issues TARGET RESET.
1563  *		VHCI_DEPTH_ALL -
1564  *			If LUN RESET fails or the device does not support
1565  *			LUN RESET, issues TARGET RESET.
1566  *			If TARGET RESET does not succeed, issues BUS RESET.
1567  */
1568 
1569 static int
1570 vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
1571 	uint8_t select_path, uint8_t recovery_depth)
1572 {
1573 	int	ret = 0;
1574 
1575 	ASSERT(ap != NULL);
1576 
1577 	if (vlun && vlun->svl_support_lun_reset == 1) {
1578 		ret = vhci_scsi_reset_target(ap, RESET_LUN,
1579 		    select_path);
1580 	}
1581 
1582 	recovery_depth--;
1583 
1584 	if ((ret == 0) && recovery_depth) {
1585 		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
1586 		    select_path);
1587 		recovery_depth--;
1588 	}
1589 
1590 	if ((ret == 0) && recovery_depth) {
1591 		(void) scsi_reset(ap, RESET_ALL);
1592 	}
1593 
1594 	return (ret);
1595 }
1596 
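/*
 * For example, a recovery path that wants full escalation could call
 * (hypothetical call site; vlun and pkt stand for the caller's lun and
 * failed packet):
 *
 *	(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
 *	    TRUE, VHCI_DEPTH_ALL);
 *
 * With select_path == TRUE the path used for the reset is obtained via
 * mdi_select_path() rather than taken from the address itself.
 */
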
1597 /*
1598  * Note: The scsi_address passed to this routine could be the scsi_address
1599  * for the virtual device or the physical device. No assumptions should be
1600  * made in this routine about the contents of the ap structure.
1601  * Further, note that the child dip would be the dip of the ssd node regardless
1602  * of the scsi_address passed in.
1603  */
1604 static int
1605 vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
1606 {
1607 	dev_info_t		*vdip, *cdip;
1608 	mdi_pathinfo_t		*pip = NULL;
1609 	mdi_pathinfo_t		*npip = NULL;
1610 	int			rval = -1;
1611 	scsi_vhci_priv_t	*svp = NULL;
1612 	struct scsi_address	*pap = NULL;
1613 	scsi_hba_tran_t		*hba = NULL;
1614 	int			sps;
1615 	struct scsi_vhci	*vhci = NULL;
1616 
1617 	if (select_path != TRUE) {
1618 		ASSERT(ap != NULL);
1619 		if (level == RESET_LUN) {
1620 			hba = ap->a_hba_tran;
1621 			ASSERT(hba != NULL);
1622 			return (hba->tran_reset(ap, RESET_LUN));
1623 		}
1624 		return (scsi_reset(ap, level));
1625 	}
1626 
1627 	cdip = ADDR2DIP(ap);
1628 	ASSERT(cdip != NULL);
1629 	vdip = ddi_get_parent(cdip);
1630 	ASSERT(vdip != NULL);
1631 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
1632 	ASSERT(vhci != NULL);
1633 
1634 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
1635 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
1636 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1637 		    "Unable to get a path, dip 0x%p", (void *)cdip));
1638 		return (0);
1639 	}
1640 again:
1641 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
1642 	if (svp == NULL) {
1643 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1644 		    "priv is NULL, pip 0x%p", (void *)pip));
1645 		mdi_rele_path(pip);
1646 		return (0);
1647 	}
1648 
1649 	if (svp->svp_psd == NULL) {
1650 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1651 		    "psd is NULL, pip 0x%p, svp 0x%p",
1652 		    (void *)pip, (void *)svp));
1653 		mdi_rele_path(pip);
1654 		return (0);
1655 	}
1656 
1657 	pap = &svp->svp_psd->sd_address;
1658 	hba = pap->a_hba_tran;
1659 
1660 	ASSERT(pap != NULL);
1661 	ASSERT(hba != NULL);
1662 
1663 	if (hba->tran_reset != NULL) {
1664 		if (hba->tran_reset(pap, level) == 0) {
1665 			vhci_log(CE_WARN, vdip, "!%s%d: "
1666 			    "path %s, reset %d failed",
1667 			    ddi_driver_name(cdip), ddi_get_instance(cdip),
1668 			    mdi_pi_spathname(pip), level);
1669 
1670 			/*
1671 			 * Select next path and issue the reset, repeat
1672 			 * until all paths are exhausted
1673 			 */
1674 			sps = mdi_select_path(cdip, NULL,
1675 			    MDI_SELECT_ONLINE_PATH, pip, &npip);
1676 			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
1677 				mdi_rele_path(pip);
1678 				return (0);
1679 			}
1680 			mdi_rele_path(pip);
1681 			pip = npip;
1682 			goto again;
1683 		}
1684 		mdi_rele_path(pip);
1685 		mutex_enter(&vhci->vhci_mutex);
1686 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
1687 		    &vhci->vhci_reset_notify_listf);
1688 		mutex_exit(&vhci->vhci_mutex);
1689 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
1690 		    "reset %d sent down pip:%p for cdip:%p\n", level,
1691 		    (void *)pip, (void *)cdip));
1692 		return (1);
1693 	}
1694 	mdi_rele_path(pip);
1695 	return (0);
1696 }
1697 
1698 
1699 /* ARGSUSED */
1700 static int
1701 vhci_scsi_reset_bus(struct scsi_address *ap)
1702 {
1703 	return (1);
1704 }
1705 
1706 
1707 /*
1708  * called by vhci_getcap and vhci_setcap to get and set (respectively)
1709  * SCSI capabilities
1710  */
1711 /* ARGSUSED */
1712 static int
1713 vhci_commoncap(struct scsi_address *ap, char *cap,
1714     int val, int tgtonly, int doset)
1715 {
1716 	struct scsi_vhci		*vhci = ADDR2VHCI(ap);
1717 	struct scsi_vhci_lun		*vlun = ADDR2VLUN(ap);
1718 	int			cidx;
1719 	int			rval = 0;
1720 
1721 	if (cap == (char *)0) {
1722 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1723 		    "!vhci_commoncap: invalid arg"));
1724 		return (rval);
1725 	}
1726 
1727 	if (vlun == NULL) {
1728 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1729 		    "!vhci_commoncap: vlun is null"));
1730 		return (rval);
1731 	}
1732 
1733 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
1734 		return (UNDEFINED);
1735 	}
1736 
1737 	/*
1738 	 * Process setcap request.
1739 	 */
1740 	if (doset) {
1741 		/*
1742 		 * At present, we can only set binary (0/1) values
1743 		 */
1744 		switch (cidx) {
1745 		case SCSI_CAP_ARQ:
1746 			if (val == 0) {
1747 				rval = 0;
1748 			} else {
1749 				rval = 1;
1750 			}
1751 			break;
1752 
1753 		case SCSI_CAP_LUN_RESET:
1754 			if (tgtonly == 0) {
1755 				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1756 				    "scsi_vhci_setcap: "
1757 				    "Returning error since whom = 0"));
1758 				rval = -1;
1759 				break;
1760 			}
1761 			/*
1762 			 * Set the capability accordingly.
1763 			 */
1764 			mutex_enter(&vlun->svl_mutex);
1765 			vlun->svl_support_lun_reset = val;
1766 			rval = val;
1767 			mutex_exit(&vlun->svl_mutex);
1768 			break;
1769 
1770 		case SCSI_CAP_SECTOR_SIZE:
1771 			mutex_enter(&vlun->svl_mutex);
1772 			vlun->svl_sector_size = val;
1773 			vlun->svl_setcap_done = 1;
1774 			mutex_exit(&vlun->svl_mutex);
1775 			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);
1776 
1777 			/* Always return success */
1778 			rval = 1;
1779 			break;
1780 
1781 		default:
1782 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1783 			    "!vhci_setcap: unsupported %d", cidx));
1784 			rval = UNDEFINED;
1785 			break;
1786 		}
1787 
1788 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1789 		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
1790 		    "0x%x/0x%x/0x%x/%d\n",
1791 		    cap, val, tgtonly, doset, rval));
1792 
1793 	} else {
1794 		/*
1795 		 * Process getcap request.
1796 		 */
1797 		switch (cidx) {
1798 		case SCSI_CAP_DMA_MAX:
1799 			/*
1800 			 * For X86 this capability is caught in scsi_ifgetcap().
1801 			 * XXX Should this be getting the value from the pHCI?
1802 			 */
1803 			rval = (int)VHCI_DMA_MAX_XFER_CAP;
1804 			break;
1805 
1806 		case SCSI_CAP_INITIATOR_ID:
1807 			rval = 0x00;
1808 			break;
1809 
1810 		case SCSI_CAP_ARQ:
1811 		case SCSI_CAP_RESET_NOTIFICATION:
1812 		case SCSI_CAP_TAGGED_QING:
1813 			rval = 1;
1814 			break;
1815 
1816 		case SCSI_CAP_SCSI_VERSION:
1817 			rval = 3;
1818 			break;
1819 
1820 		case SCSI_CAP_INTERCONNECT_TYPE:
1821 			rval = INTERCONNECT_FABRIC;
1822 			break;
1823 
1824 		case SCSI_CAP_LUN_RESET:
1825 			/*
1826 			 * scsi_vhci will always return success for LUN reset.
1827 			 * When request for doing LUN reset comes
1828 			 * through scsi_reset entry point, at that time attempt
1829 			 * will be made to do reset through all the possible
1830 			 * paths.
1831 			 */
1832 			mutex_enter(&vlun->svl_mutex);
1833 			rval = vlun->svl_support_lun_reset;
1834 			mutex_exit(&vlun->svl_mutex);
1835 			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
1836 			    "scsi_vhci_getcap:"
1837 			    "Getting the Lun reset capability %d", rval));
1838 			break;
1839 
1840 		case SCSI_CAP_SECTOR_SIZE:
1841 			mutex_enter(&vlun->svl_mutex);
1842 			rval = vlun->svl_sector_size;
1843 			mutex_exit(&vlun->svl_mutex);
1844 			break;
1845 
1846 		case SCSI_CAP_CDB_LEN:
1847 			rval = VHCI_SCSI_CDB_SIZE;
1848 			break;
1849 
1850 		case SCSI_CAP_DMA_MAX_ARCH:
1851 			/*
1852 			 * For X86 this capability is caught in scsi_ifgetcap().
1853 			 * XXX Should this be getting the value from the pHCI?
1854 			 */
1855 			rval = 0;
1856 			break;
1857 
1858 		default:
1859 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1860 			    "!vhci_getcap: unsupported %d", cidx));
1861 			rval = UNDEFINED;
1862 			break;
1863 		}
1864 
1865 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1866 		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
1867 		    "0x%x/0x%x/0x%x/%d\n",
1868 		    cap, val, tgtonly, doset, rval));
1869 	}
1870 	return (rval);
1871 }
1872 
1873 
1874 /*
1875  * Function name : vhci_scsi_getcap()
1876  *
1877  */
1878 static int
1879 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
1880 {
1881 	return (vhci_commoncap(ap, cap, 0, whom, 0));
1882 }
1883 
1884 static int
1885 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1886 {
1887 	return (vhci_commoncap(ap, cap, value, whom, 1));
1888 }
1889 
1890 /*
1891  * Function name : vhci_scsi_abort()
1892  */
1893 /* ARGSUSED */
1894 static int
1895 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1896 {
1897 	return (0);
1898 }
1899 
1900 /*
1901  * Function name : vhci_scsi_init_pkt
1902  *
1903  * Return Values : pointer to scsi_pkt, or NULL
1904  */
1905 /* ARGSUSED */
1906 static struct scsi_pkt *
1907 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1908 	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1909 	int flags, int (*callback)(caddr_t), caddr_t arg)
1910 {
1911 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1912 	struct vhci_pkt		*vpkt;
1913 	int			rval;
1914 	int			newpkt = 0;
1915 	struct scsi_pkt		*pktp;
1916 
1918 	if (pkt == NULL) {
1919 		if (cmdlen > VHCI_SCSI_CDB_SIZE) {
1920 			if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) ||
1921 			    ((flags & VHCI_SCSI_OSD_PKT_FLAGS) !=
1922 			    VHCI_SCSI_OSD_PKT_FLAGS)) {
1923 				VHCI_DEBUG(1, (CE_NOTE, NULL,
1924 				    "!init pkt: cdb size not supported\n"));
1925 				return (NULL);
1926 			}
1927 		}
1928 
1929 		pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
1930 		    ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
1931 		    arg);
1932 
1933 		if (pktp == NULL) {
1934 			return (NULL);
1935 		}
1936 
1937 		/* Get the vhci's private structure */
1938 		vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
1939 		ASSERT(vpkt);
1940 
1941 		/* Save the target driver's packet */
1942 		vpkt->vpkt_tgt_pkt = pktp;
1943 
1944 		/*
1945 		 * Save pkt_tgt_init_pkt fields if deferred binding
1946 		 * is needed or for other purposes.
1947 		 */
1948 		vpkt->vpkt_tgt_init_pkt_flags = flags;
1949 		vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
1950 		vpkt->vpkt_state = VHCI_PKT_IDLE;
1951 		vpkt->vpkt_tgt_init_cdblen = cmdlen;
1952 		vpkt->vpkt_tgt_init_scblen = statuslen;
1953 		newpkt = 1;
1954 	} else { /* pkt not NULL */
1955 		vpkt = pkt->pkt_ha_private;
1956 	}
1957 
1958 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
1959 	    "vpkt %p flags %x\n", (void *)vpkt, flags));
1960 
1961 	/* Clear any stale error flags */
1962 	if (bp) {
1963 		bioerror(bp, 0);
1964 	}
1965 
1966 	vpkt->vpkt_tgt_init_bp = bp;
1967 
1968 	if (flags & PKT_DMA_PARTIAL) {
1969 
1970 		/*
1971 		 * Immediate binding is needed.
1972 		 * Target driver may not set this flag in next invocation.
1973 		 * vhci has to remember this flag was set during first
1974 		 * invocation of vhci_scsi_init_pkt.
1975 		 */
1976 		vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
1977 	}
1978 
1979 	if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
1980 
1981 		/*
1982 		 * Re-initialize some of the target driver packet state
1983 		 * information.
1984 		 */
1985 		vpkt->vpkt_tgt_pkt->pkt_state = 0;
1986 		vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
1987 		vpkt->vpkt_tgt_pkt->pkt_reason = 0;
1988 
1989 		/*
1990 		 * Binding a vpkt->vpkt_path for this IO at init_time.
1991 		 * If an IO error happens later, target driver will clear
1992 		 * this vpkt->vpkt_path binding before re-init IO again.
1993 		 */
1994 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1995 		    "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
1996 		    (void *)vpkt, newpkt));
1997 		if (pkt && vpkt->vpkt_hba_pkt) {
1998 			VHCI_DEBUG(4, (CE_NOTE, NULL,
1999 			    "v_s_i_p calling update_pHCI_pkt resid %ld\n",
2000 			    pkt->pkt_resid));
2001 			vhci_update_pHCI_pkt(vpkt, pkt);
2002 		}
2003 		if (callback == SLEEP_FUNC) {
2004 			rval = vhci_bind_transport(
2005 			    ap, vpkt, flags, callback);
2006 		} else {
2007 			rval = vhci_bind_transport(
2008 			    ap, vpkt, flags, NULL_FUNC);
2009 		}
2010 		VHCI_DEBUG(8, (CE_NOTE, NULL,
2011 		    "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
2012 		    (void *)vpkt, rval));
2013 		if (bp) {
2014 			if (rval == TRAN_FATAL_ERROR) {
2015 				/*
2016 				 * No paths available. Could not bind
2017 				 * any pHCI. Setting EFAULT as a way
2018 				 * to indicate no DMA is mapped.
2019 				 */
2020 				bioerror(bp, EFAULT);
2021 			} else {
2022 				/*
2023 				 * Do not indicate any pHCI errors to
2024 				 * target driver otherwise.
2025 				 */
2026 				bioerror(bp, 0);
2027 			}
2028 		}
2029 		if (rval != TRAN_ACCEPT) {
2030 			VHCI_DEBUG(8, (CE_NOTE, NULL,
2031 			    "vhci_scsi_init_pkt: "
2032 			    "v_b_t failed 0x%p newpkt %x\n",
2033 			    (void *)vpkt, newpkt));
2034 			if (newpkt) {
2035 				scsi_hba_pkt_free(ap,
2036 				    vpkt->vpkt_tgt_pkt);
2037 			}
2038 			return (NULL);
2039 		}
2040 		ASSERT(vpkt->vpkt_hba_pkt != NULL);
2041 		ASSERT(vpkt->vpkt_path != NULL);
2042 
2043 		/* Update the resid for the target driver */
2044 		vpkt->vpkt_tgt_pkt->pkt_resid =
2045 		    vpkt->vpkt_hba_pkt->pkt_resid;
2046 	}
2047 
2048 	return (vpkt->vpkt_tgt_pkt);
2049 }
2050 
2051 /*
2052  * Function name : vhci_scsi_destroy_pkt
2053  *
2054  * Return Values : none
2055  */
2056 static void
2057 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2058 {
2059 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2060 
2061 	VHCI_DEBUG(8, (CE_NOTE, NULL,
2062 	    "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
2063 
2064 	vpkt->vpkt_tgt_init_pkt_flags = 0;
2065 	if (vpkt->vpkt_hba_pkt) {
2066 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2067 		vpkt->vpkt_hba_pkt = NULL;
2068 	}
2069 	if (vpkt->vpkt_path) {
2070 		mdi_rele_path(vpkt->vpkt_path);
2071 		vpkt->vpkt_path = NULL;
2072 	}
2073 
2074 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
2075 	scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
2076 }
2077 
2078 /*
2079  * Function name : vhci_scsi_dmafree()
2080  *
2081  * Return Values : none
2082  */
2083 /*ARGSUSED*/
2084 static void
2085 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2086 {
2087 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2088 
2089 	VHCI_DEBUG(6, (CE_NOTE, NULL,
2090 	    "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
2091 
2092 	ASSERT(vpkt != NULL);
2093 	if (vpkt->vpkt_hba_pkt) {
2094 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2095 		vpkt->vpkt_hba_pkt = NULL;
2096 	}
2097 	if (vpkt->vpkt_path) {
2098 		mdi_rele_path(vpkt->vpkt_path);
2099 		vpkt->vpkt_path = NULL;
2100 	}
2101 }
2102 
2103 /*
2104  * Function name : vhci_scsi_sync_pkt()
2105  *
2106  * Return Values : none
2107  */
2108 /*ARGSUSED*/
2109 static void
2110 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2111 {
2112 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2113 
2114 	ASSERT(vpkt != NULL);
2115 	if (vpkt->vpkt_hba_pkt) {
2116 		scsi_sync_pkt(vpkt->vpkt_hba_pkt);
2117 	}
2118 }
2119 
2120 /*
2121  * routine for reset notification setup, to register or cancel.
2122  */
2123 static int
2124 vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
2125     void (*callback)(caddr_t), caddr_t arg)
2126 {
2127 	struct scsi_vhci *vhci = ADDR2VHCI(ap);
2128 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
2129 	    &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
2130 }
2131 
2132 static int
2133 vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
2134     char *name, int len, int bus_addr)
2135 {
2136 	dev_info_t		*cdip;
2137 	char			*guid;
2138 	scsi_vhci_lun_t		*vlun;
2139 
2140 	ASSERT(sd != NULL);
2141 	ASSERT(name != NULL);
2142 
2143 	*name = 0;
2144 	cdip = sd->sd_dev;
2145 
2146 	ASSERT(cdip != NULL);
2147 
2148 	if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS)
2149 		return (1);
2150 
2151 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
2152 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS)
2153 		return (1);
2154 
2155 	/*
2156 	 * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>".
2157 	 *	<guid>		bus_addr argument == 0
2158 	 *	<bus_addr>	bus_addr argument != 0
2159  * Since the <guid> is already provided with the unit-address, we just
2160  * provide the failover module name in <bus_addr> to keep the output shorter.
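 * For example, with a non-zero bus_addr the message might read (assuming
 * the symmetric failover module "f_sym"):
 *	"sd0 at scsi_vhci0: unit-address g<guid>: f_sym"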
2161 	 */
2162 	vlun = ADDR2VLUN(&sd->sd_address);
2163 	if (bus_addr == 0) {
2164 		/* report the guid:  */
2165 		(void) snprintf(name, len, "g%s", guid);
2166 	} else if (vlun && vlun->svl_fops_name) {
2167 		/* report the name of the failover module */
2168 		(void) snprintf(name, len, "%s", vlun->svl_fops_name);
2169 	}
2170 
2171 	ddi_prop_free(guid);
2172 	return (1);
2173 }
2174 
2175 static int
2176 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
2177 {
2178 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
2179 }
2180 
2181 static int
2182 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
2183 {
2184 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
2185 }
2186 
2187 /*
2188  * Return a pointer to the guid part of the devnm.
2189  * devnm format is "nodename@busaddr", busaddr format is "gGUID".
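 * For example, given "ssd@g600a0b80001f" (hypothetical GUID), a pointer
 * to "600a0b80001f" is returned; a devnm with no "@g" yields NULL.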
2190  */
2191 static char *
2192 vhci_devnm_to_guid(char *devnm)
2193 {
2194 	char *cp = devnm;
2195 
2196 	if (devnm == NULL)
2197 		return (NULL);
2198 
2199 	while (*cp != '\0' && *cp != '@')
2200 		cp++;
2201 	if (*cp == '@' && *(cp + 1) == 'g')
2202 		return (cp + 2);
2203 	return (NULL);
2204 }
2205 
2206 static int
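/*
 * Select a path for this packet and bind the transport to it.  On success
 * a pHCI pkt is allocated/bound and vpkt->vpkt_hba_pkt and vpkt->vpkt_path
 * are set.  Returns TRAN_ACCEPT on success, TRAN_BUSY on transient
 * conditions (PGR semaphore contention, busy paths, failover in progress)
 * and TRAN_FATAL_ERROR when no usable path exists.
 */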
2207 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
2208     int (*func)(caddr_t))
2209 {
2210 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
2211 	dev_info_t		*cdip = ADDR2DIP(ap);
2212 	mdi_pathinfo_t		*pip = NULL;
2213 	mdi_pathinfo_t		*npip = NULL;
2214 	scsi_vhci_priv_t	*svp = NULL;
2215 	struct scsi_device	*psd = NULL;
2216 	struct scsi_address	*address = NULL;
2217 	struct scsi_pkt		*pkt = NULL;
2218 	int			rval = -1;
2219 	int			pgr_sema_held = 0;
2220 	int			held;
2221 	int			mps_flag = MDI_SELECT_ONLINE_PATH;
2222 	struct scsi_vhci_lun	*vlun;
2223 	time_t			tnow;
2224 	int			path_instance = 0;
2225 
2226 	vlun = ADDR2VLUN(ap);
2227 	ASSERT(vlun != 0);
2228 
2229 	if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
2230 	    (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2231 	    VHCI_PROUT_REGISTER) ||
2232 	    ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2233 	    VHCI_PROUT_R_AND_IGNORE))) {
2234 		if (!sema_tryp(&vlun->svl_pgr_sema))
2235 			return (TRAN_BUSY);
2236 		pgr_sema_held = 1;
2237 		if (vlun->svl_first_path != NULL) {
2238 			rval = mdi_select_path(cdip, NULL,
2239 			    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2240 			    NULL, &pip);
2241 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2242 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2243 				    "vhci_bind_transport: path select fail\n"));
2244 			} else {
2245 				npip = pip;
2246 				do {
2247 					if (npip == vlun->svl_first_path) {
2248 						VHCI_DEBUG(4, (CE_NOTE, NULL,
2249 						    "vhci_bind_transport: "
2250 						    "valid first path 0x%p\n",
2251 						    (void *)
2252 						    vlun->svl_first_path));
2253 						pip = vlun->svl_first_path;
2254 						goto bind_path;
2255 					}
2256 					pip = npip;
2257 					rval = mdi_select_path(cdip, NULL,
2258 					    MDI_SELECT_ONLINE_PATH |
2259 					    MDI_SELECT_STANDBY_PATH,
2260 					    pip, &npip);
2261 					mdi_rele_path(pip);
2262 				} while ((rval == MDI_SUCCESS) &&
2263 				    (npip != NULL));
2264 			}
2265 		}
2266 
2267 		if (vlun->svl_first_path) {
2268 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2269 			    "vhci_bind_transport: invalid first path 0x%p\n",
2270 			    (void *)vlun->svl_first_path));
2271 			vlun->svl_first_path = NULL;
2272 		}
2273 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2274 		if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2275 			if (!sema_tryp(&vlun->svl_pgr_sema))
2276 				return (TRAN_BUSY);
2277 		}
2278 		pgr_sema_held = 1;
2279 	}
2280 
2281 	/*
2282 	 * If the path is already bound for PKT_PARTIAL_DMA case,
2283 	 * try to use the same path.
2284 	 */
2285 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2286 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2287 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2288 		    "vpkt 0x%p, path 0x%p\n",
2289 		    (void *)vpkt, (void *)vpkt->vpkt_path));
2290 		pip = vpkt->vpkt_path;
2291 		goto bind_path;
2292 	}
2293 
2294 	/*
2295 	 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
2296 	 * indicates that mdi_select_path should be called to select a
2297 	 * specific instance.
2298 	 *
2299 	 * NB: Condition pkt_path_instance reference on proper allocation.
2300 	 */
2301 	if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
2302 	    scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
2303 		path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2304 	}
2305 
2306 	/*
2307 	 * If reservation is active bind the transport directly to the pip
2308 	 * with the reservation.
2309 	 */
2310 	if (vpkt->vpkt_hba_pkt == NULL) {
2311 		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2312 			if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2313 				pip = vlun->svl_resrv_pip;
2314 				mdi_hold_path(pip);
2315 				vlun->svl_waiting_for_activepath = 0;
2316 				rval = MDI_SUCCESS;
2317 				goto bind_path;
2318 			} else {
2319 				if (pgr_sema_held) {
2320 					sema_v(&vlun->svl_pgr_sema);
2321 				}
2322 				return (TRAN_BUSY);
2323 			}
2324 		}
2325 try_again:
2326 		rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2327 		    path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2328 		    (void *)(intptr_t)path_instance, &pip);
2329 		if (rval == MDI_BUSY) {
2330 			if (pgr_sema_held) {
2331 				sema_v(&vlun->svl_pgr_sema);
2332 			}
2333 			return (TRAN_BUSY);
2334 		} else if (rval == MDI_DEVI_ONLINING) {
2335 			/*
2336 			 * If we are here then we are in the midst of
2337 			 * an attach/probe of the client device.
2338 			 * We attempt to bind to an ONLINE path if available;
2339 			 * otherwise it is OK to bind to a STANDBY path
2340 			 * (instead of triggering a failover) because I/O
2341 			 * associated with attach/probe (e.g. INQUIRY, block 0
2342 			 * read) is completed by targets even on passive paths.
2343 			 * If no ONLINE path is available, it is important
2344 			 * to set svl_waiting_for_activepath for two
2345 			 * reasons: (1) to avoid sense analysis in the
2346 			 * "external failure detection" codepath in
2347 			 * vhci_intr(); failure to do so will result in an
2348 			 * infinite loop (unless an ONLINE path becomes
2349 			 * available at some point); and (2) to avoid an
2350 			 * unnecessary failover (see the "---Waiting For Active
2351 			 * Path---" comment below).
2352 			 */
2353 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2354 			    "state\n", (void *)cdip));
2355 			pip = NULL;
2356 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2357 			    mps_flag, NULL, &pip);
2358 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2359 				if (vlun->svl_waiting_for_activepath == 0) {
2360 					vlun->svl_waiting_for_activepath = 1;
2361 					vlun->svl_wfa_time = ddi_get_time();
2362 				}
2363 				mps_flag |= MDI_SELECT_STANDBY_PATH;
2364 				rval = mdi_select_path(cdip,
2365 				    vpkt->vpkt_tgt_init_bp,
2366 				    mps_flag, NULL, &pip);
2367 				if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2368 					if (pgr_sema_held) {
2369 						sema_v(&vlun->svl_pgr_sema);
2370 					}
2371 					return (TRAN_FATAL_ERROR);
2372 				}
2373 				goto bind_path;
2374 			}
2375 		} else if ((rval == MDI_FAILURE) ||
2376 		    ((rval == MDI_NOPATH) && (path_instance))) {
2377 			if (pgr_sema_held) {
2378 				sema_v(&vlun->svl_pgr_sema);
2379 			}
2380 			return (TRAN_FATAL_ERROR);
2381 		}
2382 
2383 		if ((pip == NULL) || (rval == MDI_NOPATH)) {
2384 			while (vlun->svl_waiting_for_activepath) {
2385 				/*
2386 				 * ---Waiting For Active Path---
2387 				 * This device was discovered across a
2388 				 * passive path; let's wait for a little
2389 				 * bit; hopefully an active path will
2390 				 * show up, obviating the need for a
2391 				 * failover.
2392 				 */
2393 				tnow = ddi_get_time();
2394 				if (tnow - vlun->svl_wfa_time >= 60) {
2395 					vlun->svl_waiting_for_activepath = 0;
2396 				} else {
2397 					drv_usecwait(1000);
2398 					if (vlun->svl_waiting_for_activepath
2399 					    == 0) {
2400 						/*
2401 						 * an active path has come
2402 						 * online!
2403 						 */
2404 						goto try_again;
2405 					}
2406 				}
2407 			}
2408 			VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2409 			if (!held) {
2410 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2411 				    "!Lun not held\n"));
2412 				if (pgr_sema_held) {
2413 					sema_v(&vlun->svl_pgr_sema);
2414 				}
2415 				return (TRAN_BUSY);
2416 			}
2417 			/*
2418 			 * now that the LUN is stable, one last check
2419 			 * to make sure no other changes sneaked in
2420 			 * (like a path coming online or a
2421 			 * failover initiated by another thread)
2422 			 */
2423 			pip = NULL;
2424 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2425 			    0, NULL, &pip);
2426 			if (pip != NULL) {
2427 				VHCI_RELEASE_LUN(vlun);
2428 				vlun->svl_waiting_for_activepath = 0;
2429 				goto bind_path;
2430 			}
2431 
2432 			/*
2433 			 * Check if there is an ONLINE path OR a STANDBY path
2434 			 * available. If none is available, do not attempt
2435 			 * to do a failover, just return a fatal error at this
2436 			 * point.
2437 			 */
2438 			npip = NULL;
2439 			rval = mdi_select_path(cdip, NULL,
2440 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2441 			    NULL, &npip);
2442 			if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2443 				/*
2444 				 * No paths available, just return a FATAL error.
2445 				 */
2446 				VHCI_RELEASE_LUN(vlun);
2447 				if (pgr_sema_held) {
2448 					sema_v(&vlun->svl_pgr_sema);
2449 				}
2450 				return (TRAN_FATAL_ERROR);
2451 			}
2452 			mdi_rele_path(npip);
2453 			if (!(vpkt->vpkt_state & VHCI_PKT_IN_FAILOVER)) {
2454 				VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2455 				    "mdi_failover\n"));
2456 				rval = mdi_failover(vhci->vhci_dip, cdip,
2457 				    MDI_FAILOVER_ASYNC);
2458 			} else {
2459 				rval = vlun->svl_failover_status;
2460 			}
2461 			if (rval == MDI_FAILURE) {
2462 				VHCI_RELEASE_LUN(vlun);
2463 				if (pgr_sema_held) {
2464 					sema_v(&vlun->svl_pgr_sema);
2465 				}
2466 				return (TRAN_FATAL_ERROR);
2467 			} else if (rval == MDI_BUSY) {
2468 				VHCI_RELEASE_LUN(vlun);
2469 				if (pgr_sema_held) {
2470 					sema_v(&vlun->svl_pgr_sema);
2471 				}
2472 				return (TRAN_BUSY);
2473 			} else {
2474 				if (pgr_sema_held) {
2475 					sema_v(&vlun->svl_pgr_sema);
2476 				}
2477 				vpkt->vpkt_state |= VHCI_PKT_IN_FAILOVER;
2478 				return (TRAN_BUSY);
2479 			}
2480 		}
2481 		vlun->svl_waiting_for_activepath = 0;
2482 bind_path:
2483 		vpkt->vpkt_path = pip;
2484 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2485 		ASSERT(svp != NULL);
2486 
2487 		psd = svp->svp_psd;
2488 		ASSERT(psd != NULL);
2489 		address = &psd->sd_address;
2490 	} else {
2491 		pkt = vpkt->vpkt_hba_pkt;
2492 		address = &pkt->pkt_address;
2493 	}
2494 
2495 	/* Verify match of specified path_instance and selected path_instance */
2496 	ASSERT((path_instance == 0) ||
2497 	    (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));
2498 
2499 	/*
2500 	 * For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever
2501 	 * target driver calls vhci_scsi_init_pkt.
2502 	 */
2503 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2504 	    vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2505 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2506 		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2507 		    "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2508 		    (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2509 		pkt = vpkt->vpkt_hba_pkt;
2510 		address = &pkt->pkt_address;
2511 	}
2512 
2513 	if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2514 		pkt = scsi_init_pkt(address, pkt,
2515 		    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2516 		    vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL);
2517 
2518 		if (pkt == NULL) {
2519 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2520 			    "!bind transport: 0x%p 0x%p 0x%p\n",
2521 			    (void *)vhci, (void *)psd, (void *)vpkt));
2522 			if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2523 				MDI_PI_ERRSTAT(vpkt->vpkt_path,
2524 				    MDI_PI_TRANSERR);
2525 				mdi_rele_path(vpkt->vpkt_path);
2526 				vpkt->vpkt_path = NULL;
2527 			}
2528 			if (pgr_sema_held) {
2529 				sema_v(&vlun->svl_pgr_sema);
2530 			}
2531 			/*
2532 			 * Consider it a fatal error if b_error is
2533 			 * set as a result of DMA binding failure
2534 			 * vs. a condition of being temporarily out of
2535 			 * some resource
2536 			 */
2537 			if (vpkt->vpkt_tgt_init_bp == NULL ||
2538 			    geterror(vpkt->vpkt_tgt_init_bp))
2539 				return (TRAN_FATAL_ERROR);
2540 			else
2541 				return (TRAN_BUSY);
2542 		}
2543 	}
2544 
2545 	pkt->pkt_private = vpkt;
2546 	vpkt->vpkt_hba_pkt = pkt;
2547 	return (TRAN_ACCEPT);
2548 }
2549 
2550 
2551 /*PRINTFLIKE3*/
2552 void
2553 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2554 {
2555 	char		buf[256];
2556 	va_list		ap;
2557 
2558 	va_start(ap, fmt);
2559 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
2560 	va_end(ap);
2561 
2562 	scsi_log(dip, "scsi_vhci", level, buf);
2563 }
2564 
2565 /*
 * Do a PGR out with the information we've saved away.  Returns 1 on
 * success, 0 once retries are exhausted, and VHCI_PGR_ILLEGALOP if the
 * device reports an illegal-request sense key.
 */
2566 static int
2567 vhci_do_prout(scsi_vhci_priv_t *svp)
2568 {
2570 	struct scsi_pkt			*new_pkt;
2571 	struct buf			*bp;
2572 	scsi_vhci_lun_t			*vlun = svp->svp_svl;
2573 	int				rval, retry, nr_retry, ua_retry;
2574 	uint8_t				*sns, skey;
2575 
2576 	bp = getrbuf(KM_SLEEP);
2577 	bp->b_flags = B_WRITE;
2578 	bp->b_resid = 0;
2579 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2580 	bp->b_bcount = vlun->svl_bcount;
2581 
2582 	VHCI_INCR_PATH_CMDCOUNT(svp);
2583 
2584 	new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
2585 	    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
2586 	    SLEEP_FUNC, NULL);
2587 	if (new_pkt == NULL) {
2588 		VHCI_DECR_PATH_CMDCOUNT(svp);
2589 		freerbuf(bp);
2590 		cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
2591 		return (0);
2592 	}
2593 	mutex_enter(&vlun->svl_mutex);
2594 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2595 	bp->b_bcount = vlun->svl_bcount;
2596 	bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
2597 	    sizeof (vlun->svl_cdb));
2598 	new_pkt->pkt_time = vlun->svl_time;
2599 	mutex_exit(&vlun->svl_mutex);
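	/*
	 * FLAG_NOINTR: the command is executed in polled mode, completing
	 * before the transport call in vhci_do_scsi_cmd() returns.
	 */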
2600 	new_pkt->pkt_flags = FLAG_NOINTR;
2601 
2602 	ua_retry = nr_retry = retry = 0;
2603 again:
2604 	rval = vhci_do_scsi_cmd(new_pkt);
2605 	if (rval != 1) {
2606 		if ((new_pkt->pkt_reason == CMD_CMPLT) &&
2607 		    (SCBP_C(new_pkt) == STATUS_CHECK) &&
2608 		    (new_pkt->pkt_state & STATE_ARQ_DONE)) {
2609 			sns = (uint8_t *)
2610 			    &(((struct scsi_arq_status *)(uintptr_t)
2611 			    (new_pkt->pkt_scbp))->sts_sensedata);
2612 			skey = scsi_sense_key(sns);
2613 			if ((skey == KEY_UNIT_ATTENTION) ||
2614 			    (skey == KEY_NOT_READY)) {
2615 				int max_retry;
2616 				struct scsi_failover_ops *fops;
2617 				fops = vlun->svl_fops;
2618 				rval = fops->sfo_analyze_sense(svp->svp_psd,
2619 				    sns, vlun->svl_fops_ctpriv);
2620 				if (rval == SCSI_SENSE_NOT_READY) {
2621 					max_retry = vhci_prout_not_ready_retry;
2622 					retry = nr_retry++;
2623 					delay(1*drv_usectohz(1000000));
2624 				} else {
2625 					/* chk for state change and update */
2626 					if (rval == SCSI_SENSE_STATE_CHANGED) {
2627 						int held;
2628 						VHCI_HOLD_LUN(vlun,
2629 						    VH_NOSLEEP, held);
2630 						if (!held) {
2631 							rval = TRAN_BUSY;
2632 						} else {
2633 							/* chk for alua first */
2634 							vhci_update_pathstates(
2635 							    (void *)vlun);
2636 						}
2637 					}
2638 					retry = ua_retry++;
2639 					max_retry = VHCI_MAX_PGR_RETRIES;
2640 				}
2641 				if (retry < max_retry) {
2642 					VHCI_DEBUG(4, (CE_WARN, NULL,
2643 					    "!vhci_do_prout retry 0x%x "
2644 					    "(0x%x 0x%x 0x%x)",
2645 					    SCBP_C(new_pkt),
2646 					    new_pkt->pkt_cdbp[0],
2647 					    new_pkt->pkt_cdbp[1],
2648 					    new_pkt->pkt_cdbp[2]));
2649 					goto again;
2650 				}
2651 				rval = 0;
2652 				VHCI_DEBUG(4, (CE_WARN, NULL,
2653 				    "!vhci_do_prout 0x%x "
2654 				    "(0x%x 0x%x 0x%x)",
2655 				    SCBP_C(new_pkt),
2656 				    new_pkt->pkt_cdbp[0],
2657 				    new_pkt->pkt_cdbp[1],
2658 				    new_pkt->pkt_cdbp[2]));
2659 			} else if (skey == KEY_ILLEGAL_REQUEST)
2660 				rval = VHCI_PGR_ILLEGALOP;
2661 		}
2662 	} else {
2663 		rval = 1;
2664 	}
2665 	scsi_destroy_pkt(new_pkt);
2666 	VHCI_DECR_PATH_CMDCOUNT(svp);
2667 	freerbuf(bp);
2668 	return (rval);
2669 }
2670 
2671 static void
2672 vhci_run_cmd(void *arg)
2673 {
2674 	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
2675 	struct scsi_pkt		*tpkt;
2676 	scsi_vhci_priv_t	*svp;
2677 	mdi_pathinfo_t		*pip, *npip;
2678 	scsi_vhci_lun_t		*vlun;
2679 	dev_info_t		*cdip;
2680 	scsi_vhci_priv_t	*nsvp;
2681 	int			fail = 0;
2682 	int			rval;
2683 	struct vhci_pkt		*vpkt;
2684 	uchar_t			cdb_1;
2685 	vhci_prout_t		*prout;
2686 
2687 	vpkt = (struct vhci_pkt *)pkt->pkt_private;
2688 	tpkt = vpkt->vpkt_tgt_pkt;
2689 	pip = vpkt->vpkt_path;
2690 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2691 	if (svp == NULL) {
2692 		tpkt->pkt_reason = CMD_TRAN_ERR;
2693 		tpkt->pkt_statistics = STAT_ABORTED;
2694 		goto done;
2695 	}
2696 	vlun = svp->svp_svl;
2697 	prout = &vlun->svl_prout;
2698 	if (SCBP_C(pkt) != STATUS_GOOD)
2699 		fail++;
2700 	cdip = vlun->svl_dip;
2701 	pip = npip = NULL;
2702 	rval = mdi_select_path(cdip, NULL,
2703 	    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip);
2704 	if ((rval != MDI_SUCCESS) || (npip == NULL)) {
2705 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2706 		    "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
2707 		tpkt->pkt_reason = CMD_TRAN_ERR;
2708 		tpkt->pkt_statistics = STAT_ABORTED;
2709 		goto done;
2710 	}
2711 
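	/*
	 * Temporarily switch the PROUT service action (the low five bits
	 * of CDB byte 1) to REGISTER AND IGNORE EXISTING KEY so that the
	 * registration can be replicated on the remaining paths no matter
	 * what key, if any, is already registered there; the original
	 * service action is restored below once all paths have been tried.
	 */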
2712 	cdb_1 = vlun->svl_cdb[1];
2713 	vlun->svl_cdb[1] &= 0xe0;
2714 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
2715 
2716 	do {
2717 		nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
2718 		if (nsvp == NULL) {
2719 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2720 			    "vhci_run_cmd: no "
2721 			    "client priv! 0x%p offlined?\n",
2722 			    (void *)npip));
2723 			goto next_path;
2724 		}
2725 		if (vlun->svl_first_path == npip) {
2726 			goto next_path;
2727 		} else {
2728 			if (vhci_do_prout(nsvp) != 1)
2729 				fail++;
2730 		}
2731 next_path:
2732 		pip = npip;
2733 		rval = mdi_select_path(cdip, NULL,
2734 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
2735 		    pip, &npip);
2736 		mdi_rele_path(pip);
2737 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
2738 
2739 	vlun->svl_cdb[1] = cdb_1;
2740 
2741 	if (fail) {
2742 		VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
2743 		    "couldn't be replicated on all paths",
2744 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
2745 		vhci_print_prout_keys(vlun, "vhci_run_cmd: ");
2746 
2747 		if (SCBP_C(pkt) != STATUS_GOOD) {
2748 			tpkt->pkt_reason = CMD_TRAN_ERR;
2749 			tpkt->pkt_statistics = STAT_ABORTED;
2750 		}
2751 	} else {
2752 		vlun->svl_pgr_active = 1;
2753 		vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");
2754 
2755 		bcopy((const void *)prout->service_key,
2756 		    (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
2757 		bcopy((const void *)prout->res_key,
2758 		    (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);
2759 
2760 		vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
2761 	}
2762 done:
2763 	if (SCBP_C(pkt) == STATUS_GOOD)
2764 		vlun->svl_first_path = NULL;
2765 
2766 	if (svp)
2767 		VHCI_DECR_PATH_CMDCOUNT(svp);
2768 
2769 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
2770 		scsi_destroy_pkt(pkt);
2771 		vpkt->vpkt_hba_pkt = NULL;
2772 		if (vpkt->vpkt_path) {
2773 			mdi_rele_path(vpkt->vpkt_path);
2774 			vpkt->vpkt_path = NULL;
2775 		}
2776 	}
2777 
2778 	sema_v(&vlun->svl_pgr_sema);
2779 	/*
2780 	 * The PROUT commands are not included in the automatic retry
2781 	 * mechanism, therefore, vpkt_org_vpkt should never be set here.
2782 	 */
2783 	ASSERT(vpkt->vpkt_org_vpkt == NULL);
2784 	scsi_hba_pkt_comp(tpkt);
2785 }
2786 
2787 /*
2788  * Get the keys registered with this target.  Since we will have
2789  * registered the same key with multiple initiators, strip out
2790  * any duplicate keys.
2791  *
2792  * The pointers which will be used to filter the registered keys from
2793  * the device will be stored in filter_prin and filter_pkt.  If the
2794  * allocation length of the buffer was sufficient for the number of
2795  * parameter data bytes available to be returned by the device then the
2796  * key filtering will use the keylist returned from the original
2797  * request.  If the allocation length of the buffer was not sufficient,
2798  * then the filtering will use the keylist returned from the request
2799  * that is resent below.
2800  *
2801  * If the device returns an additional length field that is greater than
2802  * the allocation length of the buffer, then allocate a new buffer which
2803  * can accommodate the number of parameter data bytes available to be
2804  * returned.  Resend the scsi PRIN command, filter out the duplicate
2805  * keys and return as many of the unique keys found that was originally
2806  * requested and set the additional length field equal to the data bytes
2807  * of unique reservation keys available to be returned.
2808  *
2809  * If the device returns an additional length field that is less than or
2810  * equal to the allocation length of the buffer, then all the available
2811  * keys registered were returned by the device.  Filter out the
2812  * duplicate keys and return all of the unique keys found and set the
2813  * additional length field equal to the data bytes of the reservation
2814  * keys to be returned.
2815  */
2816 
2817 #define	VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation))
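/*
 * prin->length and prin->generation are the two 4-byte fields that precede
 * the key list in the PRIN READ KEYS parameter data: an 8-byte header.
 */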
2818 
2819 static int
2820 vhci_do_prin(struct vhci_pkt **intr_vpkt)
2821 {
2822 	scsi_vhci_priv_t *svp;
2823 	struct vhci_pkt *vpkt = *intr_vpkt;
2824 	vhci_prin_readkeys_t *prin;
2825 	scsi_vhci_lun_t *vlun;
2826 	struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address);
2827 
2828 	struct buf		*new_bp = NULL;
2829 	struct scsi_pkt		*new_pkt = NULL;
2830 	struct vhci_pkt		*new_vpkt = NULL;
2831 	uint32_t		needed_length;
2832 	int			rval = VHCI_CMD_CMPLT;
2833 	uint32_t		prin_length = 0;
2834 	uint32_t		svl_prin_length = 0;
2835 
2836 	ASSERT(vpkt->vpkt_path);
2837 	svp = mdi_pi_get_vhci_private(vpkt->vpkt_path);
2838 	ASSERT(svp);
2839 	vlun = svp->svp_svl;
2840 	ASSERT(vlun);
2841 
2842 	/*
2843 	 * If the caller only asked for an amount of data that would not
2844 	 * be enough to include any key data, it is likely that they will
2845 	 * send the next command with a buffer size based on the information
2846 	 * from this header. Doing recovery on this would be a duplication
2847 	 * of effort.
2848 	 */
2849 	if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) {
2850 		rval = VHCI_CMD_CMPLT;
2851 		goto exit;
2852 	}
2853 
2854 	if (vpkt->vpkt_org_vpkt == NULL) {
2855 		/*
2856 		 * Can fail as sleep is not allowed.
2857 		 */
2858 		prin = (vhci_prin_readkeys_t *)
2859 		    bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2860 	} else {
2861 		/*
2862 		 * The retry buf doesn't need to be mapped in.
2863 		 */
2864 		prin = (vhci_prin_readkeys_t *)
2865 		    vpkt->vpkt_tgt_init_bp->b_un.b_daddr;
2866 	}
2867 
2868 	if (prin == NULL) {
2869 		VHCI_DEBUG(5, (CE_WARN, NULL,
2870 		    "vhci_do_prin: bp_mapin_common failed."));
2871 		rval = VHCI_CMD_ERROR;
2872 		goto fail;
2873 	}
2874 
2875 	prin_length = BE_32(prin->length);
2876 
2877 	/*
2878 	 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2879 	 * information to be transferred exceeds the maximum value
2880 	 * that the ALLOCATION LENGTH field is capable of specifying,
2881 	 * the device server shall...terminate the command with CHECK
2882 	 * CONDITION status".  The ALLOCATION LENGTH field of the
2883 	 * PERSISTENT RESERVE IN command is 2 bytes. We should never
2884 	 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
2885 	 * so if we do, then it is an error!
2886 	 */
2887 
2889 	if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) {
2890 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2891 		    "vhci_do_prin: Device returned invalid "
2892 		    "length 0x%x\n", prin_length));
2893 		rval = VHCI_CMD_ERROR;
2894 		goto fail;
2895 	}
2896 	needed_length = prin_length + VHCI_PRIN_HEADER_SZ;
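	/*
	 * Worked example (hypothetical numbers): a device reporting eight
	 * registered keys returns a prin_length of 8 * MHIOC_RESV_KEY_SIZE =
	 * 64 bytes, so needed_length is 64 plus the 8-byte header, i.e. 72
	 * bytes.  A caller whose b_bcount is smaller than that triggers the
	 * resend below.
	 */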
2897 
2898 	/*
2899 	 * If prin->length is greater than the byte count allocated in the
2900 	 * original buffer, then resend the request with enough buffer
2901 	 * allocated to get all of the available registered keys.
2902 	 */
2903 	if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) &&
2904 	    (vpkt->vpkt_org_vpkt == NULL)) {
2905 
2906 		new_pkt = vhci_create_retry_pkt(vpkt);
2907 		if (new_pkt == NULL) {
2908 			rval = VHCI_CMD_ERROR;
2909 			goto fail;
2910 		}
2911 		new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2912 
2913 		/*
2914 		 * This is the buf with buffer pointer
2915 		 * where the prin readkeys will be
2916 		 * returned from the device
2917 		 */
2918 		new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
2919 		    NULL, needed_length, B_READ, NULL_FUNC, NULL);
2920 		if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) {
2921 			if (new_bp) {
2922 				scsi_free_consistent_buf(new_bp);
2923 			}
2924 			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2925 			rval = VHCI_CMD_ERROR;
2926 			goto fail;
2927 		}
2928 		new_bp->b_bcount = needed_length;
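		/* Update the PRIN ALLOCATION LENGTH (CDB bytes 7 and 8). */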
2929 		new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8);
2930 		new_pkt->pkt_cdbp[8] = (uchar_t)needed_length;
2931 
2932 		rval = VHCI_CMD_RETRY;
2933 
2934 		new_vpkt->vpkt_tgt_init_bp = new_bp;
2935 	}
2936 
2937 	if (rval == VHCI_CMD_RETRY) {
2938 
2939 		/*
2940 		 * There were more keys than the original request asked for.
2941 		 */
2942 		mdi_pathinfo_t *path_holder = vpkt->vpkt_path;
2943 
2944 		/*
2945 		 * Release the old path because it does not matter which path
2946 		 * this command is sent down.  This allows the normal bind
2947 		 * transport mechanism to be used.
2948 		 */
2949 		if (vpkt->vpkt_path != NULL) {
2950 			mdi_rele_path(vpkt->vpkt_path);
2951 			vpkt->vpkt_path = NULL;
2952 		}
2953 
2954 		/*
2955 		 * Dispatch the retry command
2956 		 */
2957 		if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2958 		    (void *) new_vpkt, KM_NOSLEEP) == NULL) {
2959 			if (path_holder) {
2960 				vpkt->vpkt_path = path_holder;
2961 				mdi_hold_path(path_holder);
2962 			}
2963 			scsi_free_consistent_buf(new_bp);
2964 			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2965 			rval = VHCI_CMD_ERROR;
2966 			goto fail;
2967 		}
2968 
2969 		/*
2970 		 * If we return VHCI_CMD_RETRY, that means the caller
2971 		 * is going to bail and wait for the reissued command
2972 		 * to complete.  In that case, we need to decrement
2973 		 * the path command count right now.  In any other
2974 		 * case, it'll be decremented by the caller.
2975 		 */
2976 		VHCI_DECR_PATH_CMDCOUNT(svp);
2977 		goto exit;
2978 
2979 	}
2980 
2981 	if (rval == VHCI_CMD_CMPLT) {
2982 		/*
2983 		 * The original request got all of the keys or the recovery
2984 		 * packet has returned.
2985 		 */
2986 		int new;
2987 		int old;
2988 		int num_keys = prin_length / MHIOC_RESV_KEY_SIZE;
2989 
2990 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
2991 		    num_keys));
2992 
2993 #ifdef DEBUG
2994 		VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
2995 		if (vhci_debug == 5)
2996 			vhci_print_prin_keys(prin, num_keys);
2997 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2998 		    "vhci_do_prin: MPxIO old keys:\n"));
2999 		if (vhci_debug == 5)
3000 			vhci_print_prin_keys(&vlun->svl_prin, num_keys);
3001 #endif
3002 
3003 		/*
3004 		 * Filter out all duplicate keys returned from the device
3005 		 * We know that we use a different key for every host, so we
3006 		 * can simply strip out duplicates. Otherwise we would need to
3007 		 * do more bookkeeping to figure out which keys to strip out.
3008 		 */
3009 
3010 		new = 0;
3011 
3012 		/*
3013 		 * If we got at least one key, copy it.
3014 		 */
3015 		if (num_keys > 0) {
3016 			vlun->svl_prin.keylist[0] = prin->keylist[0];
3017 			new++;
3018 		}
3019 
3020 		/*
3021 		 * find next unique key.
3022 		 */
3023 		for (old = 1; old < num_keys; old++) {
3024 			int j;
3025 			int match = 0;
3026 
3027 			if (new >= VHCI_NUM_RESV_KEYS)
3028 				break;
3029 			for (j = 0; j < new; j++) {
3030 				if (bcmp(&prin->keylist[old],
3031 				    &vlun->svl_prin.keylist[j],
3032 				    sizeof (mhioc_resv_key_t)) == 0) {
3033 					match = 1;
3034 					break;
3035 				}
3036 			}
3037 			if (!match) {
3038 				vlun->svl_prin.keylist[new] =
3039 				    prin->keylist[old];
3040 				new++;
3041 			}
3042 		}
3043 
3044 		/* Stored Big Endian */
3045 		vlun->svl_prin.generation = prin->generation;
3046 		svl_prin_length = new * sizeof (mhioc_resv_key_t);
3047 		/* Stored Big Endian */
3048 		vlun->svl_prin.length = BE_32(svl_prin_length);
3049 		svl_prin_length += VHCI_PRIN_HEADER_SZ;
3050 
3051 		/*
3052 		 * If we arrived at this point after issuing a retry, make sure
3053 		 * that we put everything back the way it originally was so
3054 		 * that the target driver can complete the command correctly.
3055 		 */
3056 		if (vpkt->vpkt_org_vpkt != NULL) {
3057 			new_bp = vpkt->vpkt_tgt_init_bp;
3058 
3059 			scsi_free_consistent_buf(new_bp);
3060 
3061 			vpkt = vhci_sync_retry_pkt(vpkt);
3062 			*intr_vpkt = vpkt;
3063 
3064 			/*
3065 			 * Make sure the original buffer is mapped into kernel
3066 			 * space before we try to copy the filtered keys into
3067 			 * it.
3068 			 */
3069 			prin = (vhci_prin_readkeys_t *)bp_mapin_common(
3070 			    vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
3071 		}
3072 
3073 		/*
3074 		 * Now copy the desired number of prin keys into the original
3075 		 * target buffer.
3076 		 */
3077 		if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) {
3078 			/*
3079 			 * It is safe to return all of the available unique
3080 			 * keys
3081 			 */
3082 			bcopy(&vlun->svl_prin, prin, svl_prin_length);
3083 		} else {
3084 			/*
3085 			 * Not all of the available keys were requested by the
3086 			 * original command.
3087 			 */
3088 			bcopy(&vlun->svl_prin, prin,
3089 			    vpkt->vpkt_tgt_init_bp->b_bcount);
3090 		}
3091 #ifdef DEBUG
3092 		VHCI_DEBUG(5, (CE_NOTE, NULL,
3093 		    "vhci_do_prin: To Application:\n"));
3094 		if (vhci_debug == 5)
3095 			vhci_print_prin_keys(prin, new);
3096 		VHCI_DEBUG(5, (CE_NOTE, NULL,
3097 		    "vhci_do_prin: MPxIO new keys:\n"));
3098 		if (vhci_debug == 5)
3099 			vhci_print_prin_keys(&vlun->svl_prin, new);
3100 #endif
3101 	}
3102 fail:
3103 	if (rval == VHCI_CMD_ERROR) {
3104 		/*
3105 		 * If we arrived at this point after issuing a
3106 		 * retry, make sure that we put everything back
3107 		 * the way it originally was so that ssd can
3108 		 * complete the command correctly.
3109 		 */
3110 
3111 		if (vpkt->vpkt_org_vpkt != NULL) {
3112 			new_bp = vpkt->vpkt_tgt_init_bp;
3113 			if (new_bp != NULL) {
3114 				scsi_free_consistent_buf(new_bp);
3115 			}
3116 
3117 			new_vpkt = vpkt;
3118 			vpkt = vpkt->vpkt_org_vpkt;
3119 
3120 			vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3121 			    new_vpkt->vpkt_tgt_pkt);
3122 		}
3123 
3124 		/*
3125 		 * Mark this command completion as having an error so that
3126 		 * ssd will retry the command.
3127 		 */
3128 
3129 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3130 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3131 
3132 		rval = VHCI_CMD_CMPLT;
3133 	}
3134 exit:
3135 	/*
3136 	 * Make sure that the semaphore is only released once.
3137 	 */
3138 	if (rval == VHCI_CMD_CMPLT) {
3139 		sema_v(&vlun->svl_pgr_sema);
3140 	}
3141 
3142 	return (rval);
3143 }
3144 
3145 static void
3146 vhci_intr(struct scsi_pkt *pkt)
3147 {
3148 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3149 	struct scsi_pkt		*tpkt;
3150 	scsi_vhci_priv_t	*svp;
3151 	scsi_vhci_lun_t		*vlun;
3152 	int			rval, held;
3153 	struct scsi_failover_ops	*fops;
3154 	uint8_t			*sns, skey, asc, ascq;
3155 	mdi_pathinfo_t		*lpath;
3156 	static char		*timeout_err = "Command Timeout";
3157 	static char		*parity_err = "Parity Error";
3158 	char			*err_str = NULL;
3159 	dev_info_t		*vdip, *cdip;
3160 	char			*cpath;
3161 
3162 	ASSERT(vpkt != NULL);
3163 	tpkt = vpkt->vpkt_tgt_pkt;
3164 	ASSERT(tpkt != NULL);
3165 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3166 	ASSERT(svp != NULL);
3167 	vlun = svp->svp_svl;
3168 	ASSERT(vlun != NULL);
3169 	lpath = vpkt->vpkt_path;
3170 
3171 	/*
3172 	 * sync up the target driver's pkt with the pkt that
3173 	 * we actually used
3174 	 */
3175 	*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
3176 	tpkt->pkt_resid = pkt->pkt_resid;
3177 	tpkt->pkt_state = pkt->pkt_state;
3178 	tpkt->pkt_statistics = pkt->pkt_statistics;
3179 	tpkt->pkt_reason = pkt->pkt_reason;
3180 
3181 	/* Return path_instance information back to the target driver. */
3182 	if (scsi_pkt_allocated_correctly(tpkt)) {
3183 		if (scsi_pkt_allocated_correctly(pkt)) {
3184 			/*
3185 			 * If both packets were correctly allocated,
3186 			 * return path returned by pHCI.
3187 			 */
3188 			tpkt->pkt_path_instance = pkt->pkt_path_instance;
3189 		} else {
3190 			/* Otherwise return path of pHCI we used */
3191 			tpkt->pkt_path_instance =
3192 			    mdi_pi_get_path_instance(lpath);
3193 		}
3194 	}
3195 
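	/*
	 * The PGR semaphore was taken in vhci_bind_transport() for PROUT
	 * register and for PRIN commands.  On a failed command the normal
	 * release points (vhci_run_cmd() and vhci_do_prin()) are never
	 * reached, so release it here.
	 */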
3196 	if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3197 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3198 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3199 		if ((SCBP_C(pkt) != STATUS_GOOD) ||
3200 		    (pkt->pkt_reason != CMD_CMPLT)) {
3201 			sema_v(&vlun->svl_pgr_sema);
3202 		}
3203 	} else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3204 		if (pkt->pkt_reason != CMD_CMPLT ||
3205 		    (SCBP_C(pkt) != STATUS_GOOD)) {
3206 			sema_v(&vlun->svl_pgr_sema);
3207 		}
3208 	}
3209 
3210 	switch (pkt->pkt_reason) {
3211 	case CMD_CMPLT:
3212 		/*
3213 		 * cmd completed successfully, check for scsi errors
3214 		 */
3215 		switch (*(pkt->pkt_scbp)) {
3216 		case STATUS_CHECK:
3217 			if (pkt->pkt_state & STATE_ARQ_DONE) {
3218 				sns = (uint8_t *)
3219 				    &(((struct scsi_arq_status *)(uintptr_t)
3220 				    (pkt->pkt_scbp))->sts_sensedata);
3221 				skey = scsi_sense_key(sns);
3222 				asc = scsi_sense_asc(sns);
3223 				ascq = scsi_sense_ascq(sns);
3224 				fops = vlun->svl_fops;
3225 				ASSERT(fops != NULL);
3226 				VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3227 				    "Received sns key %x  asc %x  ascq %x\n",
3228 				    skey, asc, ascq));
3229 
3230 				if (vlun->svl_waiting_for_activepath == 1) {
3231 					/*
3232 					 * if we are here it means we are
3233 					 * in the midst of a probe/attach
3234 					 * through a passive path; this
3235 					 * case is exempt from sense analysis
3236 					 * for detection of ext. failover
3237 					 * because that would unnecessarily
3238 					 * increase attach time.
3239 					 */
3240 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3241 					    vpkt->vpkt_tgt_init_scblen);
3242 					break;
3243 				}
3244 				if (asc == VHCI_SCSI_PERR) {
3245 					/*
3246 					 * parity error
3247 					 */
3248 					err_str = parity_err;
3249 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3250 					    vpkt->vpkt_tgt_init_scblen);
3251 					break;
3252 				}
3253 				rval = fops->sfo_analyze_sense(svp->svp_psd,
3254 				    sns, vlun->svl_fops_ctpriv);
3255 				if ((rval == SCSI_SENSE_NOFAILOVER) ||
3256 				    (rval == SCSI_SENSE_UNKNOWN) ||
3257 				    (rval == SCSI_SENSE_NOT_READY)) {
3258 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3259 					    vpkt->vpkt_tgt_init_scblen);
3260 					break;
3261 				} else if (rval == SCSI_SENSE_STATE_CHANGED) {
3262 					struct scsi_vhci	*vhci;
3263 					vhci = ADDR2VHCI(&tpkt->pkt_address);
3264 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3265 					if (!held) {
3266 						/*
3267 						 * looks like some other thread
3268 						 * has already detected this
3269 						 * condition
3270 						 */
3271 						tpkt->pkt_state &=
3272 						    ~STATE_ARQ_DONE;
3273 						*(tpkt->pkt_scbp) =
3274 						    STATUS_BUSY;
3275 						break;
3276 					}
3277 					(void) taskq_dispatch(
3278 					    vhci->vhci_update_pathstates_taskq,
3279 					    vhci_update_pathstates,
3280 					    (void *)vlun, KM_SLEEP);
3281 				} else {
3282 					/*
3283 					 * externally initiated failover
3284 					 * has occurred or is in progress
3285 					 */
3286 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3287 					if (!held) {
3288 						/*
3289 						 * looks like some other thread
3290 						 * has already detected this
3291 						 * condition
3292 						 */
3293 						tpkt->pkt_state &=
3294 						    ~STATE_ARQ_DONE;
3295 						*(tpkt->pkt_scbp) =
3296 						    STATUS_BUSY;
3297 						break;
3298 					} else {
3299 						rval = vhci_handle_ext_fo
3300 						    (pkt, rval);
3301 						if (rval == BUSY_RETURN) {
3302 							tpkt->pkt_state &=
3303 							    ~STATE_ARQ_DONE;
3304 							*(tpkt->pkt_scbp) =
3305 							    STATUS_BUSY;
3306 							break;
3307 						}
3308 						bcopy(pkt->pkt_scbp,
3309 						    tpkt->pkt_scbp,
3310 						    vpkt->vpkt_tgt_init_scblen);
3311 						break;
3312 					}
3313 				}
3314 			}
3315 			break;
3316 
3317 		/*
3318 		 * If this is a good SCSI-II RELEASE cmd completion then restore
3319 		 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3320 		 * If this is a good SCSI-II RESERVE cmd completion then set
3321 		 * VLUN_RESERVE_ACTIVE_FLG.
3322 		 */
3323 		case STATUS_GOOD:
3324 			if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3325 			    (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3326 				(void) mdi_set_lb_policy(vlun->svl_dip,
3327 				    vlun->svl_lb_policy_save);
3328 				vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3329 				VHCI_DEBUG(1, (CE_WARN, NULL,
3330 				    "!vhci_intr: vlun 0x%p release path 0x%p",
3331 				    (void *)vlun, (void *)vpkt->vpkt_path));
3332 			}
3333 
3334 			if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3335 			    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3336 				vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3337 				vlun->svl_resrv_pip = vpkt->vpkt_path;
3338 				VHCI_DEBUG(1, (CE_WARN, NULL,
3339 				    "!vhci_intr: vlun 0x%p reserved path 0x%p",
3340 				    (void *)vlun, (void *)vpkt->vpkt_path));
3341 			}
3342 			break;
3343 
3344 		case STATUS_RESERVATION_CONFLICT:
3345 			VHCI_DEBUG(1, (CE_WARN, NULL,
3346 			    "!vhci_intr: vlun 0x%p "
3347 			    "reserve conflict on path 0x%p",
3348 			    (void *)vlun, (void *)vpkt->vpkt_path));
3349 			/* FALLTHROUGH */
3350 		default:
3351 			break;
3352 		}
3353 
3354 		/*
3355 		 * Update I/O completion statistics for the path
3356 		 */
3357 		mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3358 
3359 		/*
3360 		 * Command completed successfully, release the dma binding and
3361 		 * destroy the transport side of the packet.
3362 		 */
3363 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3364 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3365 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3366 			if (SCBP_C(pkt) == STATUS_GOOD) {
3367 				ASSERT(vlun->svl_taskq);
3368 				svp->svp_last_pkt_reason = pkt->pkt_reason;
3369 				(void) taskq_dispatch(vlun->svl_taskq,
3370 				    vhci_run_cmd, pkt, KM_SLEEP);
3371 				return;
3372 			}
3373 		}
3374 		if ((SCBP_C(pkt) == STATUS_GOOD) &&
3375 		    (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
3376 			/*
3377 			 * If the action (value in byte 1 of the cdb) is zero,
3378 			 * we're reading keys, and that's the only condition
3379 			 * where we need to be concerned with filtering keys
3380 			 * and potential retries.  Otherwise, we simply signal
3381 			 * the semaphore and move on.
3382 			 */
3383 			if (pkt->pkt_cdbp[1] == 0) {
3384 				/*
3385 				 * If this is the completion of an internal
3386 				 * retry then we need to make sure that the
3387 				 * pkt and tpkt pointers are readjusted so
3388 				 * the calls to scsi_destroy_pkt and pkt_comp
3389 				 * below work correctly.
3390 				 */
3391 				if (vpkt->vpkt_org_vpkt != NULL) {
3392 					pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3393 					tpkt = vpkt->vpkt_org_vpkt->
3394 					    vpkt_tgt_pkt;
3395 
3396 					/*
3397 					 * If this command was issued through
3398 					 * the taskq then we need to clear
3399 					 * this flag for proper processing in
3400 					 * the case of a retry from the target
3401 					 * driver.
3402 					 */
3403 					vpkt->vpkt_state &=
3404 					    ~VHCI_PKT_THRU_TASKQ;
3405 				}
3406 
3407 				/*
3408 				 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3409 				 * vpkt will contain the address of the
3410 				 * original vpkt
3411 				 */
3412 				if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3413 					/*
3414 					 * The command has been resent to get
3415 					 * all the keys from the device.  Don't
3416 					 * complete the command with ssd until
3417 					 * the retry completes.
3418 					 */
3419 					return;
3420 				}
3421 			} else {
3422 				sema_v(&vlun->svl_pgr_sema);
3423 			}
3424 		}
3425 
3426 		break;
3427 
3428 	case CMD_TIMEOUT:
3429 		if ((pkt->pkt_statistics &
3430 		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
3431 
3432 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3433 			    "!scsi vhci timeout invoked\n"));
3434 
3435 			(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3436 			    FALSE, VHCI_DEPTH_ALL);
3437 		}
3438 		MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3439 		tpkt->pkt_statistics |= STAT_ABORTED;
3440 		err_str = timeout_err;
3441 		break;
3442 
3443 	case CMD_TRAN_ERR:
3444 		/*
3445 		 * This status is returned if the transport has sent the cmd
3446 		 * down the link to the target and then some error occurs.
3447 		 * In case of SCSI-II RESERVE cmd, we don't know if the
3448 		 * reservation been accepted by the target or not, so we need
3449 		 * to clear the reservation.
3450 		 */
3451 		if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3452 		    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3453 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3454 			    " cmd_tran_err for scsi-2 reserve cmd\n"));
3455 			if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3456 			    TRUE, VHCI_DEPTH_TARGET)) {
3457 				VHCI_DEBUG(1, (CE_WARN, NULL,
3458 				    "!vhci_intr cmd_tran_err reset failed!"));
3459 			}
3460 		}
3461 		break;
3462 
3463 	case CMD_DEV_GONE:
3464 		/*
3465 		 * If this is the last path then report CMD_DEV_GONE to the
3466 		 * target driver; otherwise report BUSY to trigger a retry.
3467 		 */
3468 		if (vlun->svl_dip &&
3469 		    (mdi_client_get_path_count(vlun->svl_dip) <= 1)) {
3470 			struct scsi_vhci	*vhci;
3471 			vhci = ADDR2VHCI(&tpkt->pkt_address);
3472 			VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3473 			    "cmd_dev_gone on last path\n"));
3474 			(void) vhci_invalidate_mpapi_lu(vhci, vlun);
3475 			break;
3476 		}
3477 
3478 		/* Report CMD_CMPLT-with-BUSY to cause retry. */
3479 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3480 		    "cmd_dev_gone\n"));
3481 		tpkt->pkt_reason = CMD_CMPLT;
3482 		tpkt->pkt_state = STATE_GOT_BUS |
3483 		    STATE_GOT_TARGET | STATE_SENT_CMD |
3484 		    STATE_GOT_STATUS;
3485 		*(tpkt->pkt_scbp) = STATUS_BUSY;
3486 		break;
3487 
3488 	default:
3489 		break;
3490 	}
3491 
3492 	/*
3493 	 * The SCSI-II RESERVE cmd has been serviced by the lower layers, so
3494 	 * clear the flag so the lun is not QUIESCED any longer.
3495 	 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3496 	 * is retried, a taskq shall again be dispatched to service it.  Else
3497 	 * it may lead to a system hang if the retry is within interrupt
3498 	 * context.
3499 	 */
3500 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3501 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3502 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3503 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3504 	}
3505 
3506 	/*
3507 	 * vpkt_org_vpkt should always be NULL here if the retry command
3508 	 * has been successfully processed.  If vpkt_org_vpkt != NULL at
3509 	 * this point, it is an error so restore the original vpkt and
3510 	 * return an error to the target driver so it can retry the
3511 	 * command as appropriate.
3512 	 */
3513 	if (vpkt->vpkt_org_vpkt != NULL) {
3514 		struct vhci_pkt *new_vpkt = vpkt;
3515 		vpkt = vpkt->vpkt_org_vpkt;
3516 
3517 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3518 		    new_vpkt->vpkt_tgt_pkt);
3519 
3520 		/*
3521 		 * Mark this command completion as having an error so that
3522 		 * ssd will retry the command.
3523 		 */
3524 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3525 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3526 
3527 		pkt = vpkt->vpkt_hba_pkt;
3528 		tpkt = vpkt->vpkt_tgt_pkt;
3529 	}
3530 
3531 	if ((err_str != NULL) && (pkt->pkt_reason !=
3532 	    svp->svp_last_pkt_reason)) {
3533 		cdip = vlun->svl_dip;
3534 		vdip = ddi_get_parent(cdip);
3535 		cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3536 		vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s",
3537 		    ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
3538 		    ddi_get_instance(cdip), err_str,
3539 		    mdi_pi_spathname(vpkt->vpkt_path));
3540 		kmem_free(cpath, MAXPATHLEN);
3541 	}
3542 	svp->svp_last_pkt_reason = pkt->pkt_reason;
3543 	VHCI_DECR_PATH_CMDCOUNT(svp);
3544 
3545 	/*
3546 	 * For PARTIAL_DMA, vhci should not free the path.
3547 	 * Target driver will call into vhci_scsi_dmafree or
3548 	 * destroy pkt to release this path.
3549 	 */
3550 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
3551 		scsi_destroy_pkt(pkt);
3552 		vpkt->vpkt_hba_pkt = NULL;
3553 		if (vpkt->vpkt_path) {
3554 			mdi_rele_path(vpkt->vpkt_path);
3555 			vpkt->vpkt_path = NULL;
3556 		}
3557 	}
3558 
3559 	scsi_hba_pkt_comp(tpkt);
3560 }
3561 
3562 /*
3563  * Two possibilities: (1) failover has completed,
3564  * or (2) failover is in progress; update our path states
3565  * for the former case; for the latter case,
3566  * initiate a scsi_watch request to
3567  * determine when failover completes - vlun is HELD
3568  * until failover completes; BUSY is returned to the upper
3569  * layer in both cases
3570  */
3571 static int
3572 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
3573 {
3574 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3575 	struct scsi_pkt		*tpkt;
3576 	scsi_vhci_priv_t	*svp;
3577 	scsi_vhci_lun_t		*vlun;
3578 	struct scsi_vhci	*vhci;
3579 	scsi_vhci_swarg_t	*swarg;
3580 	char			*path;
3581 
3582 	ASSERT(vpkt != NULL);
3583 	tpkt = vpkt->vpkt_tgt_pkt;
3584 	ASSERT(tpkt != NULL);
3585 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3586 	ASSERT(svp != NULL);
3587 	vlun = svp->svp_svl;
3588 	ASSERT(vlun != NULL);
3589 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3590 
3591 	vhci = ADDR2VHCI(&tpkt->pkt_address);
3592 
3593 	if (fostat == SCSI_SENSE_INACTIVE) {
3594 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
3595 		    "detected for %s; updating path states...\n",
3596 		    vlun->svl_lun_wwn));
3597 		/*
3598 		 * set the vlun flag to indicate to the task that the target
3599 		 * port group needs updating
3600 		 */
3601 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3602 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3603 		    vhci_update_pathstates, (void *)vlun, KM_SLEEP);
3604 	} else {
3605 		path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3606 		vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
3607 		    "!%s (%s%d): Waiting for externally initiated failover "
3608 		    "to complete", ddi_pathname(vlun->svl_dip, path),
3609 		    ddi_driver_name(vlun->svl_dip),
3610 		    ddi_get_instance(vlun->svl_dip));
3611 		kmem_free(path, MAXPATHLEN);
3612 		swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
3613 		if (swarg == NULL) {
3614 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
3615 			    "request packet allocation for %s failed....\n",
3616 			    vlun->svl_lun_wwn));
3617 			VHCI_RELEASE_LUN(vlun);
3618 			return (PKT_RETURN);
3619 		}
3620 		swarg->svs_svp = svp;
3621 		swarg->svs_tos = ddi_get_time();
3622 		swarg->svs_pi = vpkt->vpkt_path;
3623 		swarg->svs_release_lun = 0;
3624 		swarg->svs_done = 0;
3625 		/*
3626 		 * place a hold on the path...we don't want it to
3627 		 * vanish while scsi_watch is in progress
3628 		 */
3629 		mdi_hold_path(vpkt->vpkt_path);
3630 		svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3631 		    VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3632 		    (caddr_t)swarg);
3633 	}
3634 	return (BUSY_RETURN);
3635 }
3636 
3637 /*
3638  * vhci_efo_watch_cb:
3639  *	Callback from scsi_watch request to check the failover status.
3640  *	Completion is either due to successful failover or timeout.
3641  *	Upon successful completion, vhci_update_pathstates is called.
3642  *	For the timeout condition, vhci_efo_done is called.
3643  *	Always returns 0 to scsi_watch to keep retrying until vhci_efo_done
3644  *	terminates this request properly in a separate thread.
3645  */
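/*
 * In summary: sense analysis yielding SCSI_SENSE_INACTIVE or
 * SCSI_SENSE_NOFAILOVER, a RESERVATION CONFLICT status, or GOOD status
 * dispatches vhci_update_pathstates; BUSY/QFULL keeps polling; any other
 * result polls until VHCI_EXTFO_TIMEOUT elapses, after which vhci_efo_done
 * is dispatched to tear down the watch.
 */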
3646 
3647 static int
3648 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3649 {
3650 	struct scsi_status		*statusp = resultp->statusp;
3651 	uint8_t				*sensep = (uint8_t *)resultp->sensep;
3652 	struct scsi_pkt			*pkt = resultp->pkt;
3653 	scsi_vhci_swarg_t		*swarg;
3654 	scsi_vhci_priv_t		*svp;
3655 	scsi_vhci_lun_t			*vlun;
3656 	struct scsi_vhci		*vhci;
3657 	dev_info_t			*vdip;
3658 	int				rval, updt_paths;
3659 
3660 	swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3661 	svp = swarg->svs_svp;
3662 	if (swarg->svs_done) {
3663 		/*
3664 		 * Failover already completed or timed out.
3665 		 * Waiting for vhci_efo_done to terminate this scsi_watch.
3666 		 */
3667 		return (0);
3668 	}
3669 
3670 	ASSERT(svp != NULL);
3671 	vlun = svp->svp_svl;
3672 	ASSERT(vlun != NULL);
3673 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3674 	vlun->svl_efo_update_path = 0;
3675 	vdip = ddi_get_parent(vlun->svl_dip);
3676 	vhci = ddi_get_soft_state(vhci_softstate,
3677 	    ddi_get_instance(vdip));
3678 
3679 	updt_paths = 0;
3680 
3681 	if (pkt->pkt_reason != CMD_CMPLT) {
3682 		if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3683 			swarg->svs_release_lun = 1;
3684 			goto done;
3685 		}
3686 		return (0);
3687 	}
3688 	if (*((unsigned char *)statusp) == STATUS_CHECK) {
3689 		rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
3690 		    vlun->svl_fops_ctpriv);
3691 		switch (rval) {
3692 			/*
3693 			 * Only update path states in case path is definitely
3694 			 * inactive, or no failover occurred.  For all other
3695 			 * check conditions continue pinging.  An unexpected
3696 			 * check condition shouldn't cause pinging to complete
3697 			 * prematurely.
3698 			 */
3699 			case SCSI_SENSE_INACTIVE:
3700 			case SCSI_SENSE_NOFAILOVER:
3701 				updt_paths = 1;
3702 				break;
3703 			default:
3704 				if ((ddi_get_time() - swarg->svs_tos)
3705 				    >= VHCI_EXTFO_TIMEOUT) {
3706 					swarg->svs_release_lun = 1;
3707 					goto done;
3708 				}
3709 				return (0);
3710 		}
3711 	} else if (*((unsigned char *)statusp) ==
3712 	    STATUS_RESERVATION_CONFLICT) {
3713 		updt_paths = 1;
3714 	} else if ((*((unsigned char *)statusp)) &
3715 	    (STATUS_BUSY | STATUS_QFULL)) {
3716 		return (0);
3717 	}
3718 	if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3719 	    (updt_paths == 1)) {
3720 		/*
3721 		 * we got here because we had detected an
3722 		 * externally initiated failover; things
3723 		 * have settled down now, so let's
3724 		 * start up a task to update the
3725 		 * path states and target port group
3726 		 */
3727 		vlun->svl_efo_update_path = 1;
3728 		swarg->svs_done = 1;
3729 		vlun->svl_swarg = swarg;
3730 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3731 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3732 		    vhci_update_pathstates, (void *)vlun,
3733 		    KM_SLEEP);
3734 		return (0);
3735 	}
3736 	if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3737 		swarg->svs_release_lun = 1;
3738 		goto done;
3739 	}
3740 	return (0);
3741 done:
3742 	swarg->svs_done = 1;
3743 	(void) taskq_dispatch(vhci->vhci_taskq,
3744 	    vhci_efo_done, (void *)swarg, KM_SLEEP);
3745 	return (0);
3746 }
3747 
3748 /*
3749  * vhci_efo_done:
3750  *	Cleanly terminates scsi_watch and frees up resources.
3751  *	Called as a taskq function from vhci_efo_watch_cb for the EFO timeout
3752  *	condition, or by vhci_update_pathstates invoked during externally
3753  *	initiated failover completion.
3754  */
3755 static void
3756 vhci_efo_done(void *arg)
3757 {
3758 	scsi_vhci_lun_t			*vlun;
3759 	scsi_vhci_swarg_t		*swarg = (scsi_vhci_swarg_t *)arg;
3760 	scsi_vhci_priv_t		*svp = swarg->svs_svp;
3761 	ASSERT(svp);
3762 
3763 	vlun = svp->svp_svl;
3764 	ASSERT(vlun);
3765 
3766 	/* Wait for clean termination of scsi_watch */
3767 	(void) scsi_watch_request_terminate(svp->svp_sw_token,
3768 	    SCSI_WATCH_TERMINATE_ALL_WAIT);
3769 	svp->svp_sw_token = NULL;
3770 
3771 	/* release path and free up resources to indicate failover completion */
3772 	mdi_rele_path(swarg->svs_pi);
3773 	if (swarg->svs_release_lun) {
3774 		VHCI_RELEASE_LUN(vlun);
3775 	}
3776 	kmem_free((void *)swarg, sizeof (*swarg));
3777 }
3778 
3779 /*
3780  * Update the path states
3781  * vlun should be HELD when this is invoked.
3782  * Calls vhci_efo_done to cleanup resources allocated for EFO.
3783  */
3784 void
3785 vhci_update_pathstates(void *arg)
3786 {
3787 	mdi_pathinfo_t			*pip, *npip;
3788 	dev_info_t			*dip;
3789 	struct scsi_failover_ops	*fo;
3790 	struct scsi_vhci_priv		*svp;
3791 	struct scsi_device		*psd;
3792 	struct scsi_path_opinfo		opinfo;
3793 	char				*pclass, *tptr;
3794 	struct scsi_vhci_lun		*vlun = (struct scsi_vhci_lun *)arg;
3795 	int				sps; /* mdi_select_path() status */
3796 	char				*cpath;
3797 	struct scsi_vhci		*vhci;
3798 	struct scsi_pkt			*pkt;
3799 	struct buf			*bp;
3800 	struct scsi_vhci_priv		*svp_conflict = NULL;
3801 
3802 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3803 	dip  = vlun->svl_dip;
3804 	pip = npip = NULL;
3805 
3806 	vhci = ddi_get_soft_state(vhci_softstate,
3807 	    ddi_get_instance(ddi_get_parent(dip)));
3808 
3809 	sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3810 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
3811 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3812 		goto done;
3813 	}
3814 
3815 	fo = vlun->svl_fops;
3816 	do {
3817 		pip = npip;
3818 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3819 		psd = svp->svp_psd;
3820 		if (fo->sfo_path_get_opinfo(psd, &opinfo,
3821 		    vlun->svl_fops_ctpriv) != 0) {
3822 			sps = mdi_select_path(dip, NULL,
3823 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3824 			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3825 			mdi_rele_path(pip);
3826 			continue;
3827 		}
3828 
3829 		if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3830 		    MDI_SUCCESS) {
3831 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3832 			    "!vhci_update_pathstates: prop lookup failed for "
3833 			    "path 0x%p\n", (void *)pip));
3834 			sps = mdi_select_path(dip, NULL,
3835 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3836 			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3837 			mdi_rele_path(pip);
3838 			continue;
3839 		}
3840 
3841 		/*
3842 		 * Need to update the "path-class" property
3843 		 * value in the device tree if different
3844 		 * from the existing value.
3845 		 */
3846 		if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3847 			(void) mdi_prop_update_string(pip, "path-class",
3848 			    opinfo.opinfo_path_attr);
3849 		}
3850 
3851 		/*
3852 		 * Only change the state if needed. i.e. Don't call
3853 		 * mdi_pi_set_state to ONLINE a path if it's already
3854 		 * ONLINE. Same for STANDBY paths.
3855 		 */
3856 
3857 		if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
3858 		    opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
3859 			if (!(MDI_PI_IS_ONLINE(pip))) {
3860 				VHCI_DEBUG(1, (CE_NOTE, NULL,
3861 				    "!vhci_update_pathstates: marking path"
3862 				    " 0x%p as ONLINE\n", (void *)pip));
3863 				cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3864 				vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3865 				    "(%s%d): path %s "
3866 				    "is now ONLINE because of "
3867 				    "an externally initiated failover",
3868 				    ddi_pathname(dip, cpath),
3869 				    ddi_driver_name(dip),
3870 				    ddi_get_instance(dip),
3871 				    mdi_pi_spathname(pip));
3872 				kmem_free(cpath, MAXPATHLEN);
3873 				mdi_pi_set_state(pip,
3874 				    MDI_PATHINFO_STATE_ONLINE);
3875 				mdi_pi_set_preferred(pip,
3876 				    opinfo.opinfo_preferred);
3877 				tptr = kmem_alloc(strlen
3878 				    (opinfo.opinfo_path_attr)+1, KM_SLEEP);
3879 				(void) strlcpy(tptr, opinfo.opinfo_path_attr,
3880 				    (strlen(opinfo.opinfo_path_attr)+1));
3881 				mutex_enter(&vlun->svl_mutex);
3882 				if (vlun->svl_active_pclass != NULL) {
3883 					kmem_free(vlun->svl_active_pclass,
3884 					    strlen(vlun->svl_active_pclass)+1);
3885 				}
3886 				vlun->svl_active_pclass = tptr;
3887 				if (vlun->svl_waiting_for_activepath) {
3888 					vlun->svl_waiting_for_activepath = 0;
3889 				}
3890 				mutex_exit(&vlun->svl_mutex);
3891 			} else if (MDI_PI_IS_ONLINE(pip)) {
3892 				if (strcmp(pclass, opinfo.opinfo_path_attr)
3893 				    != 0) {
3894 					mdi_pi_set_preferred(pip,
3895 					    opinfo.opinfo_preferred);
3896 					mutex_enter(&vlun->svl_mutex);
3897 					if (vlun->svl_active_pclass == NULL ||
3898 					    strcmp(opinfo.opinfo_path_attr,
3899 					    vlun->svl_active_pclass) != 0) {
3900 						mutex_exit(&vlun->svl_mutex);
3901 						tptr = kmem_alloc(strlen
3902 						    (opinfo.opinfo_path_attr)+1,
3903 						    KM_SLEEP);
3904 						(void) strlcpy(tptr,
3905 						    opinfo.opinfo_path_attr,
3906 						    (strlen
3907 						    (opinfo.opinfo_path_attr)
3908 						    +1));
3909 						mutex_enter(&vlun->svl_mutex);
3910 					} else {
3911 						/*
3912 						 * No need to update
3913 						 * svl_active_pclass
3914 						 */
3915 						tptr = NULL;
3916 						mutex_exit(&vlun->svl_mutex);
3917 					}
3918 					if (tptr) {
3919 						if (vlun->svl_active_pclass
3920 						    != NULL) {
3921 							kmem_free(vlun->
3922 							    svl_active_pclass,
3923 							    strlen(vlun->
3924 							    svl_active_pclass)
3925 							    +1);
3926 						}
3927 						vlun->svl_active_pclass = tptr;
3928 						mutex_exit(&vlun->svl_mutex);
3929 					}
3930 				}
3931 			}
3932 
3933 			/* Check for Reservation Conflict */
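			/*
			 * The probe below is a one-block READ issued with
			 * FLAG_NOINTR and pinned to this path via
			 * pkt_path_instance; a RESERVATION CONFLICT status
			 * identifies the path holding the stale SCSI-II
			 * reservation that must be cleared by a reset.
			 */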
3934 			bp = scsi_alloc_consistent_buf(
3935 			    &svp->svp_psd->sd_address, (struct buf *)NULL,
3936 			    DEV_BSIZE, B_READ, NULL, NULL);
3937 			if (!bp) {
3938 				VHCI_DEBUG(1, (CE_NOTE, NULL,
3939 				    "!vhci_update_pathstates: No resources "
3940 				    "(buf)\n"));
3941 				mdi_rele_path(pip);
3942 				goto done;
3943 			}
3944 			pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
3945 			    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
3946 			    PKT_CONSISTENT, NULL, NULL);
3947 			if (pkt) {
3948 				(void) scsi_setup_cdb((union scsi_cdb *)
3949 				    (uintptr_t)pkt->pkt_cdbp, SCMD_READ, 1, 1,
3950 				    0);
3951 				pkt->pkt_time = 3*30;
3952 				pkt->pkt_flags = FLAG_NOINTR;
3953 				pkt->pkt_path_instance =
3954 				    mdi_pi_get_path_instance(pip);
3955 
3956 				if ((scsi_transport(pkt) == TRAN_ACCEPT) &&
3957 				    (pkt->pkt_reason == CMD_CMPLT) &&
3958 				    (SCBP_C(pkt) ==
3959 				    STATUS_RESERVATION_CONFLICT)) {
3960 					VHCI_DEBUG(1, (CE_NOTE, NULL,
3961 					    "!vhci_update_pathstates: reserv. "
3962 					    "conflict to be resolved on 0x%p\n",
3963 					    (void *)pip));
3964 					svp_conflict = svp;
3965 				}
3966 				scsi_destroy_pkt(pkt);
3967 			}
3968 			scsi_free_consistent_buf(bp);
3969 		} else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
3970 		    !(MDI_PI_IS_STANDBY(pip))) {
3971 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3972 			    "!vhci_update_pathstates: marking path"
3973 			    " 0x%p as STANDBY\n", (void *)pip));
3974 			cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3975 			vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3976 			    "(%s%d): path %s "
3977 			    "is now STANDBY because of "
3978 			    "an externally initiated failover",
3979 			    ddi_pathname(dip, cpath),
3980 			    ddi_driver_name(dip),
3981 			    ddi_get_instance(dip),
3982 			    mdi_pi_spathname(pip));
3983 			kmem_free(cpath, MAXPATHLEN);
3984 			mdi_pi_set_state(pip,
3985 			    MDI_PATHINFO_STATE_STANDBY);
3986 			mdi_pi_set_preferred(pip,
3987 			    opinfo.opinfo_preferred);
3988 			mutex_enter(&vlun->svl_mutex);
3989 			if (vlun->svl_active_pclass != NULL) {
3990 				if (strcmp(vlun->svl_active_pclass,
3991 				    opinfo.opinfo_path_attr) == 0) {
3992 					kmem_free(vlun->
3993 					    svl_active_pclass,
3994 					    strlen(vlun->
3995 					    svl_active_pclass)+1);
3996 					vlun->svl_active_pclass = NULL;
3997 				}
3998 			}
3999 			mutex_exit(&vlun->svl_mutex);
4000 		}
4001 		(void) mdi_prop_free(pclass);
4002 		sps = mdi_select_path(dip, NULL,
4003 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
4004 		    MDI_SELECT_NO_PREFERRED), pip, &npip);
4005 		mdi_rele_path(pip);
4006 
4007 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
4008 
4009 	/*
4010 	 * Check to see if this vlun has an active SCSI-II RESERVE.  If so
4011 	 * clear the reservation by sending a reset, so the host doesn't
4012 	 * receive a reservation conflict.  The reset has to be sent via a
4013 	 * working path.  Let's use a path referred to by svp_conflict as it
4014 	 * should be working.
4015 	 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun.  Also notify ssd
4016 	 * of the reset, explicitly.
4017 	 */
4018 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4019 		if (svp_conflict && (vlun->svl_xlf_capable == 0)) {
4020 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathstates:"
4021 			    " sending recovery reset on 0x%p, path_state: %x",
4022 			    svp_conflict->svp_psd->sd_private,
4023 			    mdi_pi_get_state((mdi_pathinfo_t *)
4024 			    svp_conflict->svp_psd->sd_private)));
4025 
4026 			(void) vhci_recovery_reset(vlun,
4027 			    &svp_conflict->svp_psd->sd_address, FALSE,
4028 			    VHCI_DEPTH_TARGET);
4029 		}
4030 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
4031 		mutex_enter(&vhci->vhci_mutex);
4032 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
4033 		    &vhci->vhci_reset_notify_listf);
4034 		mutex_exit(&vhci->vhci_mutex);
4035 	}
4036 	if (vlun->svl_flags & VLUN_UPDATE_TPG) {
4037 		/*
4038 		 * Update the AccessState of related MP-API TPGs
4039 		 */
4040 		(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
4041 		vlun->svl_flags &= ~VLUN_UPDATE_TPG;
4042 	}
4043 done:
4044 	if (vlun->svl_efo_update_path) {
4045 		vlun->svl_efo_update_path = 0;
4046 		vhci_efo_done(vlun->svl_swarg);
4047 		vlun->svl_swarg = 0;
4048 	}
4049 	VHCI_RELEASE_LUN(vlun);
4050 }
4051 
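/*
 * vhci_pathinfo_init:
 *	mdi callback to set up vhci private data for a new pathinfo node:
 *	look up (or allocate) the vlun, build a per-path scsi_device, and
 *	invoke the pHCI's tran_tgt_init(9E) entry point if one exists.
 */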
4052 /* ARGSUSED */
4053 static int
4054 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4055 {
4056 	scsi_hba_tran_t		*hba = NULL;
4057 	struct scsi_device	*psd = NULL;
4058 	scsi_vhci_lun_t		*vlun = NULL;
4059 	dev_info_t		*pdip = NULL;
4060 	dev_info_t		*tgt_dip;
4061 	struct scsi_vhci	*vhci;
4062 	char			*guid;
4063 	scsi_vhci_priv_t	*svp = NULL;
4064 	int			rval = MDI_FAILURE;
4065 	int			vlun_alloced = 0;
4066 
4067 	ASSERT(vdip != NULL);
4068 	ASSERT(pip != NULL);
4069 
4070 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4071 	ASSERT(vhci != NULL);
4072 
4073 	pdip = mdi_pi_get_phci(pip);
4074 	ASSERT(pdip != NULL);
4075 
4076 	hba = ddi_get_driver_private(pdip);
4077 	ASSERT(hba != NULL);
4078 
4079 	tgt_dip = mdi_pi_get_client(pip);
4080 	ASSERT(tgt_dip != NULL);
4081 
4082 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
4083 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
4084 		VHCI_DEBUG(1, (CE_WARN, NULL,
4085 		    "vhci_pathinfo_init: lun guid property failed"));
4086 		goto failure;
4087 	}
4088 
4089 	vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
4090 	ddi_prop_free(guid);
4091 
4092 	vlun->svl_dip = tgt_dip;
4093 
4094 	svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
4095 	svp->svp_svl = vlun;
4096 
4097 	/*
4098 	 * Initialize svl_lb_policy_save only for a newly allocated vlun.  Writing
4099 	 * to svl_lb_policy_save later could accidentally overwrite saved lb
4100 	 * policy.
4101 	 */
4102 	if (vlun_alloced) {
4103 		vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
4104 	}
4105 
4106 	mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
4107 	cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);
4108 
4109 	psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
4110 	mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
4111 
4112 	if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4113 		/*
4114 		 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to
4115 		 * scsi_device in the scsi_address structure.  This allows an
4116 		 * scsi_device in the scsi_address structure.  This allows
4117 		 * an HBA driver to find its scsi_device(9S) and
4118 		 * scsi_address(9S) by using scsi_address_device(9F) and
4119 		 * scsi_device_hba_private_get(9F)).
4120 		 * scsi_device_hba_private_get(9F).
4121 		psd->sd_address.a.a_sd = psd;
4122 	} else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4123 		/*
4124 		 * Clone the transport structure if requested; self-
4125 		 * enumerating HBAs always need to use cloning.
4126 		 */
4127 		scsi_hba_tran_t	*clone =
4128 		    kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
4129 		bcopy(hba, clone, sizeof (scsi_hba_tran_t));
4130 		hba = clone;
4131 		hba->tran_sd = psd;
4132 	} else {
4133 		/*
4134 		 * SPI pHCI unit-address. If we ever need to support this
4135 		 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo
4136 		 * node unit-address properties.  For now we fail...
4137 		 */
4138 		goto failure;
4139 	}
4140 
4141 	psd->sd_dev = tgt_dip;
4142 	psd->sd_address.a_hba_tran = hba;
4143 
4144 	/*
4145 	 * Mark scsi_device as being associated with a pathinfo node. For
4146 	 * a scsi_device structure associated with a devinfo node,
4147 	 * scsi_ctlops_initchild sets this field to NULL.
4148 	 */
4149 	psd->sd_pathinfo = pip;
4150 
4151 	/*
4152 	 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with
4153 	 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all
4154 	 * mpxio-capable pHCI drivers use SCSA enumeration services (or at
4155 	 * least have been changed to use sd_pathinfo instead).
4156 	 */
4157 	psd->sd_private = (caddr_t)pip;
4158 
4159 	/* See scsi_hba.c for info on sd_tran_safe kludge */
4160 	psd->sd_tran_safe = hba;
4161 
4162 	svp->svp_psd = psd;
4163 	mdi_pi_set_vhci_private(pip, (caddr_t)svp);
4164 
4165 	/*
4166 	 * call hba's target init entry point if it exists
4167 	 */
4168 	if (hba->tran_tgt_init != NULL) {
4169 		psd->sd_tran_tgt_free_done = 0;
4170 		if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
4171 		    hba, psd)) != DDI_SUCCESS) {
4172 			VHCI_DEBUG(1, (CE_WARN, pdip,
4173 			    "!vhci_pathinfo_init: tran_tgt_init failed for "
4174 			    "path=0x%p rval=%x", (void *)pip, rval));
4175 			goto failure;
4176 		}
4177 	}
4178 
4179 	svp->svp_new_path = 1;
4180 
4181 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
4182 	    (void *)pip));
4183 	return (MDI_SUCCESS);
4184 
4185 failure:
4186 	if (psd) {
4187 		mutex_destroy(&psd->sd_mutex);
4188 		kmem_free(psd, sizeof (*psd));
4189 	}
4190 	if (svp) {
4191 		mdi_pi_set_vhci_private(pip, NULL);
4192 		mutex_destroy(&svp->svp_mutex);
4193 		cv_destroy(&svp->svp_cv);
4194 		kmem_free(svp, sizeof (*svp));
4195 	}
4196 	if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE))
4197 		kmem_free(hba, sizeof (scsi_hba_tran_t));
4198 
4199 	if (vlun_alloced)
4200 		vhci_lun_free(tgt_dip);
4201 
4202 	return (rval);
4203 }
4204 
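/*
 * vhci_pathinfo_uninit:
 *	undo vhci_pathinfo_init: invoke tran_tgt_free(9E) if needed, free
 *	the per-path scsi_device and vhci private data, and free the vlun
 *	when the last path to the client goes away.
 */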
4205 /* ARGSUSED */
4206 static int
4207 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4208 {
4209 	scsi_hba_tran_t		*hba = NULL;
4210 	struct scsi_device	*psd = NULL;
4211 	dev_info_t		*pdip = NULL;
4212 	dev_info_t		*cdip = NULL;
4213 	scsi_vhci_priv_t	*svp = NULL;
4214 
4215 	ASSERT(vdip != NULL);
4216 	ASSERT(pip != NULL);
4217 
4218 	pdip = mdi_pi_get_phci(pip);
4219 	ASSERT(pdip != NULL);
4220 
4221 	cdip = mdi_pi_get_client(pip);
4222 	ASSERT(cdip != NULL);
4223 
4224 	hba = ddi_get_driver_private(pdip);
4225 	ASSERT(hba != NULL);
4226 
4227 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT);
4228 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4229 	if (svp == NULL) {
4230 		/* path already freed. Nothing to do. */
4231 		return (MDI_SUCCESS);
4232 	}
4233 
4234 	psd = svp->svp_psd;
4235 	ASSERT(psd != NULL);
4236 
4237 	if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4238 		/* Verify plumbing */
4239 		ASSERT(psd->sd_address.a_hba_tran == hba);
4240 		ASSERT(psd->sd_address.a.a_sd == psd);
4241 	} else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4242 		/* Switch to cloned scsi_hba_tran(9S) structure */
4243 		hba = psd->sd_address.a_hba_tran;
4244 		ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
4245 		ASSERT(hba->tran_sd == psd);
4246 	}
4247 
4248 	if ((hba->tran_tgt_free != NULL) && !psd->sd_tran_tgt_free_done) {
4249 		(*hba->tran_tgt_free) (pdip, cdip, hba, psd);
4250 		psd->sd_tran_tgt_free_done = 1;
4251 	}
4252 	mutex_destroy(&psd->sd_mutex);
4253 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4254 		kmem_free(hba, sizeof (*hba));
4255 	}
4256 
4257 	mdi_pi_set_vhci_private(pip, NULL);
4258 
4259 	/*
4260 	 * Free the pathinfo related scsi_device inquiry data. Note that this
4261 	 * matches what happens for scsi_hba.c devinfo case at uninitchild time.
4262 	 */
4263 	if (psd->sd_inq)
4264 		kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry));
4265 	kmem_free((caddr_t)psd, sizeof (*psd));
4266 
4267 	mutex_destroy(&svp->svp_mutex);
4268 	cv_destroy(&svp->svp_cv);
4269 	kmem_free((caddr_t)svp, sizeof (*svp));
4270 
4271 	/*
4272 	 * If this is the last path to the client,
4273 	 * then free up the vlun as well.
4274 	 */
4275 	if (mdi_client_get_path_count(cdip) == 1) {
4276 		vhci_lun_free(cdip);
4277 	}
4278 
4279 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
4280 	    (void *)pip));
4281 	return (MDI_SUCCESS);
4282 }
4283 
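/*
 * vhci_pathinfo_state_change:
 *	mdi callback invoked on pathinfo state transitions.  For extended
 *	(MDI_EXT_STATE_CHANGE) operations it handles reservation recovery
 *	on disable and PGR revalidation/auto-failback on enable; otherwise
 *	it routes ONLINE/OFFLINE transitions to vhci_pathinfo_online() and
 *	vhci_pathinfo_offline() respectively.
 */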
4284 /* ARGSUSED */
4285 static int
4286 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
4287     mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
4288 {
4289 	int			rval = MDI_SUCCESS;
4290 	scsi_vhci_priv_t	*svp;
4291 	scsi_vhci_lun_t		*vlun;
4292 	int			held;
4293 	int			op = (flags & 0xf00) >> 8;
4294 	struct scsi_vhci	*vhci;
4295 
4296 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4297 
4298 	if (flags & MDI_EXT_STATE_CHANGE) {
4299 		/*
4300 		 * We do not want to issue any commands down the path in case
4301 		 * the sync flag is set.  Lower layers might not be ready to accept
4302 		 * any I/O commands.
4303 		 */
4304 		if (op == DRIVER_DISABLE)
4305 			return (MDI_SUCCESS);
4306 
4307 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4308 		if (svp == NULL) {
4309 			return (MDI_FAILURE);
4310 		}
4311 		vlun = svp->svp_svl;
4312 
4313 		if (flags & MDI_BEFORE_STATE_CHANGE) {
4314 			/*
4315 			 * Hold the LUN.
4316 			 */
4317 			VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
4318 			if (flags & MDI_DISABLE_OP)  {
4319 				/*
4320 				 * Issue scsi reset if it happens to be
4321 				 * reserved path.
4322 				 */
4323 				if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4324 					/*
4325 					 * if reservation pending on
4326 					 * this path, dont' mark the
4327 					 * this path, don't mark the
4328 					 */
4329 					if (op == DRIVER_DISABLE_TRANSIENT) {
4330 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4331 						    "!vhci_pathinfo"
4332 						    "_state_change (pip:%p): "
4333 						    " reservation: fail busy\n",
4334 						    (void *)pip));
4335 						return (MDI_FAILURE);
4336 					}
4337 					if (pip == vlun->svl_resrv_pip) {
4338 						if (vhci_recovery_reset(
4339 						    svp->svp_svl,
4340 						    &svp->svp_psd->sd_address,
4341 						    TRUE,
4342 						    VHCI_DEPTH_TARGET) == 0) {
4343 							VHCI_DEBUG(1,
4344 							    (CE_NOTE, NULL,
4345 							    "!vhci_pathinfo"
4346 							    "_state_change "
4347 							    " (pip:%p): "
4348 							    "reset failed, "
4349 							    "give up!\n",
4350 							    (void *)pip));
4351 						}
4352 						vlun->svl_flags &=
4353 						    ~VLUN_RESERVE_ACTIVE_FLG;
4354 					}
4355 				}
4356 			} else if (flags & MDI_ENABLE_OP)  {
4357 				if (((vhci->vhci_conf_flags &
4358 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4359 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4360 				    MDI_PI_IS_USER_DISABLE(pip) &&
4361 				    MDI_PI_IS_STANDBY(pip)) {
4362 					struct scsi_failover_ops	*fo;
4363 					char *best_pclass, *pclass = NULL;
4364 					int  best_class, rv;
4365 					/*
4366 					 * Failback if enabling a standby path
4367 					 * and it is the primary class or
4368 					 * preferred class
4369 					 */
4370 					best_class = mdi_pi_get_preferred(pip);
4371 					if (best_class == 0) {
4372 						/*
4373 						 * if not preferred - compare
4374 						 * path-class with class
4375 						 */
4376 						fo = vlun->svl_fops;
4377 						(void) fo->sfo_pathclass_next(
4378 						    NULL, &best_pclass,
4379 						    vlun->svl_fops_ctpriv);
4380 						pclass = NULL;
4381 						rv = mdi_prop_lookup_string(pip,
4382 						    "path-class", &pclass);
4383 						if (rv != MDI_SUCCESS ||
4384 						    pclass == NULL) {
4385 							vhci_log(CE_NOTE, vdip,
4386 							    "!path-class "
4387 							    " lookup "
4388 							    "failed. rv: %d "
4389 							    "class: %p", rv,
4390 							    (void *)pclass);
4391 						} else if (strncmp(pclass,
4392 						    best_pclass,
4393 						    strlen(best_pclass)) == 0) {
4394 							best_class = 1;
4395 						}
4396 						if (rv == MDI_SUCCESS &&
4397 						    pclass != NULL) {
4398 							rv = mdi_prop_free(
4399 							    pclass);
4400 							if (rv !=
4401 							    DDI_PROP_SUCCESS) {
4402 								vhci_log(
4403 								    CE_NOTE,
4404 								    vdip,
4405 								    "!path-"
4406 								    "class"
4407 								    " free"
4408 								    " failed"
4409 								    " rv: %d"
4410 								    " class: "
4411 								    "%p",
4412 								    rv,
4413 								    (void *)
4414 								    pclass);
4415 							}
4416 						}
4417 					}
4418 					if (best_class == 1) {
4419 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4420 						    "preferred path: %p "
4421 						    "USER_DISABLE->USER_ENABLE "
4422 						    "transition for lun %s\n",
4423 						    (void *)pip,
4424 						    vlun->svl_lun_wwn));
4425 						(void) taskq_dispatch(
4426 						    vhci->vhci_taskq,
4427 						    vhci_initiate_auto_failback,
4428 						    (void *) vlun, KM_SLEEP);
4429 					}
4430 				}
4431 				/*
4432 				 * if PGR is active, revalidate key and
4433 				 * register on this path also, if key is
4434 				 * still valid
4435 				 */
4436 				sema_p(&vlun->svl_pgr_sema);
4437 				if (vlun->svl_pgr_active)
4438 					(void)
4439 					    vhci_pgr_validate_and_register(svp);
4440 				sema_v(&vlun->svl_pgr_sema);
4441 				/*
4442 				 * Inform target driver about any
4443 				 * reservations to be reinstated if target
4444 				 * has dropped reservation during the busy
4445 				 * period.
4446 				 */
4447 				mutex_enter(&vhci->vhci_mutex);
4448 				scsi_hba_reset_notify_callback(
4449 				    &vhci->vhci_mutex,
4450 				    &vhci->vhci_reset_notify_listf);
4451 				mutex_exit(&vhci->vhci_mutex);
4452 			}
4453 		}
4454 		if (flags & MDI_AFTER_STATE_CHANGE) {
4455 			if (flags & MDI_ENABLE_OP)  {
4456 				mutex_enter(&vhci_global_mutex);
4457 				cv_broadcast(&vhci_cv);
4458 				mutex_exit(&vhci_global_mutex);
4459 			}
4460 			if (vlun->svl_setcap_done) {
4461 				(void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
4462 				    "sector-size", vlun->svl_sector_size,
4463 				    1, pip);
4464 			}
4465 
4466 			/*
4467 			 * Release the LUN
4468 			 */
4469 			VHCI_RELEASE_LUN(vlun);
4470 
4471 			/*
4472 			 * Path transition is complete.
4473 			 * Run the callback to tell the target driver to
4474 			 * retry, preventing IO starvation.
4475 			 */
4476 			if (scsi_callback_id != 0) {
4477 				ddi_run_callback(&scsi_callback_id);
4478 			}
4479 		}
4480 	} else {
4481 		switch (state) {
4482 		case MDI_PATHINFO_STATE_ONLINE:
4483 			rval = vhci_pathinfo_online(vdip, pip, flags);
4484 			break;
4485 
4486 		case MDI_PATHINFO_STATE_OFFLINE:
4487 			rval = vhci_pathinfo_offline(vdip, pip, flags);
4488 			break;
4489 
4490 		default:
4491 			break;
4492 		}
4493 		/*
4494 		 * Path transition is complete.
4495 		 * Run the callback to tell the target driver to
4496 		 * retry, preventing IO starvation.
4497 		 */
4498 		if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
4499 			ddi_run_callback(&scsi_callback_id);
4500 		}
4501 		return (rval);
4502 	}
4503 
4504 	return (MDI_SUCCESS);
4505 }
4506 
4507 /*
4508  * Parse the mpxio load balancing options. The datanameptr
4509  * will point to a string containing the load-balance-options value.
4510  * The load-balance-options value will be a property that
4511  * defines the load-balance algorithm and any arguments to that
4512  * algorithm.
4513  * For example:
4514  * device-type-mpxio-options-list=
4515  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4516  * "device-type=SUN     SE6920", "round-robin-options";
4517  * logical-block-options="load-balance=logical-block", "region-size=15";
4518  * round-robin-options="load-balance=round-robin";
4519  *
4520  * If load-balance is not defined, the load-balance algorithm will
4521  * default to the global setting.  There will be default values assigned
4522  * to the arguments (region-size=18) and if an argument is one
4523  * that is not known, it will be ignored.
4524  */
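/*
 * Note: ddi_getlongprop() returns the option strings as a single buffer of
 * NUL-separated entries, so the loop below walks the buffer in strides of
 * strlen(entry) + 1 until config_list_len is consumed.
 */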
4525 static void
4526 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
4527 	caddr_t datanameptr)
4528 {
4529 	char			*dataptr, *next_entry;
4530 	caddr_t			config_list	= NULL;
4531 	int			config_list_len = 0, list_len = 0;
4532 	int			region_size = -1;
4533 	client_lb_t		load_balance = LOAD_BALANCE_NONE;
4534 
4535 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
4536 	    (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
4537 		return;
4538 	}
4539 
4540 	list_len = config_list_len;
4541 	next_entry = config_list;
4542 	while (config_list_len > 0) {
4543 		dataptr = next_entry;
4544 
4545 		if (strncmp(mdi_load_balance, dataptr,
4546 		    strlen(mdi_load_balance)) == 0) {
4547 			/* get the load-balance scheme */
4548 			dataptr += strlen(mdi_load_balance) + 1;
4549 			if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
4550 				(void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
4551 				load_balance = LOAD_BALANCE_RR;
4552 			} else if (strcmp(dataptr,
4553 			    LOAD_BALANCE_PROP_LBA) == 0) {
4554 				(void) mdi_set_lb_policy(cdip,
4555 				    LOAD_BALANCE_LBA);
4556 				load_balance = LOAD_BALANCE_LBA;
4557 			} else if (strcmp(dataptr,
4558 			    LOAD_BALANCE_PROP_NONE) == 0) {
4559 				(void) mdi_set_lb_policy(cdip,
4560 				    LOAD_BALANCE_NONE);
4561 				load_balance = LOAD_BALANCE_NONE;
4562 			}
4563 		} else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
4564 		    strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
4565 			int	i = 0;
4566 			char	*ptr;
4567 			char	*tmp;
4568 
4569 			tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
4570 			/* check for numeric value */
4571 			for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
4572 				if (!isdigit(*ptr)) {
4573 					cmn_err(CE_WARN,
4574 					    "Illegal region size: %s."
4575 					    " Setting to default value: %d",
4576 					    tmp,
4577 					    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4578 					region_size =
4579 					    LOAD_BALANCE_DEFAULT_REGION_SIZE;
4580 					break;
4581 				}
4582 			}
4583 			if (i >= strlen(tmp)) {
4584 				region_size = stoi(&tmp);
4585 			}
4586 			(void) mdi_set_lb_region_size(cdip, region_size);
4587 		}
4588 		config_list_len -= (strlen(next_entry) + 1);
4589 		next_entry += strlen(next_entry) + 1;
4590 	}
4591 #ifdef DEBUG
4592 	if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
4593 		VHCI_DEBUG(1, (CE_NOTE, dip,
4594 		    "!vhci_parse_mpxio_lb_options: region-size: %d "
4595 		    "only valid for load-balance=logical-block\n",
4596 		    region_size));
4597 	}
4598 #endif
4599 	if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
4600 		VHCI_DEBUG(1, (CE_NOTE, dip,
4601 		    "!vhci_parse_mpxio_lb_options: No region-size"
4602 		    " defined for load-balance=logical-block."
4603 		    " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
4604 		(void) mdi_set_lb_region_size(cdip,
4605 		    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4606 	}
4607 	if (list_len > 0) {
4608 		kmem_free(config_list, list_len);
4609 	}
4610 }
4611 
4612 /*
4613  * Parse the device-type-mpxio-options-list looking for the key of
4614  * "load-balance-options". If found, parse the load balancing options.
4615  * See the comment above vhci_get_device_type_mpxio_options()
4616  * for the device-type-mpxio-options-list format.
4617  */
4618 static void
4619 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4620 		caddr_t datanameptr, int list_len)
4621 {
4622 	char		*dataptr;
4623 	int		len;
4624 
4625 	/*
4626 	 * get the data list
4627 	 */
4628 	dataptr = datanameptr;
4629 	len = 0;
4630 	while (len < list_len &&
4631 	    strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
4632 	    != 0) {
4633 		if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
4634 		    strlen(LOAD_BALANCE_OPTIONS)) == 0) {
4635 			len += strlen(LOAD_BALANCE_OPTIONS) + 1;
4636 			dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
4637 			vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
4638 		}
4639 		len += strlen(dataptr) + 1;
4640 		dataptr += strlen(dataptr) + 1;
4641 	}
4642 }
4643 
4644 /*
4645  * Check the inquiry string returned from the device against the device-type.
4646  * Check for the existence of the device-type-mpxio-options-list and
4647  * if found parse the list checking for a match with the device-type
4648  * value and the inquiry string returned from the device. If a match
4649  * is found, parse the mpxio options list. The format of the
4650  * device-type-mpxio-options-list is:
4651  * device-type-mpxio-options-list=
4652  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4653  * "device-type=SUN     SE6920", "round-robin-options";
4654  * logical-block-options="load-balance=logical-block", "region-size=15";
4655  * round-robin-options="load-balance=round-robin";
4656  */
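/*
 * Note that the "device-type=" value may be longer than the 8-byte inq_vid
 * field; in that case the bcmp() below intentionally extends into the
 * adjacent inq_pid bytes, which is how a vendor+product string such as
 * "SUN     SE6920" matches.
 */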
4657 void
4658 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4659 	struct scsi_device *devp)
4660 {
4661 
4662 	caddr_t			config_list	= NULL;
4663 	caddr_t			vidptr, datanameptr;
4664 	int			vidlen, dupletlen = 0;
4665 	int			config_list_len = 0, len;
4666 	struct scsi_inquiry	*inq = devp->sd_inq;
4667 
4668 	/*
4669 	 * Look up the device-type-mpxio-options-list and walk through
4670 	 * the list, comparing the vendor id from the earlier inquiry
4671 	 * command with the vids in the list; if there is a match, look up
4672 	 * the mpxio-options value.
4673 	 */
4674 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
4675 	    MPXIO_OPTIONS_LIST,
4676 	    (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {
4677 
4678 		/*
4679 		 * Compare vids in each duplet - if it matches,
4680 		 * parse the mpxio options list.
4681 		 */
4682 		for (len = config_list_len, vidptr = config_list; len > 0;
4683 		    len -= dupletlen) {
4684 
4685 			dupletlen = 0;
4686 
4687 			if (strlen(vidptr) != 0 &&
4688 			    strncmp(vidptr, DEVICE_TYPE_STR,
4689 			    strlen(DEVICE_TYPE_STR)) == 0) {
4690 				/* point to next duplet */
4691 				datanameptr = vidptr + strlen(vidptr) + 1;
4692 				/* add len of this duplet */
4693 				dupletlen += strlen(vidptr) + 1;
4694 				/* get to device type */
4695 				vidptr += strlen(DEVICE_TYPE_STR) + 1;
4696 				vidlen = strlen(vidptr);
4697 				if ((vidlen != 0) &&
4698 				    bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
4699 					vhci_parse_mpxio_options(dip, cdip,
4700 					    datanameptr, len - dupletlen);
4701 					break;
4702 				}
4703 				/* get to next duplet */
4704 				vidptr += strlen(vidptr) + 1;
4705 			}
4706 			/* get to the next device-type */
4707 			while (len - dupletlen > 0 &&
4708 			    strlen(vidptr) != 0 &&
4709 			    strncmp(vidptr, DEVICE_TYPE_STR,
4710 			    strlen(DEVICE_TYPE_STR)) != 0) {
4711 				dupletlen += strlen(vidptr) + 1;
4712 				vidptr += strlen(vidptr) + 1;
4713 			}
4714 		}
4715 		if (config_list_len > 0) {
4716 			kmem_free(config_list, config_list_len);
4717 		}
4718 	}
4719 }
4720 
4721 static int
4722 vhci_update_pathinfo(struct scsi_device *psd,  mdi_pathinfo_t *pip,
4723 	struct scsi_failover_ops *fo,
4724 	scsi_vhci_lun_t		*vlun,
4725 	struct scsi_vhci	*vhci)
4726 {
4727 	struct scsi_path_opinfo		opinfo;
4728 	char				*pclass, *best_pclass;
4729 	char				*resrv_pclass = NULL;
4730 	int				force_rereserve = 0;
4731 	int				update_pathinfo_done = 0;
4732 
4733 	if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) {
4734 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
4735 		    "Failed to get operation info for path:%p\n", (void *)pip));
4736 		return (MDI_FAILURE);
4737 	}
4738 	/* set the xlf capable flag in the vlun for future use */
4739 	vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
4740 	(void) mdi_prop_update_string(pip, "path-class",
4741 	    opinfo.opinfo_path_attr);
4742 
4743 	pclass = opinfo.opinfo_path_attr;
4744 	if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
4745 		mutex_enter(&vlun->svl_mutex);
4746 		if (vlun->svl_active_pclass != NULL) {
4747 			if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
4748 				mutex_exit(&vlun->svl_mutex);
4749 				/*
4750 				 * Externally initiated failover has happened;
4751 				 * force the path state to be STANDBY/ONLINE,
4752 				 * next IO will trigger failover and thus
4753 				 * sync-up the pathstates.  Reason we don't
4754 				 * sync-up immediately by invoking
4755 				 * vhci_update_pathstates() is because it
4756 				 * needs a VHCI_HOLD_LUN() and we don't
4757 				 * want to block here.
4758 				 *
4759 				 * Further, if the device is an ALUA device,
4760 				 * then failure to exactly match 'pclass' and
4761 				 * 'svl_active_pclass'(as is the case here)
4762 				 * indicates that the currently active path
4763 				 * is a 'non-optimized' path - which means
4764 				 * that 'svl_active_pclass' needs to be
4765 				 * replaced with opinfo.opinfo_path_state
4766 				 * replaced with the opinfo.opinfo_path_attr
4767 				 */
4768 
4769 				if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
4770 					char	*tptr;
4771 
4772 					/*
4773 					 * The device is ALUA compliant.  The
4774 					 * state needs to be changed to ONLINE
4775 					 * rather than STANDBY, which is
4776 					 * typically done for an asymmetric
4777 					 * device that is not ALUA compliant.
4778 					 */
4779 					mdi_pi_set_state(pip,
4780 					    MDI_PATHINFO_STATE_ONLINE);
4781 					tptr = kmem_alloc(strlen
4782 					    (opinfo.opinfo_path_attr)+1,
4783 					    KM_SLEEP);
4784 					(void) strlcpy(tptr,
4785 					    opinfo.opinfo_path_attr,
4786 					    (strlen(opinfo.opinfo_path_attr)
4787 					    +1));
4788 					mutex_enter(&vlun->svl_mutex);
4789 					kmem_free(vlun->svl_active_pclass,
4790 					    strlen(vlun->svl_active_pclass)+1);
4791 					vlun->svl_active_pclass = tptr;
4792 					mutex_exit(&vlun->svl_mutex);
4793 				} else {
4794 					/*
4795 					 * Non ALUA device case.
4796 					 */
4797 					mdi_pi_set_state(pip,
4798 					    MDI_PATHINFO_STATE_STANDBY);
4799 				}
4800 				vlun->svl_fo_support = opinfo.opinfo_mode;
4801 				mdi_pi_set_preferred(pip,
4802 				    opinfo.opinfo_preferred);
4803 				update_pathinfo_done = 1;
4804 			}
4805 
4806 			/*
4807 			 * Find out the class of the currently reserved path,
4808 			 * if there is any.
4809 			 */
4810 			if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) &&
4811 			    mdi_prop_lookup_string(vlun->svl_resrv_pip,
4812 			    "path-class", &resrv_pclass) != MDI_SUCCESS) {
4813 				VHCI_DEBUG(1, (CE_NOTE, NULL,
4814 				    "!vhci_update_pathinfo: prop lookup "
4815 				    "failed for path 0x%p\n",
4816 				    (void *)vlun->svl_resrv_pip));
4817 				/*
4818 				 * Something is wrong with the reserved path.
4819 				 * We can't do much with that right here. Just
4820 				 * force re-reservation to another path.
4821 				 */
4822 				force_rereserve = 1;
4823 			}
4824 
4825 			(void) fo->sfo_pathclass_next(NULL, &best_pclass,
4826 			    vlun->svl_fops_ctpriv);
4827 			if ((force_rereserve == 1) || ((resrv_pclass != NULL) &&
4828 			    (strcmp(pclass, best_pclass) == 0) &&
4829 			    (strcmp(resrv_pclass, best_pclass) != 0))) {
4830 				/*
4831 				 * Inform target driver that a reservation
4832 				 * should be reinstated because the reserved
4833 				 * path is not the most preferred one.
4834 				 */
4835 				mutex_enter(&vhci->vhci_mutex);
4836 				scsi_hba_reset_notify_callback(
4837 				    &vhci->vhci_mutex,
4838 				    &vhci->vhci_reset_notify_listf);
4839 				mutex_exit(&vhci->vhci_mutex);
4840 			}
4841 
4842 			if (update_pathinfo_done == 1) {
4843 				return (MDI_SUCCESS);
4844 			}
4845 		} else {
4846 			char	*tptr;
4847 
4848 			/*
4849 			 * Let's release the mutex before we try to
4850 			 * allocate, since the allocation may
4851 			 * sleep.
4852 			 */
4853 			mutex_exit(&vlun->svl_mutex);
4854 			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4855 			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4856 			mutex_enter(&vlun->svl_mutex);
4857 			vlun->svl_active_pclass = tptr;
4858 		}
4859 		mutex_exit(&vlun->svl_mutex);
4860 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4861 		vlun->svl_waiting_for_activepath = 0;
4862 	} else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
4863 		mutex_enter(&vlun->svl_mutex);
4864 		if (vlun->svl_active_pclass == NULL) {
4865 			char	*tptr;
4866 
4867 			mutex_exit(&vlun->svl_mutex);
4868 			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4869 			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4870 			mutex_enter(&vlun->svl_mutex);
4871 			vlun->svl_active_pclass = tptr;
4872 		}
4873 		mutex_exit(&vlun->svl_mutex);
4874 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4875 		vlun->svl_waiting_for_activepath = 0;
4876 	} else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
4877 		mutex_enter(&vlun->svl_mutex);
4878 		if (vlun->svl_active_pclass != NULL) {
4879 			if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
4880 				mutex_exit(&vlun->svl_mutex);
4881 				/*
4882 				 * externally initiated failover has happened;
4883 				 * force state to ONLINE (see comment above)
4884 				 */
4885 				mdi_pi_set_state(pip,
4886 				    MDI_PATHINFO_STATE_ONLINE);
4887 				vlun->svl_fo_support = opinfo.opinfo_mode;
4888 				mdi_pi_set_preferred(pip,
4889 				    opinfo.opinfo_preferred);
4890 				return (MDI_SUCCESS);
4891 			}
4892 		}
4893 		mutex_exit(&vlun->svl_mutex);
4894 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);
4895 
4896 		/*
4897 		 * Initiate auto-failback, if enabled, for path if path-state
4898 		 * is transitioning from OFFLINE->STANDBY and pathclass is the
4899 		 * preferred pathclass for this storage.
4900 		 * NOTE: In case where opinfo_path_state is SCSI_PATH_ACTIVE
4901 		 * (above), where the pi state is set to STANDBY, we don't
4902 		 * initiate auto-failback as the next IO shall take care
4903 		 * of this.  See comment above.
4904 		 */
4905 		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
4906 		    vlun->svl_fops_ctpriv);
4907 		if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4908 		    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4909 		    (strcmp(pclass, best_pclass) == 0) &&
4910 		    ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)||
4911 		    (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
4912 			VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
4913 			    " OFFLINE->STANDBY transition for lun %s\n",
4914 			    best_pclass, (void *)pip, vlun->svl_lun_wwn));
4915 			(void) taskq_dispatch(vhci->vhci_taskq,
4916 			    vhci_initiate_auto_failback, (void *) vlun,
4917 			    KM_SLEEP);
4918 		}
4919 	}
4920 	vlun->svl_fo_support = opinfo.opinfo_mode;
4921 	mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);
4922 
4923 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
4924 	    " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
4925 	    opinfo.opinfo_rev, opinfo.opinfo_path_state,
4926 	    opinfo.opinfo_preferred, opinfo.opinfo_mode));
4927 
4928 	return (MDI_SUCCESS);
4929 }
4930 
4931 /*
4932  * Form the kstat name and call mdi_pi_kstat_create()
4933  */
4934 void
4935 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
4936 {
4937 	dev_info_t	*tgt_dip;
4938 	dev_info_t	*pdip;
4939 	char		*guid;
4940 	char		*target_port, *target_port_dup;
4941 	char		ks_name[KSTAT_STRLEN];
4942 	uint_t		pid;
4943 	int		by_id;
4944 	mod_hash_val_t	hv;
4945 
4946 
4947 	/* return if we have already allocated kstats */
4948 	if (mdi_pi_kstat_exists(pip))
4949 		return;
4950 
4951 	/*
4952 	 * We need instance numbers to create a kstat name, return if we don't
4953 	 * have instance numbers assigned yet.
4954 	 */
4955 	tgt_dip = mdi_pi_get_client(pip);
4956 	pdip = mdi_pi_get_phci(pip);
4957 	if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
4958 		return;
4959 
4960 	/*
4961 	 * A path oriented kstat has a ks_name of the form:
4962 	 *
4963 	 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
4964 	 *
4965 	 * We maintain a bidirectional 'target-port' to <pid> map,
4966 	 * called targetmap. All pathinfo nodes with the same
4967 	 * 'target-port' map to the same <pid>. The iostat(1M) code,
4968 	 * when parsing a path oriented kstat name, uses the <pid> as
4969 	 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
4970 	 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
4971 	 * this ioctl needs to translate a <pid> to a 'target-port'
4972 	 * even after all pathinfo nodes associated with the
4973 	 * 'target-port' have been destroyed. This is needed to support
4974 	 * consistent first-iteration activity-since-boot iostat(1M)
4975 	 * output. Because of this requirement, the mapping can't be
4976 	 * based on pathinfo information in a devinfo snapshot.
4977 	 */
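	/*
	 * For example (hypothetical instance numbers): a path from pHCI
	 * "fp" instance 0 to client "ssd" instance 1 that maps to <pid> 4
	 * would get the ks_name "ssd1.t4.fp0".
	 */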
4978 
4979 	/* determine 'target-port' */
4980 	if (mdi_prop_lookup_string(pip,
4981 	    SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) {
4982 		target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
4983 		(void) mdi_prop_free(target_port);
4984 		by_id = 1;
4985 	} else {
4986 		/*
4987 		 * If the pHCI did not set up 'target-port' on this
4988 		 * pathinfo node, assume that our client is the only
4989 		 * one with paths to the device by using the guid
4990 		 * value as the 'target-port'. Since no other client
4991 		 * will have the same guid, no other client will use
4992 		 * the same <pid>.  NOTE: a client with an instance
4993 		 * number always has a guid.
4994 		 */
4995 		(void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
4996 		    PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
4997 		target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
4998 		ddi_prop_free(guid);
4999 
5000 		/*
5001 		 * For this type of mapping we don't want the
5002 		 * <pid> -> 'target-port' mapping to be made.  This
5003 		 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
5004 		 * to fail, and the iostat(1M) long '-n' output will
5005 		 * still use the <pid>.  We do this because we just
5006 		 * made up the 'target-port' using the guid, and we
5007 		 * don't want to expose that fact in iostat output.
5008 		 */
5009 		by_id = 0;
5010 	}
5011 
5012 	/* find/establish <pid> given 'target-port' */
5013 	mutex_enter(&vhci_targetmap_mutex);
5014 	if (mod_hash_find(vhci_targetmap_byport,
5015 	    (mod_hash_key_t)target_port_dup, &hv) == 0) {
5016 		pid = (int)(intptr_t)hv;	/* mapping exists */
5017 	} else {
5018 		pid = vhci_targetmap_pid++;	/* new mapping */
5019 
5020 		(void) mod_hash_insert(vhci_targetmap_byport,
5021 		    (mod_hash_key_t)target_port_dup,
5022 		    (mod_hash_val_t)(intptr_t)pid);
5023 		if (by_id) {
5024 			(void) mod_hash_insert(vhci_targetmap_bypid,
5025 			    (mod_hash_key_t)(uintptr_t)pid,
5026 			    (mod_hash_val_t)(uintptr_t)target_port_dup);
5027 		}
5028 		target_port_dup = NULL;		/* owned by hash */
5029 	}
5030 	mutex_exit(&vhci_targetmap_mutex);
5031 
5032 	/* form kstat name */
5033 	(void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
5034 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
5035 	    pid, ddi_driver_name(pdip), ddi_get_instance(pdip));
5036 
5037 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
5038 	    "kstat %s: pid %x <-> port %s\n", (void *)pip,
5039 	    ks_name, pid, target_port_dup));
5040 	if (target_port_dup)
5041 		kmem_free(target_port_dup, strlen(target_port_dup) + 1);
5042 
5043 	/* call mdi to create kstats with the name we built */
5044 	(void) mdi_pi_kstat_create(pip, ks_name);
5045 }
5046 
5047 /* ARGSUSED */
5048 static int
5049 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5050 {
5051 	scsi_hba_tran_t			*hba = NULL;
5052 	struct scsi_device		*psd = NULL;
5053 	scsi_vhci_lun_t			*vlun = NULL;
5054 	dev_info_t			*pdip = NULL;
5055 	dev_info_t			*cdip;
5056 	dev_info_t			*tgt_dip;
5057 	struct scsi_vhci		*vhci;
5058 	char				*guid;
5059 	struct scsi_failover_ops	*sfo;
5060 	scsi_vhci_priv_t		*svp = NULL;
5061 	struct scsi_address		*ap;
5062 	struct scsi_pkt			*pkt;
5063 	int				rval = MDI_FAILURE;
5064 	mpapi_item_list_t		*list_ptr;
5065 	mpapi_lu_data_t			*ld;
5066 
5067 	ASSERT(vdip != NULL);
5068 	ASSERT(pip != NULL);
5069 
5070 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
5071 	ASSERT(vhci != NULL);
5072 
5073 	pdip = mdi_pi_get_phci(pip);
5074 	hba = ddi_get_driver_private(pdip);
5075 	ASSERT(hba != NULL);
5076 
5077 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5078 	ASSERT(svp != NULL);
5079 
5080 	cdip = mdi_pi_get_client(pip);
5081 	ASSERT(cdip != NULL);
5082 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
5083 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
5084 		VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
5085 		    "property failed"));
5086 		goto failure;
5087 	}
5088 
5089 	vlun = vhci_lun_lookup(cdip);
5090 	ASSERT(vlun != NULL);
5091 
5092 	ddi_prop_free(guid);
5093 
5094 	vlun->svl_dip = mdi_pi_get_client(pip);
5095 	ASSERT(vlun->svl_dip != NULL);
5096 
5097 	psd = svp->svp_psd;
5098 	ASSERT(psd != NULL);
5099 
5100 	/*
5101 	 * Get inquiry data into pathinfo related scsi_device structure.
5102 	 * Free sd_inq when the pathinfo related scsi_device structure is destroyed
5103 	 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own
5104 	 * copy of scsi_device and scsi_inquiry data on a per-path basis.
5105 	 */
5106 	if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
5107 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
5108 		    "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval));
5109 		rval = MDI_FAILURE;
5110 		goto failure;
5111 	}
5112 
5113 	/*
5114 	 * See if we have a failover module to support the device.
5115 	 *
5116 	 * We re-probe to determine the failover ops for each path. This
5117 	 * is done in case there are any path-specific side-effects associated
5118 	 * with the sfo_device_probe implementation.
5119 	 *
	 * Give the first successful sfo_device_probe the opportunity to
5121 	 * establish 'ctpriv', vlun/client private data. The ctpriv will
5122 	 * then be passed into the failover module on all other sfo_device_*()
5123 	 * operations (and must be freed by sfo_device_unprobe implementation).
5124 	 *
5125 	 * NOTE: While sfo_device_probe is done once per path,
5126 	 * sfo_device_unprobe only occurs once - when the vlun is destroyed.
5127 	 *
5128 	 * NOTE: We don't currently support per-path fops private data
5129 	 * mechanism.
5130 	 */
5131 	sfo = vhci_dev_fo(vdip, psd,
5132 	    &vlun->svl_fops_ctpriv, &vlun->svl_fops_name);
5133 
5134 	/* check path configuration result with current vlun state */
5135 	if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) ||
5136 	    (sfo && vlun->svl_not_supported) ||
5137 	    ((sfo == NULL) && vlun->svl_fops)) {
5138 		/* Getting different results for different paths. */
5139 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5140 		    "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n",
5141 		    (void *)pip));
5142 		cmn_err(CE_WARN, "scsi_vhci: failover contradiction: "
5143 		    "'%s'.vs.'%s': path %s\n",
5144 		    vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL",
5145 		    sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip));
5146 		vlun->svl_not_supported = 1;
5147 		rval = MDI_NOT_SUPPORTED;
5148 		goto done;
5149 	} else if (sfo == NULL) {
5150 		/* No failover module - device not supported under vHCI.  */
5151 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5152 		    "!vhci_pathinfo_online: dev (path 0x%p) not "
5153 		    "supported\n", (void *)pip));
5154 
5155 		/* XXX does this contradict vhci_is_dev_supported ? */
5156 		vlun->svl_not_supported = 1;
5157 		rval = MDI_NOT_SUPPORTED;
5158 		goto done;
5159 	}
5160 
5161 	/* failover supported for device - save failover_ops in vlun */
5162 	vlun->svl_fops = sfo;
5163 	ASSERT(vlun->svl_fops_name != NULL);
5164 
5165 	/*
5166 	 * Obtain the device-type based mpxio options as specified in
5167 	 * scsi_vhci.conf file.
5168 	 *
5169 	 * NOTE: currently, the end result is a call to
5170 	 * mdi_set_lb_region_size().
5171 	 */
5172 	tgt_dip = psd->sd_dev;
5173 	ASSERT(tgt_dip != NULL);
5174 	vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);
5175 
5176 	/*
	 * If PGR is active, revalidate the key and, if the key is still
	 * valid, register it on this path as well.
5179 	 */
5180 	sema_p(&vlun->svl_pgr_sema);
5181 	if (vlun->svl_pgr_active) {
5182 		rval = vhci_pgr_validate_and_register(svp);
5183 		if (rval != 1) {
5184 			rval = MDI_FAILURE;
5185 			sema_v(&vlun->svl_pgr_sema);
5186 			goto failure;
5187 		}
5188 	}
5189 	sema_v(&vlun->svl_pgr_sema);
5190 
5191 	if (svp->svp_new_path) {
5192 		/*
5193 		 * Last chance to perform any cleanup operations on this
5194 		 * new path before making this path completely online.
5195 		 */
5196 		svp->svp_new_path = 0;
5197 
5198 		/*
		 * If scsi_vhci knows the lun is already RESERVE'd,
		 * then skip issuing RELEASE on the new path.
5201 		 */
5202 		if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
5203 			/*
5204 			 * Issue SCSI-2 RELEASE only for the first time on
5205 			 * a new path just in case the host rebooted and
5206 			 * a reservation is still pending on this path.
5207 			 * IBM Shark storage does not clear RESERVE upon
5208 			 * host reboot.
5209 			 */
5210 			ap = &psd->sd_address;
5211 			pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
5212 			    sizeof (struct scsi_arq_status), 0, 0,
5213 			    SLEEP_FUNC, NULL);
5214 			if (pkt == NULL) {
5215 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5216 				    "!vhci_pathinfo_online: "
5217 				    "Release init_pkt failed :%p\n",
5218 				    (void *)pip));
5219 				rval = MDI_FAILURE;
5220 				goto failure;
5221 			}
5222 			pkt->pkt_cdbp[0] = SCMD_RELEASE;
5223 			pkt->pkt_time = 60;
5224 
5225 			VHCI_DEBUG(1, (CE_NOTE, NULL,
5226 			    "!vhci_path_online: path:%p "
5227 			    "Issued SCSI-2 RELEASE\n", (void *)pip));
5228 
5229 			/* Ignore the return value */
5230 			(void) vhci_do_scsi_cmd(pkt);
5231 			scsi_destroy_pkt(pkt);
5232 		}
5233 	}
5234 
5235 	rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
5236 	if (rval == MDI_FAILURE) {
5237 		goto failure;
5238 	}
5239 
5240 	/* Initialize MP-API data */
5241 	vhci_update_mpapi_data(vhci, vlun, pip);
5242 
5243 	/*
5244 	 * MP-API also needs the Inquiry data to be maintained in the
5245 	 * mp_vendor_prop_t structure, so find the lun and update its
5246 	 * structure with this data.
5247 	 */
5248 	list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
5249 	    MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
	ld = (list_ptr != NULL) ?
	    (mpapi_lu_data_t *)list_ptr->item->idata : NULL;
5251 	if (ld != NULL) {
5252 		bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
5253 		bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
5254 		bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
5255 	} else {
5256 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
5257 		    "mpapi_lu_data_t is NULL"));
5258 	}
5259 
5260 	/* create kstats for path */
5261 	vhci_kstat_create_pathinfo(pip);
5262 
5263 done:
5264 	mutex_enter(&vhci_global_mutex);
5265 	cv_broadcast(&vhci_cv);
5266 	mutex_exit(&vhci_global_mutex);
5267 
	if (vlun->svl_setcap_done) {
		/*
		 * Use psd->sd_address directly; 'ap' is only initialized
		 * on the new-path RELEASE branch above.
		 */
		(void) vhci_pHCI_cap(&psd->sd_address, "sector-size",
5270 		    vlun->svl_sector_size, 1, pip);
5271 	}
5272 
5273 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
5274 	    (void *)pip));
5275 
5276 failure:
5277 	return (rval);
5278 }
5279 
5280 /*
5281  * path offline handler.  Release all bindings that will not be
5282  * released by the normal packet transport/completion code path.
5283  * Since we don't (presently) keep any bindings alive outside of
5284  * the in-transport packets (which will be released on completion)
5285  * there is not much to do here.
5286  */
5287 /* ARGSUSED */
5288 static int
5289 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5290 {
5291 	scsi_hba_tran_t		*hba = NULL;
5292 	struct scsi_device	*psd = NULL;
5293 	dev_info_t		*pdip = NULL;
5294 	dev_info_t		*cdip = NULL;
5295 	scsi_vhci_priv_t	*svp = NULL;
5296 
5297 	ASSERT(vdip != NULL);
5298 	ASSERT(pip != NULL);
5299 
5300 	pdip = mdi_pi_get_phci(pip);
5301 	ASSERT(pdip != NULL);
5302 	if (pdip == NULL) {
5303 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5304 		    "phci dip", (void *)pip));
5305 		return (MDI_FAILURE);
5306 	}
5307 
5308 	cdip = mdi_pi_get_client(pip);
5309 	ASSERT(cdip != NULL);
5310 	if (cdip == NULL) {
5311 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5312 		    "client dip", (void *)pip));
5313 		return (MDI_FAILURE);
5314 	}
5315 
5316 	hba = ddi_get_driver_private(pdip);
5317 	ASSERT(hba != NULL);
5318 
5319 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5320 	if (svp == NULL) {
5321 		/*
5322 		 * mdi_pathinfo node in INIT state can have vHCI private
5323 		 * information set to null
5324 		 */
5325 		VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5326 		    "svp is NULL for pip 0x%p\n", (void *)pip));
5327 		return (MDI_SUCCESS);
5328 	}
5329 
5330 	psd = svp->svp_psd;
5331 	ASSERT(psd != NULL);
5332 
5333 	mutex_enter(&svp->svp_mutex);
5334 
5335 	VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5336 	    "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
5337 	while (svp->svp_cmds != 0) {
5338 		if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
5339 		    drv_usectohz(vhci_path_quiesce_timeout * 1000000),
5340 		    TR_CLOCK_TICK) == -1) {
5341 			/*
			 * The timeout expired without the condition
5343 			 * being signaled.
5344 			 */
5345 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5346 			    "Timeout reached on path 0x%p without the cond\n",
5347 			    (void *)pip));
5348 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5349 			    "%d cmds still pending on path: 0x%p\n",
5350 			    svp->svp_cmds, (void *)pip));
5351 			break;
5352 		}
5353 	}
5354 	mutex_exit(&svp->svp_mutex);
5355 
5356 	/*
	 * Check whether this vlun has an active SCSI-II RESERVE and this
	 * pip is the path that holds the reservation.  If so, clear the
	 * reservation by sending a reset, so the host will not get a
	 * reservation conflict, and clear the VLUN_RESERVE_ACTIVE_FLG
	 * flag for this lun.  A reset notify is also sent to the target
	 * driver just in case the POR check condition is cleared by some
	 * other layer in the stack.
5364 	 */
5365 	if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
5366 		if (pip == svp->svp_svl->svl_resrv_pip) {
5367 			if (vhci_recovery_reset(svp->svp_svl,
5368 			    &svp->svp_psd->sd_address, TRUE,
5369 			    VHCI_DEPTH_TARGET) == 0) {
5370 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5371 				    "!vhci_pathinfo_offline (pip:%p):"
5372 				    "reset failed, retrying\n", (void *)pip));
5373 				delay(1*drv_usectohz(1000000));
5374 				if (vhci_recovery_reset(svp->svp_svl,
5375 				    &svp->svp_psd->sd_address, TRUE,
5376 				    VHCI_DEPTH_TARGET) == 0) {
5377 					VHCI_DEBUG(1, (CE_NOTE, NULL,
5378 					    "!vhci_pathinfo_offline "
5379 					    "(pip:%p): reset failed, "
5380 					    "giving up!\n", (void *)pip));
5381 				}
5382 			}
5383 			svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
5384 		}
5385 	}
5386 
5387 	mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
5388 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
5389 
5390 	VHCI_DEBUG(1, (CE_NOTE, NULL,
5391 	    "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
5392 	return (MDI_SUCCESS);
5393 }
5394 
5395 
5396 /*
5397  * routine for SCSI VHCI IOCTL implementation.
5398  */
5399 /* ARGSUSED */
5400 static int
5401 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
5402 {
5403 	struct scsi_vhci		*vhci;
5404 	dev_info_t			*vdip;
5405 	mdi_pathinfo_t			*pip;
5406 	int				instance, held;
5407 	int				retval = 0;
5408 	caddr_t				phci_path = NULL, client_path = NULL;
5409 	caddr_t				paddr = NULL;
5410 	sv_iocdata_t			ioc;
5411 	sv_iocdata_t			*pioc = &ioc;
5412 	sv_switch_to_cntlr_iocdata_t	iocsc;
5413 	sv_switch_to_cntlr_iocdata_t	*piocsc = &iocsc;
5414 	caddr_t				s;
5415 	scsi_vhci_lun_t			*vlun;
5416 	struct scsi_failover_ops	*fo;
5417 	char				*pclass;
5418 
5419 	/* Check for validity of vhci structure */
5420 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
5421 	if (vhci == NULL) {
5422 		return (ENXIO);
5423 	}
5424 
5425 	mutex_enter(&vhci->vhci_mutex);
5426 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5427 		mutex_exit(&vhci->vhci_mutex);
5428 		return (ENXIO);
5429 	}
5430 	mutex_exit(&vhci->vhci_mutex);
5431 
5432 	/* Get the vhci dip */
5433 	vdip = vhci->vhci_dip;
5434 	ASSERT(vdip != NULL);
5435 	instance = ddi_get_instance(vdip);
5436 
5437 	/* Allocate memory for getting parameters from userland */
5438 	phci_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5439 	client_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5440 	paddr		= kmem_zalloc(MAXNAMELEN, KM_SLEEP);
5441 
5442 	/*
5443 	 * Set a local variable indicating the ioctl name. Used for
5444 	 * printing debug strings.
5445 	 */
5446 	switch (cmd) {
5447 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5448 		s = "GET_CLIENT_MULTIPATH_INFO";
5449 		break;
5450 
5451 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5452 		s = "GET_PHCI_MULTIPATH_INFO";
5453 		break;
5454 
5455 	case SCSI_VHCI_GET_CLIENT_NAME:
5456 		s = "GET_CLIENT_NAME";
5457 		break;
5458 
5459 	case SCSI_VHCI_PATH_ONLINE:
5460 		s = "PATH_ONLINE";
5461 		break;
5462 
5463 	case SCSI_VHCI_PATH_OFFLINE:
5464 		s = "PATH_OFFLINE";
5465 		break;
5466 
5467 	case SCSI_VHCI_PATH_STANDBY:
5468 		s = "PATH_STANDBY";
5469 		break;
5470 
5471 	case SCSI_VHCI_PATH_TEST:
5472 		s = "PATH_TEST";
5473 		break;
5474 
5475 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5476 		s = "SWITCH_TO_CNTLR";
5477 		break;
5478 	case SCSI_VHCI_PATH_DISABLE:
5479 		s = "PATH_DISABLE";
5480 		break;
5481 	case SCSI_VHCI_PATH_ENABLE:
5482 		s = "PATH_ENABLE";
5483 		break;
5484 
5485 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5486 		s = "GET_TARGET_LONGNAME";
5487 		break;
5488 
5489 #ifdef	DEBUG
5490 	case SCSI_VHCI_CONFIGURE_PHCI:
5491 		s = "CONFIGURE_PHCI";
5492 		break;
5493 
5494 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5495 		s = "UNCONFIGURE_PHCI";
5496 		break;
5497 #endif
5498 
5499 	default:
5500 		s = "Unknown";
5501 		vhci_log(CE_NOTE, vdip,
5502 		    "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
5503 		retval = ENOTSUP;
5504 		break;
5505 	}
5506 	if (retval != 0) {
5507 		goto end;
5508 	}
5509 
5510 	VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));
5511 
5512 	/*
5513 	 * Get IOCTL parameters from userland
5514 	 */
5515 	switch (cmd) {
5516 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5517 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5518 	case SCSI_VHCI_GET_CLIENT_NAME:
5519 	case SCSI_VHCI_PATH_ONLINE:
5520 	case SCSI_VHCI_PATH_OFFLINE:
5521 	case SCSI_VHCI_PATH_STANDBY:
5522 	case SCSI_VHCI_PATH_TEST:
5523 	case SCSI_VHCI_PATH_DISABLE:
5524 	case SCSI_VHCI_PATH_ENABLE:
5525 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5526 #ifdef	DEBUG
5527 	case SCSI_VHCI_CONFIGURE_PHCI:
5528 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5529 #endif
5530 		retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
5531 		break;
5532 
5533 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5534 		retval = vhci_get_iocswitchdata((const void *)data, piocsc,
5535 		    mode, s);
5536 		break;
5537 	}
5538 	if (retval != 0) {
5539 		goto end;
5540 	}
5541 
5542 
5543 	/*
5544 	 * Process the IOCTL
5545 	 */
5546 	switch (cmd) {
5547 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5548 	{
5549 		uint_t		num_paths;	/* Num paths to client dev */
5550 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5551 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5552 		dev_info_t	*cdip;		/* Client device dip */
5553 
5554 		if (pioc->ret_elem == NULL) {
5555 			retval = EINVAL;
5556 			break;
5557 		}
5558 
5559 		/* Get client device path from user land */
5560 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5561 			retval = EFAULT;
5562 			break;
5563 		}
5564 
5565 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5566 		    "client <%s>", s, client_path));
5567 
5568 		/* Get number of paths to this client device */
5569 		if ((cdip = mdi_client_path2devinfo(vdip, client_path))
5570 		    == NULL) {
5571 			retval = ENXIO;
5572 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5573 			    "client dip doesn't exist. invalid path <%s>",
5574 			    s, client_path));
5575 			break;
5576 		}
5577 		num_paths = mdi_client_get_path_count(cdip);
5578 
5579 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5580 		    sizeof (num_paths), mode)) {
5581 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5582 			    "num_paths copyout failed", s));
5583 			retval = EFAULT;
5584 			break;
5585 		}
5586 
		/* If the user just wanted num_paths, then return */
5588 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5589 		    num_paths == 0) {
5590 			break;
5591 		}
5592 
		/* Clamp num_paths to what can be sent to userland */
5594 		if (num_paths > pioc->buf_elem) {
5595 			num_paths = pioc->buf_elem;
5596 		}
5597 
5598 		/* Allocate memory and get userland pointers */
5599 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5600 		    pioc, mode, s) != 0) {
5601 			retval = EFAULT;
5602 			break;
5603 		}
5604 		ASSERT(upibuf != NULL);
5605 		ASSERT(kpibuf != NULL);
5606 
5607 		/*
5608 		 * Get the path information and send it to userland.
5609 		 */
5610 		if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
5611 		    != MDI_SUCCESS) {
5612 			retval = ENXIO;
5613 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5614 			break;
5615 		}
5616 
5617 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5618 		    pioc, mode, s)) {
5619 			retval = EFAULT;
5620 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5621 			break;
5622 		}
5623 
5624 		/* Free the memory allocated for path information */
5625 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5626 		break;
5627 	}
5628 
5629 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5630 	{
5631 		uint_t		num_paths;	/* Num paths to client dev */
5632 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5633 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5634 		dev_info_t	*pdip;		/* PHCI device dip */
5635 
5636 		if (pioc->ret_elem == NULL) {
5637 			retval = EINVAL;
5638 			break;
5639 		}
5640 
5641 		/* Get PHCI device path from user land */
5642 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5643 			retval = EFAULT;
5644 			break;
5645 		}
5646 
5647 		VHCI_DEBUG(6, (CE_WARN, vdip,
5648 		    "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
5649 
5650 		/* Get number of devices associated with this PHCI device */
5651 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5652 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5653 			    "phci dip doesn't exist. invalid path <%s>",
5654 			    s, phci_path));
5655 			retval = ENXIO;
5656 			break;
5657 		}
5658 
5659 		num_paths = mdi_phci_get_path_count(pdip);
5660 
5661 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5662 		    sizeof (num_paths), mode)) {
5663 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5664 			    "num_paths copyout failed", s));
5665 			retval = EFAULT;
5666 			break;
5667 		}
5668 
		/* If the user just wanted num_paths, then return */
5670 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5671 		    num_paths == 0) {
5672 			break;
5673 		}
5674 
		/* Clamp num_paths to what can be sent to userland */
5676 		if (num_paths > pioc->buf_elem) {
5677 			num_paths = pioc->buf_elem;
5678 		}
5679 
5680 		/* Allocate memory and get userland pointers */
5681 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5682 		    pioc, mode, s) != 0) {
5683 			retval = EFAULT;
5684 			break;
5685 		}
5686 		ASSERT(upibuf != NULL);
5687 		ASSERT(kpibuf != NULL);
5688 
5689 		/*
5690 		 * Get the path information and send it to userland.
5691 		 */
5692 		if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
5693 		    != MDI_SUCCESS) {
5694 			retval = ENXIO;
5695 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5696 			break;
5697 		}
5698 
5699 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5700 		    pioc, mode, s)) {
5701 			retval = EFAULT;
5702 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5703 			break;
5704 		}
5705 
5706 		/* Free the memory allocated for path information */
5707 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5708 		break;
5709 	}
5710 
5711 	case SCSI_VHCI_GET_CLIENT_NAME:
5712 	{
5713 		dev_info_t		*cdip, *pdip;
5714 
5715 		/* Get PHCI path and device address from user land */
5716 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5717 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5718 			retval = EFAULT;
5719 			break;
5720 		}
5721 
5722 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5723 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5724 
5725 		/* Get the PHCI dip */
5726 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5727 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5728 			    "phci dip doesn't exist. invalid path <%s>",
5729 			    s, phci_path));
5730 			retval = ENXIO;
5731 			break;
5732 		}
5733 
5734 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5735 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5736 			    "pathinfo doesn't exist. invalid device addr", s));
5737 			retval = ENXIO;
5738 			break;
5739 		}
5740 
5741 		/* Get the client device pathname and send to userland */
5742 		cdip = mdi_pi_get_client(pip);
5743 		vhci_ioc_devi_to_path(cdip, client_path);
5744 
5745 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5746 		    "client <%s>", s, client_path));
5747 
5748 		if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
5749 			retval = EFAULT;
5750 			break;
5751 		}
5752 		break;
5753 	}
5754 
5755 	case SCSI_VHCI_PATH_ONLINE:
5756 	case SCSI_VHCI_PATH_OFFLINE:
5757 	case SCSI_VHCI_PATH_STANDBY:
5758 	case SCSI_VHCI_PATH_TEST:
5759 	{
5760 		dev_info_t		*pdip;	/* PHCI dip */
5761 
5762 		/* Get PHCI path and device address from user land */
5763 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5764 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5765 			retval = EFAULT;
5766 			break;
5767 		}
5768 
5769 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5770 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5771 
5772 		/* Get the PHCI dip */
5773 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5774 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5775 			    "phci dip doesn't exist. invalid path <%s>",
5776 			    s, phci_path));
5777 			retval = ENXIO;
5778 			break;
5779 		}
5780 
5781 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5782 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5783 			    "pathinfo doesn't exist. invalid device addr", s));
5784 			retval = ENXIO;
5785 			break;
5786 		}
5787 
5788 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5789 		    "Calling MDI function to change device state", s));
5790 
5791 		switch (cmd) {
5792 		case SCSI_VHCI_PATH_ONLINE:
5793 			retval = mdi_pi_online(pip, 0);
5794 			break;
5795 
5796 		case SCSI_VHCI_PATH_OFFLINE:
5797 			retval = mdi_pi_offline(pip, 0);
5798 			break;
5799 
5800 		case SCSI_VHCI_PATH_STANDBY:
5801 			retval = mdi_pi_standby(pip, 0);
5802 			break;
5803 
5804 		case SCSI_VHCI_PATH_TEST:
5805 			break;
5806 		}
5807 		break;
5808 	}
5809 
5810 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5811 	{
5812 		dev_info_t *cdip;
5813 		struct scsi_device *devp;
5814 
5815 		/* Get the client device pathname */
5816 		if (ddi_copyin(piocsc->client, client_path,
5817 		    MAXPATHLEN, mode)) {
5818 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5819 			    "client_path copyin failed", s));
5820 			retval = EFAULT;
5821 			break;
5822 		}
5823 
5824 		/* Get the path class to which user wants to switch */
5825 		if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
5826 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5827 			    "controller_class copyin failed", s));
5828 			retval = EFAULT;
5829 			break;
5830 		}
5831 
5832 		/* Perform validity checks */
5833 		if ((cdip = mdi_client_path2devinfo(vdip,
5834 		    client_path)) == NULL) {
5835 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5836 			    "client dip doesn't exist. invalid path <%s>",
5837 			    s, client_path));
5838 			retval = ENXIO;
5839 			break;
5840 		}
5841 
5842 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
5843 		    "to switch controller"));
5844 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
5845 		    "class <%s>", client_path, paddr));
5846 
5847 		if (strcmp(paddr, PCLASS_PRIMARY) &&
5848 		    strcmp(paddr, PCLASS_SECONDARY)) {
5849 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5850 			    "invalid path class <%s>", s, paddr));
5851 			retval = ENXIO;
5852 			break;
5853 		}
5854 
5855 		devp = ddi_get_driver_private(cdip);
5856 		if (devp == NULL) {
5857 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5858 			    "invalid scsi device <%s>", s, client_path));
5859 			retval = ENXIO;
5860 			break;
5861 		}
5862 		vlun = ADDR2VLUN(&devp->sd_address);
5863 		ASSERT(vlun);
5864 
5865 		/*
		 * Check whether the device has only one pclass, PRIMARY.
		 * If so, the device doesn't support failover.  We assume
		 * that a device with a single pclass uses PRIMARY, as that
		 * is the case today.  If this changes and symmetric devices
		 * with some other pclass are supported in the future, this
		 * IOCTL will have to be overhauled anyway, since the only
		 * arguments it currently accepts are PRIMARY and SECONDARY.
5873 		 */
5874 		fo = vlun->svl_fops;
5875 		if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass,
5876 		    vlun->svl_fops_ctpriv)) {
5877 			retval = ENOTSUP;
5878 			break;
5879 		}
5880 
5881 		VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
5882 		mutex_enter(&vlun->svl_mutex);
5883 		if (vlun->svl_active_pclass != NULL) {
5884 			if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
5885 				mutex_exit(&vlun->svl_mutex);
5886 				retval = EALREADY;
5887 				VHCI_RELEASE_LUN(vlun);
5888 				break;
5889 			}
5890 		}
5891 		mutex_exit(&vlun->svl_mutex);
5892 		/* Call mdi function to cause  a switch over */
5893 		retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
5894 		if (retval == MDI_SUCCESS) {
5895 			retval = 0;
5896 		} else if (retval == MDI_BUSY) {
5897 			retval = EBUSY;
5898 		} else {
5899 			retval = EIO;
5900 		}
5901 		VHCI_RELEASE_LUN(vlun);
5902 		break;
5903 	}
5904 
5905 	case SCSI_VHCI_PATH_ENABLE:
5906 	case SCSI_VHCI_PATH_DISABLE:
5907 	{
5908 		dev_info_t	*cdip, *pdip;
5909 
5910 		/*
5911 		 * Get client device path from user land
5912 		 */
5913 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5914 			retval = EFAULT;
5915 			break;
5916 		}
5917 
5918 		/*
5919 		 * Get Phci device path from user land
5920 		 */
5921 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5922 			retval = EFAULT;
5923 			break;
5924 		}
5925 
5926 		/*
5927 		 * Get the devinfo for the Phci.
5928 		 */
5929 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5930 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5931 			    "phci dip doesn't exist. invalid path <%s>",
5932 			    s, phci_path));
5933 			retval = ENXIO;
5934 			break;
5935 		}
5936 
5937 		/*
		 * If the client path is set to /scsi_vhci, the operation
		 * applies to all clients, so set cdip to NULL.
		 * Otherwise, look up the client dip.
5941 		 */
5942 		if (strcmp(client_path, "/scsi_vhci") == 0) {
5943 			cdip = NULL;
5944 		} else {
5945 			if ((cdip = mdi_client_path2devinfo(vdip,
5946 			    client_path)) == NULL) {
5947 				retval = ENXIO;
5948 				VHCI_DEBUG(1, (CE_WARN, NULL,
5949 				    "!vhci_ioctl: ioctl <%s> client dip "
5950 				    "doesn't exist. invalid path <%s>",
5951 				    s, client_path));
5952 				break;
5953 			}
5954 		}
5955 
5956 		if (cmd == SCSI_VHCI_PATH_ENABLE)
5957 			retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
5958 		else
5959 			retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);
5960 
5961 		break;
5962 	}
5963 
5964 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5965 	{
5966 		uint_t		pid = pioc->buf_elem;
5967 		char		*target_port;
5968 		mod_hash_val_t	hv;
5969 
5970 		/* targetmap lookup of 'target-port' by <pid> */
5971 		if (mod_hash_find(vhci_targetmap_bypid,
5972 		    (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
5973 			/*
5974 			 * NOTE: failure to find the mapping is OK for guid
5975 			 * based 'target-port' values.
5976 			 */
5977 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5978 			    "targetport mapping doesn't exist: pid %d",
5979 			    s, pid));
5980 			retval = ENXIO;
5981 			break;
5982 		}
5983 
5984 		/* copyout 'target-port' result */
5985 		target_port = (char *)hv;
5986 		if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
5987 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5988 			    "targetport copyout failed: len: %d",
5989 			    s, (int)strlen(target_port)));
5990 			retval = EFAULT;
5991 		}
5992 		break;
5993 	}
5994 
5995 #ifdef	DEBUG
5996 	case SCSI_VHCI_CONFIGURE_PHCI:
5997 	{
5998 		dev_info_t		*pdip;
5999 
6000 		/* Get PHCI path and device address from user land */
6001 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6002 			retval = EFAULT;
6003 			break;
6004 		}
6005 
6006 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6007 		    "phci <%s>", s, phci_path));
6008 
6009 		/* Get the PHCI dip */
6010 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6011 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6012 			    "phci dip doesn't exist. invalid path <%s>",
6013 			    s, phci_path));
6014 			retval = ENXIO;
6015 			break;
6016 		}
6017 
6018 		if (ndi_devi_config(pdip,
6019 		    NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) {
6020 			retval = EIO;
6021 		}
6022 
6023 		ddi_release_devi(pdip);
6024 		break;
6025 	}
6026 
6027 	case SCSI_VHCI_UNCONFIGURE_PHCI:
6028 	{
6029 		dev_info_t		*pdip;
6030 
6031 		/* Get PHCI path and device address from user land */
6032 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6033 			retval = EFAULT;
6034 			break;
6035 		}
6036 
6037 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6038 		    "phci <%s>", s, phci_path));
6039 
6040 		/* Get the PHCI dip */
6041 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6042 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6043 			    "phci dip doesn't exist. invalid path <%s>",
6044 			    s, phci_path));
6045 			retval = ENXIO;
6046 			break;
6047 		}
6048 
6049 		if (ndi_devi_unconfig(pdip,
6050 		    NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
6051 			retval = EBUSY;
6052 		}
6053 
6054 		ddi_release_devi(pdip);
6055 		break;
6056 	}
6057 #endif
6058 	}
6059 
6060 end:
6061 	/* Free the memory allocated above */
6062 	if (phci_path != NULL) {
6063 		kmem_free(phci_path, MAXPATHLEN);
6064 	}
6065 	if (client_path != NULL) {
6066 		kmem_free(client_path, MAXPATHLEN);
6067 	}
6068 	if (paddr != NULL) {
6069 		kmem_free(paddr, MAXNAMELEN);
6070 	}
6071 	return (retval);
6072 }
6073 
6074 /*
6075  * devctl IOCTL support for client device DR
6076  */
6077 /* ARGSUSED */
6078 int
6079 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
6080     int *rvalp)
6081 {
6082 	dev_info_t *self;
6083 	dev_info_t *child;
6084 	scsi_hba_tran_t *hba;
6085 	struct devctl_iocdata *dcp;
6086 	struct scsi_vhci *vhci;
6087 	int rv = 0;
6088 	int retval = 0;
6089 	scsi_vhci_priv_t *svp;
6090 	mdi_pathinfo_t  *pip;
6091 
6092 	if ((vhci = ddi_get_soft_state(vhci_softstate,
6093 	    MINOR2INST(getminor(dev)))) == NULL)
6094 		return (ENXIO);
6095 
6096 	/*
6097 	 * check if :devctl minor device has been opened
6098 	 */
6099 	mutex_enter(&vhci->vhci_mutex);
6100 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
6101 		mutex_exit(&vhci->vhci_mutex);
6102 		return (ENXIO);
6103 	}
6104 	mutex_exit(&vhci->vhci_mutex);
6105 
6106 	self = vhci->vhci_dip;
6107 	hba = ddi_get_driver_private(self);
6108 	if (hba == NULL)
6109 		return (ENXIO);
6110 
6111 	/*
6112 	 * We can use the generic implementation for these ioctls
6113 	 */
6114 	switch (cmd) {
6115 	case DEVCTL_DEVICE_GETSTATE:
6116 	case DEVCTL_DEVICE_ONLINE:
6117 	case DEVCTL_DEVICE_OFFLINE:
6118 	case DEVCTL_DEVICE_REMOVE:
6119 	case DEVCTL_BUS_GETSTATE:
6120 		return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
6121 	}
6122 
6123 	/*
6124 	 * read devctl ioctl data
6125 	 */
6126 	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
6127 		return (EFAULT);
6128 
6129 	switch (cmd) {
6130 
6131 	case DEVCTL_DEVICE_RESET:
6132 		/*
6133 		 * lookup and hold child device
6134 		 */
6135 		if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
6136 		    ndi_dc_getaddr(dcp))) == NULL) {
6137 			rv = ENXIO;
6138 			break;
6139 		}
6140 		retval = mdi_select_path(child, NULL,
6141 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
6142 		    NULL, &pip);
6143 		if ((retval != MDI_SUCCESS) || (pip == NULL)) {
			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: "
6145 			    "Unable to get a path, dip 0x%p", (void *)child));
6146 			rv = ENXIO;
6147 			break;
6148 		}
6149 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
6150 		if (vhci_recovery_reset(svp->svp_svl,
6151 		    &svp->svp_psd->sd_address, TRUE,
6152 		    VHCI_DEPTH_TARGET) == 0) {
6153 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6154 			    "!vhci_ioctl(pip:%p): "
6155 			    "reset failed\n", (void *)pip));
6156 			rv = ENXIO;
6157 		}
6158 		mdi_rele_path(pip);
6159 		break;
6160 
6161 	case DEVCTL_BUS_QUIESCE:
6162 	case DEVCTL_BUS_UNQUIESCE:
6163 	case DEVCTL_BUS_RESET:
6164 	case DEVCTL_BUS_RESETALL:
6165 #ifdef	DEBUG
6166 	case DEVCTL_BUS_CONFIGURE:
6167 	case DEVCTL_BUS_UNCONFIGURE:
6168 #endif
6169 		rv = ENOTSUP;
6170 		break;
6171 
6172 	default:
6173 		rv = ENOTTY;
6174 	} /* end of outer switch */
6175 
6176 	ndi_dc_freehdl(dcp);
6177 	return (rv);
6178 }
6179 
6180 /*
6181  * Routine to get the PHCI pathname from ioctl structures in userland
6182  */
6183 /* ARGSUSED */
6184 static int
6185 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
6186 	int mode, caddr_t s)
6187 {
6188 	int retval = 0;
6189 
6190 	if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
6191 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
6192 		    "phci_path copyin failed", s));
6193 		retval = EFAULT;
6194 	}
	return (retval);
}
6198 
6199 
6200 /*
6201  * Routine to get the Client device pathname from ioctl structures in userland
6202  */
6203 /* ARGSUSED */
6204 static int
6205 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
6206 	int mode, caddr_t s)
6207 {
6208 	int retval = 0;
6209 
6210 	if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
6211 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
6212 		    "ioctl <%s> client_path copyin failed", s));
6213 		retval = EFAULT;
6214 	}
6215 	return (retval);
6216 }
6217 
6218 
6219 /*
6220  * Routine to get physical device address from ioctl structure in userland
6221  */
6222 /* ARGSUSED */
6223 static int
6224 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
6225 {
6226 	int retval = 0;
6227 
6228 	if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
6229 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
6230 		    "ioctl <%s> device addr copyin failed", s));
6231 		retval = EFAULT;
6232 	}
6233 	return (retval);
6234 }
6235 
6236 
6237 /*
6238  * Routine to send client device pathname to userland.
6239  */
6240 /* ARGSUSED */
6241 static int
6242 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
6243 	int mode, caddr_t s)
6244 {
6245 	int retval = 0;
6246 
6247 	if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
6248 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
6249 		    "ioctl <%s> client_path copyout failed", s));
6250 		retval = EFAULT;
6251 	}
6252 	return (retval);
6253 }
6254 
6255 
6256 /*
 * Routine to translate a dev_info pointer (dip) to a device pathname.
6258  */
6259 static void
6260 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
6261 {
6262 	(void) ddi_pathname(dip, path);
6263 }
6264 
6265 
6266 /*
6267  * vhci_get_phci_path_list:
6268  *		get information about devices associated with a
6269  *		given PHCI device.
6270  *
6271  * Return Values:
6272  *		path information elements
6273  */
6274 int
6275 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
6276 	uint_t num_elems)
6277 {
6278 	uint_t			count, done;
6279 	mdi_pathinfo_t		*pip;
6280 	sv_path_info_t		*ret_pip;
6281 	int			status;
6282 	size_t			prop_size;
6283 	int			circular;
6284 
6285 	/*
	 * Walk the client paths of this pHCI and copy per-path state
	 * and packed properties into the caller's buffers.
6288 	 */
6289 
6290 	ret_pip = pibuf;
6291 	count = 0;
6292 
6293 	ndi_devi_enter(pdip, &circular);
6294 
6295 	done = (count >= num_elems);
6296 	pip = mdi_get_next_client_path(pdip, NULL);
6297 	while (pip && !done) {
6298 		mdi_pi_lock(pip);
6299 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6300 		    ret_pip->device.ret_phci);
6301 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6302 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6303 		    &ret_pip->ret_ext_state);
6304 
6305 		status = mdi_prop_size(pip, &prop_size);
6306 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6307 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6308 		}
6309 
6310 #ifdef DEBUG
6311 		if (status != MDI_SUCCESS) {
6312 			VHCI_DEBUG(2, (CE_WARN, NULL,
6313 			    "!vhci_get_phci_path_list: "
6314 			    "phci <%s>, prop size failure 0x%x",
6315 			    ret_pip->device.ret_phci, status));
6316 		}
#endif /* DEBUG */

6320 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6321 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6322 			status = mdi_prop_pack(pip,
6323 			    &ret_pip->ret_prop.buf,
6324 			    ret_pip->ret_prop.buf_size);
6325 
6326 #ifdef DEBUG
6327 			if (status != MDI_SUCCESS) {
6328 				VHCI_DEBUG(2, (CE_WARN, NULL,
6329 				    "!vhci_get_phci_path_list: "
6330 				    "phci <%s>, prop pack failure 0x%x",
6331 				    ret_pip->device.ret_phci, status));
6332 			}
6333 #endif /* DEBUG */
6334 		}
6335 
6336 		mdi_pi_unlock(pip);
6337 		pip = mdi_get_next_client_path(pdip, pip);
6338 		ret_pip++;
6339 		count++;
6340 		done = (count >= num_elems);
6341 	}
6342 
6343 	ndi_devi_exit(pdip, circular);
6344 
6345 	return (MDI_SUCCESS);
6346 }
6347 
6348 
6349 /*
6350  * vhci_get_client_path_list:
6351  *		get information about various paths associated with a
6352  *		given client device.
6353  *
6354  * Return Values:
6355  *		path information elements
6356  */
6357 int
6358 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
6359 	uint_t num_elems)
6360 {
6361 	uint_t			count, done;
6362 	mdi_pathinfo_t		*pip;
6363 	sv_path_info_t		*ret_pip;
6364 	int			status;
6365 	size_t			prop_size;
6366 	int			circular;
6367 
6368 	ret_pip = pibuf;
6369 	count = 0;
6370 
6371 	ndi_devi_enter(cdip, &circular);
6372 
6373 	done = (count >= num_elems);
6374 	pip = mdi_get_next_phci_path(cdip, NULL);
6375 	while (pip && !done) {
6376 		mdi_pi_lock(pip);
6377 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6378 		    ret_pip->device.ret_phci);
6379 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6380 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6381 		    &ret_pip->ret_ext_state);
6382 
6383 		status = mdi_prop_size(pip, &prop_size);
6384 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6385 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6386 		}
6387 
6388 #ifdef DEBUG
6389 		if (status != MDI_SUCCESS) {
6390 			VHCI_DEBUG(2, (CE_WARN, NULL,
6391 			    "!vhci_get_client_path_list: "
6392 			    "phci <%s>, prop size failure 0x%x",
6393 			    ret_pip->device.ret_phci, status));
6394 		}
#endif /* DEBUG */

6398 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6399 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6400 			status = mdi_prop_pack(pip,
6401 			    &ret_pip->ret_prop.buf,
6402 			    ret_pip->ret_prop.buf_size);
6403 
6404 #ifdef DEBUG
6405 			if (status != MDI_SUCCESS) {
6406 				VHCI_DEBUG(2, (CE_WARN, NULL,
6407 				    "!vhci_get_client_path_list: "
6408 				    "phci <%s>, prop pack failure 0x%x",
6409 				    ret_pip->device.ret_phci, status));
6410 			}
6411 #endif /* DEBUG */
6412 		}
6413 
6414 		mdi_pi_unlock(pip);
6415 		pip = mdi_get_next_phci_path(cdip, pip);
6416 		ret_pip++;
6417 		count++;
6418 		done = (count >= num_elems);
6419 	}
6420 
6421 	ndi_devi_exit(cdip, circular);
6422 
6423 	return (MDI_SUCCESS);
6424 }
6425 
6426 
6427 /*
6428  * Routine to get ioctl argument structure from userland.
6429  */
6430 /* ARGSUSED */
6431 static int
6432 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
6433 {
6434 	int	retval = 0;
6435 
6436 #ifdef  _MULTI_DATAMODEL
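	/*
	 * For an ILP32 caller on a 64-bit kernel, widen the 32-bit
	 * sv_iocdata_t layout into the native form; pointer fields
	 * arrive as 32-bit values and are converted via uintptr_t.
	 */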
6437 	switch (ddi_model_convert_from(mode & FMODELS)) {
6438 	case DDI_MODEL_ILP32:
6439 	{
6440 		sv_iocdata32_t	ioc32;
6441 
6442 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6443 			retval = EFAULT;
6444 			break;
6445 		}
6446 		pioc->client	= (caddr_t)(uintptr_t)ioc32.client;
6447 		pioc->phci	= (caddr_t)(uintptr_t)ioc32.phci;
6448 		pioc->addr	= (caddr_t)(uintptr_t)ioc32.addr;
6449 		pioc->buf_elem	= (uint_t)ioc32.buf_elem;
6450 		pioc->ret_buf	= (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
6451 		pioc->ret_elem	= (uint_t *)(uintptr_t)ioc32.ret_elem;
6452 		break;
6453 	}
6454 
6455 	case DDI_MODEL_NONE:
6456 		if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6457 			retval = EFAULT;
6458 			break;
6459 		}
6460 		break;
6461 	}
6462 #else   /* _MULTI_DATAMODEL */
6463 	if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6464 		retval = EFAULT;
6465 	}
6466 #endif  /* _MULTI_DATAMODEL */
6467 
6468 #ifdef DEBUG
6469 	if (retval) {
6470 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6471 		    "iocdata copyin failed", s));
6472 	}
6473 #endif
6474 
6475 	return (retval);
6476 }
6477 
6478 
6479 /*
6480  * Routine to get the ioctl argument for ioctl causing controller switchover.
6481  */
6482 /* ARGSUSED */
6483 static int
6484 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
6485     int mode, caddr_t s)
6486 {
6487 	int	retval = 0;
6488 
6489 #ifdef  _MULTI_DATAMODEL
6490 	switch (ddi_model_convert_from(mode & FMODELS)) {
6491 	case DDI_MODEL_ILP32:
6492 	{
6493 		sv_switch_to_cntlr_iocdata32_t	ioc32;
6494 
6495 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6496 			retval = EFAULT;
6497 			break;
6498 		}
6499 		piocsc->client	= (caddr_t)(uintptr_t)ioc32.client;
6500 		piocsc->class	= (caddr_t)(uintptr_t)ioc32.class;
6501 		break;
6502 	}
6503 
6504 	case DDI_MODEL_NONE:
6505 		if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6506 			retval = EFAULT;
6507 		}
6508 		break;
6509 	}
6510 #else   /* _MULTI_DATAMODEL */
6511 	if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6512 		retval = EFAULT;
6513 	}
6514 #endif  /* _MULTI_DATAMODEL */
6515 
6516 #ifdef DEBUG
6517 	if (retval) {
6518 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6519 		    "switch_to_cntlr_iocdata copyin failed", s));
6520 	}
6521 #endif
6522 
6523 	return (retval);
6524 }
6525 
6526 
6527 /*
6528  * Routine to allocate memory for the path information structures.
6529  * It allocates two chunks of memory - one for keeping userland
 * pointers/values for path information and path properties, the second
 * for kernel buffers that hold the path properties.  These path
 * properties are finally copied out to userland.
6533  */
6534 /* ARGSUSED */
6535 static int
6536 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
6537     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6538 {
6539 	sv_path_info_t	*pi;
6540 	uint_t		bufsize;
6541 	int		retval = 0;
6542 	int		index;
6543 
6544 	/* Allocate memory */
6545 	*upibuf = (sv_path_info_t *)
6546 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6547 	ASSERT(*upibuf != NULL);
6548 	*kpibuf = (sv_path_info_t *)
6549 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6550 	ASSERT(*kpibuf != NULL);
6551 
6552 	/*
6553 	 * Get the path info structure from the user space.
6554 	 * We are interested in the following fields:
6555 	 *	- user size of buffer for per path properties.
6556 	 *	- user address of buffer for path info properties.
6557 	 *	- user pointer for returning actual buffer size
6558 	 * Keep these fields in the 'upibuf' structures.
6559 	 * Allocate buffer for per path info properties in kernel
6560 	 * structure ('kpibuf').
6561 	 * Size of these buffers will be equal to the size of buffers
6562 	 * in the user space.
6563 	 */
6564 #ifdef  _MULTI_DATAMODEL
6565 	switch (ddi_model_convert_from(mode & FMODELS)) {
6566 	case DDI_MODEL_ILP32:
6567 	{
6568 		sv_path_info32_t	*src;
6569 		sv_path_info32_t	pi32;
6570 
6571 		src  = (sv_path_info32_t *)pioc->ret_buf;
6572 		pi = (sv_path_info_t *)*upibuf;
6573 		for (index = 0; index < num_paths; index++, src++, pi++) {
6574 			if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
6575 				retval = EFAULT;
6576 				break;
6577 			}
6578 
6579 			pi->ret_prop.buf_size	=
6580 			    (uint_t)pi32.ret_prop.buf_size;
6581 			pi->ret_prop.ret_buf_size =
6582 			    (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
6583 			pi->ret_prop.buf	=
6584 			    (caddr_t)(uintptr_t)pi32.ret_prop.buf;
6585 		}
6586 		break;
6587 	}
6588 
6589 	case DDI_MODEL_NONE:
6590 		if (ddi_copyin(pioc->ret_buf, *upibuf,
6591 		    sizeof (sv_path_info_t) * num_paths, mode)) {
6592 			retval = EFAULT;
6593 		}
6594 		break;
6595 	}
6596 #else   /* _MULTI_DATAMODEL */
6597 	if (ddi_copyin(pioc->ret_buf, *upibuf,
6598 	    sizeof (sv_path_info_t) * num_paths, mode)) {
6599 		retval = EFAULT;
6600 	}
6601 #endif  /* _MULTI_DATAMODEL */
6602 
6603 	if (retval != 0) {
6604 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
6605 		    "ioctl <%s> normal: path_info copyin failed", s));
6606 		kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
6607 		kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
6608 		*upibuf = NULL;
6609 		*kpibuf = NULL;
6610 		return (retval);
6611 	}
6612 
6613 	/*
	 * Allocate kernel buffers for the per-path properties.  A zero
	 * user buffer size, or one larger than SV_PROP_MAX_BUF_SIZE,
	 * results in no property buffer for that path.
6615 	 */
6616 	for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
6617 		bufsize = (*upibuf)[index].ret_prop.buf_size;
6618 
6619 		if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
6620 			pi->ret_prop.buf_size = bufsize;
6621 			pi->ret_prop.buf = (caddr_t)
6622 			    kmem_zalloc(bufsize, KM_SLEEP);
6623 			ASSERT(pi->ret_prop.buf != NULL);
6624 		} else {
6625 			pi->ret_prop.buf_size = 0;
6626 			pi->ret_prop.buf = NULL;
6627 		}
6628 
6629 		if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
6630 			pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
6631 			    sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
6632 			ASSERT(pi->ret_prop.ret_buf_size != NULL);
6633 		} else {
6634 			pi->ret_prop.ret_buf_size = NULL;
6635 		}
6636 	}
6637 
6638 	return (0);
6639 }
6640 
6641 
6642 /*
6643  * Routine to free memory for the path information structures.
6644  * This is the memory which was allocated earlier.
6645  */
6646 /* ARGSUSED */
6647 static void
6648 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6649     uint_t num_paths)
6650 {
6651 	sv_path_info_t	*pi;
6652 	int		index;
6653 
6654 	/* Free memory for per path properties */
6655 	for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
6656 		if (pi->ret_prop.ret_buf_size != NULL) {
6657 			kmem_free(pi->ret_prop.ret_buf_size,
6658 			    sizeof (*pi->ret_prop.ret_buf_size));
6659 		}
6660 
6661 		if (pi->ret_prop.buf != NULL) {
6662 			kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
6663 		}
6664 	}
6665 
6666 	/* Free memory for path info structures */
6667 	kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
6668 	kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
6669 }
6670 
6671 
6672 /*
6673  * Routine to copy path information and path properties to userland.
6674  */
6675 /* ARGSUSED */
6676 static int
6677 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6678     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6679 {
6680 	int			retval = 0, index;
6681 	sv_path_info_t		*upi_ptr;
6682 	sv_path_info32_t	*upi32_ptr;
6683 
6684 #ifdef  _MULTI_DATAMODEL
6685 	switch (ddi_model_convert_from(mode & FMODELS)) {
6686 	case DDI_MODEL_ILP32:
6687 		goto copy_32bit;
6688 
6689 	case DDI_MODEL_NONE:
6690 		goto copy_normal;
6691 	}
6692 #else   /* _MULTI_DATAMODEL */
6693 
6694 	goto copy_normal;
6695 
6696 #endif  /* _MULTI_DATAMODEL */
6697 
6698 copy_normal:
6699 
6700 	/*
6701 	 * Copy path information and path properties to user land.
6702 	 * Pointer fields inside the path property structure were
6703 	 * saved in the 'upibuf' structure earlier.
6704 	 */
6705 	upi_ptr = pioc->ret_buf;
6706 	for (index = 0; index < num_paths; index++) {
6707 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6708 		    upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6709 			retval = EFAULT;
6710 			break;
6711 		}
6712 
6713 		if (ddi_copyout(kpibuf[index].ret_addr,
6714 		    upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6715 			retval = EFAULT;
6716 			break;
6717 		}
6718 
6719 		if (ddi_copyout(&kpibuf[index].ret_state,
6720 		    &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state),
6721 		    mode)) {
6722 			retval = EFAULT;
6723 			break;
6724 		}
6725 
6726 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6727 		    &upi_ptr[index].ret_ext_state,
6728 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6729 			retval = EFAULT;
6730 			break;
6731 		}
6732 
6733 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6734 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6735 		    upibuf[index].ret_prop.ret_buf_size,
6736 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6737 			retval = EFAULT;
6738 			break;
6739 		}
6740 
6741 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6742 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6743 		    upibuf[index].ret_prop.buf,
6744 		    upibuf[index].ret_prop.buf_size, mode)) {
6745 			retval = EFAULT;
6746 			break;
6747 		}
6748 	}
6749 
6750 #ifdef DEBUG
6751 	if (retval) {
6752 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6753 		    "normal: path_info copyout failed", s));
6754 	}
6755 #endif
6756 
6757 	return (retval);
6758 
6759 copy_32bit:
6760 	/*
6761 	 * Copy path information and path properties to user land.
6762 	 * Pointer fields inside the path property structure were
6763 	 * saved in the 'upibuf' structure earlier.
6764 	 */
6765 	upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
6766 	for (index = 0; index < num_paths; index++) {
6767 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6768 		    upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6769 			retval = EFAULT;
6770 			break;
6771 		}
6772 
6773 		if (ddi_copyout(kpibuf[index].ret_addr,
6774 		    upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6775 			retval = EFAULT;
6776 			break;
6777 		}
6778 
6779 		if (ddi_copyout(&kpibuf[index].ret_state,
6780 		    &upi32_ptr[index].ret_state,
6781 		    sizeof (kpibuf[index].ret_state), mode)) {
6782 			retval = EFAULT;
6783 			break;
6784 		}
6785 
6786 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6787 		    &upi32_ptr[index].ret_ext_state,
6788 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6789 			retval = EFAULT;
6790 			break;
6791 		}
6792 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6793 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6794 		    upibuf[index].ret_prop.ret_buf_size,
6795 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6796 			retval = EFAULT;
6797 			break;
6798 		}
6799 
6800 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6801 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6802 		    upibuf[index].ret_prop.buf,
6803 		    upibuf[index].ret_prop.buf_size, mode)) {
6804 			retval = EFAULT;
6805 			break;
6806 		}
6807 	}
6808 
6809 #ifdef DEBUG
6810 	if (retval) {
6811 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6812 		    "normal: path_info copyout failed", s));
6813 	}
6814 #endif
6815 
6816 	return (retval);
6817 }
6818 
6819 
6820 /*
6821  * vhci_failover()
6822  * This routine expects VHCI_HOLD_LUN before being invoked.  It can be invoked
 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC.  For asynchronous failovers
 * this routine shall VHCI_RELEASE_LUN on exiting.  For synchronous failovers
 * it is the caller's responsibility to release the lun.
6826  */
6827 
6828 /* ARGSUSED */
6829 static int
6830 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
6831 {
6832 	char			*guid;
6833 	scsi_vhci_lun_t		*vlun = NULL;
6834 	struct scsi_vhci	*vhci;
6835 	mdi_pathinfo_t		*pip, *npip;
6836 	char			*s_pclass, *pclass1, *pclass2, *pclass;
6837 	char			active_pclass_copy[255], *active_pclass_ptr;
6838 	char			*ptr1, *ptr2;
6839 	mdi_pathinfo_state_t	pi_state;
6840 	uint32_t		pi_ext_state;
6841 	scsi_vhci_priv_t	*svp;
6842 	struct scsi_device	*sd;
6843 	struct scsi_failover_ops	*sfo;
6844 	int			sps; /* mdi_select_path() status */
6845 	int			activation_done = 0;
6846 	int			rval, retval = MDI_FAILURE;
6847 	int			reserve_pending, check_condition, UA_condition;
6848 	struct scsi_pkt		*pkt;
6849 	struct buf		*bp;
6850 
6851 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
6852 	sd = ddi_get_driver_private(cdip);
6853 	vlun = ADDR2VLUN(&sd->sd_address);
6854 	ASSERT(vlun != 0);
6855 	ASSERT(VHCI_LUN_IS_HELD(vlun));
6856 	guid = vlun->svl_lun_wwn;
6857 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
6858 	vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
6859 	    "(GUID %s)", ddi_node_name(cdip), guid);
6860 
6861 	/*
	 * Maintain a local copy of vlun->svl_active_pclass for the rest
	 * of the processing.  Accessing the field directly in the loop
	 * below breaks the loop logic when the field is updated by other
	 * threads (e.g. path status updates) and causes the 'paths are
	 * not currently available' condition to be declared prematurely.
6868 	 */
6869 	mutex_enter(&vlun->svl_mutex);
6870 	if (vlun->svl_active_pclass != NULL) {
6871 		(void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6872 		    sizeof (active_pclass_copy));
6873 		active_pclass_ptr = &active_pclass_copy[0];
6874 		mutex_exit(&vlun->svl_mutex);
6875 		if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6876 		    active_pclass_ptr) != 0) {
6877 			retval = MDI_FAILURE;
6878 		}
6879 	} else {
6880 		/*
		 * This can happen only when the only available path
		 * discovered to the device is a STANDBY path.
6883 		 */
6884 		mutex_exit(&vlun->svl_mutex);
6885 		active_pclass_copy[0] = '\0';
6886 		active_pclass_ptr = NULL;
6887 	}
6888 
6889 	sfo = vlun->svl_fops;
6890 	ASSERT(sfo != NULL);
6891 	pclass1 = s_pclass = active_pclass_ptr;
6892 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6893 	    (s_pclass == NULL ? "<none>" : s_pclass)));
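	/*
	 * Walk the failover module's path-class list, starting after the
	 * currently active class, until a standby path in some class is
	 * successfully activated or the walk returns to the class we
	 * started from.
	 */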
6894 
6895 next_pathclass:
6896 
6897 	rval = sfo->sfo_pathclass_next(pclass1, &pclass2,
6898 	    vlun->svl_fops_ctpriv);
6899 	if (rval == ENOENT) {
6900 		if (s_pclass == NULL) {
6901 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6902 			    "failed, no more pathclasses\n", guid));
6903 			goto done;
6904 		} else {
6905 			(void) sfo->sfo_pathclass_next(NULL, &pclass2,
6906 			    vlun->svl_fops_ctpriv);
6907 		}
6908 	} else if (rval == EINVAL) {
6909 		vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6910 		    "device %s (GUID %s): Invalid path-class %s",
6911 		    ddi_node_name(cdip), guid,
6912 		    ((pclass1 == NULL) ? "<none>" : pclass1));
6913 		goto done;
6914 	}
6915 	if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6916 		/*
6917 		 * paths are not currently available
6918 		 */
6919 		vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6920 		    " for device %s (GUID %s)",
6921 		    ddi_node_name(cdip), guid);
6922 		goto done;
6923 	}
6924 	pip = npip = NULL;
6925 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6926 	    "%s as failover destination\n", guid, pclass2));
6927 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6928 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6929 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6930 		    "STANDBY paths found (status:%x)!\n", guid, sps));
6931 		pclass1 = pclass2;
6932 		goto next_pathclass;
6933 	}
6934 	do {
6935 		pclass = NULL;
6936 		if ((mdi_prop_lookup_string(npip, "path-class",
6937 		    &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6938 		    pclass) != 0)) {
6939 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6940 			    "!vhci_failover(5.5)(%s): skipping path "
6941 			    "%p(%s)...\n", guid, (void *)npip, pclass));
6942 			pip = npip;
6943 			sps = mdi_select_path(cdip, NULL,
6944 			    MDI_SELECT_STANDBY_PATH, pip, &npip);
6945 			mdi_rele_path(pip);
6946 			(void) mdi_prop_free(pclass);
6947 			continue;
6948 		}
6949 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6950 
6951 		/*
		 * Issue a READ at a non-zero block on this STANDBY path.
		 * Purple returns
		 * 1. RESERVATION_CONFLICT if a reservation is pending
		 * 2. a POR check condition if a reset happened
		 * 3. failover check conditions if one is already in progress
6957 		 */
6958 		reserve_pending = 0;
6959 		check_condition = 0;
6960 		UA_condition = 0;
6961 
6962 		bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
6963 		    (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL);
6964 		if (!bp) {
6965 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6966 			    "vhci_failover !No resources (buf)\n"));
6967 			mdi_rele_path(npip);
6968 			goto done;
6969 		}
6970 		pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
6971 		    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
6972 		    PKT_CONSISTENT, NULL, NULL);
6973 		if (pkt) {
6974 			(void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
6975 			    pkt->pkt_cdbp, SCMD_READ, 1, 1, 0);
6976 			pkt->pkt_flags = FLAG_NOINTR;
6977 check_path_again:
6978 			pkt->pkt_path_instance = mdi_pi_get_path_instance(npip);
6979 			pkt->pkt_time = 3*30;
6980 
6981 			if (scsi_transport(pkt) == TRAN_ACCEPT) {
6982 				switch (pkt->pkt_reason) {
6983 				case CMD_CMPLT:
6984 					switch (SCBP_C(pkt)) {
6985 					case STATUS_GOOD:
6986 						/* Already failed over */
6987 						activation_done = 1;
6988 						break;
6989 					case STATUS_RESERVATION_CONFLICT:
6990 						reserve_pending = 1;
6991 						break;
6992 					case STATUS_CHECK:
6993 						check_condition = 1;
6994 						break;
6995 					}
6996 				}
6997 			}
6998 			if (check_condition &&
6999 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
7000 				uint8_t *sns, skey, asc, ascq;
7001 				sns = (uint8_t *)
7002 				    &(((struct scsi_arq_status *)(uintptr_t)
7003 				    (pkt->pkt_scbp))->sts_sensedata);
7004 				skey = scsi_sense_key(sns);
7005 				asc = scsi_sense_asc(sns);
7006 				ascq = scsi_sense_ascq(sns);
7007 				if (skey == KEY_UNIT_ATTENTION &&
7008 				    asc == 0x29) {
7009 					/* Already failed over */
7010 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7011 					    "!vhci_failover(7)(%s): "
7012 					    "path 0x%p POR UA condition\n",
7013 					    guid, (void *)npip));
7014 					if (UA_condition == 0) {
7015 						UA_condition = 1;
7016 						goto check_path_again;
7017 					}
7018 				} else {
7019 					activation_done = 0;
7020 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7021 					    "!vhci_failover(%s): path 0x%p "
7022 					    "unhandled chkcond %x %x %x\n",
7023 					    guid, (void *)npip, skey,
7024 					    asc, ascq));
7025 				}
7026 			}
7027 			scsi_destroy_pkt(pkt);
7028 		}
7029 		scsi_free_consistent_buf(bp);
7030 
7031 		if (activation_done) {
7032 			mdi_rele_path(npip);
7033 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7034 			    "path 0x%p already failedover\n", guid,
7035 			    (void *)npip));
7036 			break;
7037 		}
7038 		if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
7039 			(void) vhci_recovery_reset(vlun,
7040 			    &svp->svp_psd->sd_address,
7041 			    FALSE, VHCI_DEPTH_ALL);
7042 		}
7043 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
7044 		    "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
7045 		    (void *)svp->svp_psd));
7046 		if (sfo->sfo_path_activate(svp->svp_psd, pclass2,
7047 		    vlun->svl_fops_ctpriv) == 0) {
7048 			activation_done = 1;
7049 			mdi_rele_path(npip);
7050 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7051 			    "path 0x%p successfully activated\n", guid,
7052 			    (void *)npip));
7053 			break;
7054 		}
7055 		pip = npip;
7056 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
7057 		    pip, &npip);
7058 		mdi_rele_path(pip);
7059 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7060 	if (activation_done == 0) {
7061 		pclass1 = pclass2;
7062 		goto next_pathclass;
7063 	}
7064 
7065 	/*
7066 	 * If we are here, we have succeeded in activating path npip of
7067 	 * pathclass pclass2; validate all paths of pclass2 by "ping"-ing
7068 	 * each one and mark the good ones ONLINE.  Also, set the state
7069 	 * of the paths belonging to the previously active pathclass to
7070 	 * STANDBY.
7071 	 */
7072 	pip = npip = NULL;
7073 	sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7074 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
7075 	    NULL, &npip);
7076 	if (npip == NULL || sps != MDI_SUCCESS) {
7077 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
7078 		    "device %s (GUID %s): paths may be busy\n",
7079 		    ddi_node_name(cdip), guid));
7080 		goto done;
7081 	}
7082 	do {
7083 		(void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
7084 		if (mdi_prop_lookup_string(npip, "path-class", &pclass)
7085 		    != MDI_SUCCESS) {
7086 			pip = npip;
7087 			sps = mdi_select_path(cdip, NULL,
7088 			    (MDI_SELECT_ONLINE_PATH |
7089 			    MDI_SELECT_STANDBY_PATH |
7090 			    MDI_SELECT_USER_DISABLE_PATH),
7091 			    pip, &npip);
7092 			mdi_rele_path(pip);
7093 			continue;
7094 		}
7095 		if (strcmp(pclass, pclass2) == 0) {
7096 			if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
7097 				svp = (scsi_vhci_priv_t *)
7098 				    mdi_pi_get_vhci_private(npip);
7099 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7100 				    "!vhci_failover(8)(%s): "
7101 				    "pinging path 0x%p\n",
7102 				    guid, (void *)npip));
7103 				if (sfo->sfo_path_ping(svp->svp_psd,
7104 				    vlun->svl_fops_ctpriv) == 1) {
7105 					mdi_pi_set_state(npip,
7106 					    MDI_PATHINFO_STATE_ONLINE);
7107 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7108 					    "!vhci_failover(9)(%s): "
7109 					    "path 0x%p ping successful, "
7110 					    "marked online\n", guid,
7111 					    (void *)npip));
7112 					MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
7113 				}
7114 			}
7115 		} else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
7116 		    == 0)) {
7117 			if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
7118 				mdi_pi_set_state(npip,
7119 				    MDI_PATHINFO_STATE_STANDBY);
7120 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7121 				    "!vhci_failover(10)(%s): path 0x%p marked "
7122 				    "STANDBY\n", guid, (void *)npip));
7123 				MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
7124 			}
7125 		}
7126 		(void) mdi_prop_free(pclass);
7127 		pip = npip;
7128 		sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7129 		    MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH),
7130 		    pip, &npip);
7131 		mdi_rele_path(pip);
7132 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7133 
7134 	/*
7135 	 * Update the AccessState of related MP-API TPGs
7136 	 */
7137 	(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
7138 
7139 	vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
7140 	    "for device %s (GUID %s): failed over from %s to %s",
7141 	    ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
7142 	    s_pclass), pclass2);
7143 	ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP);
7144 	(void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1));
7145 	mutex_enter(&vlun->svl_mutex);
7146 	ptr2 = vlun->svl_active_pclass;
7147 	vlun->svl_active_pclass = ptr1;
7148 	mutex_exit(&vlun->svl_mutex);
7149 	if (ptr2) {
7150 		kmem_free(ptr2, strlen(ptr2)+1);
7151 	}
7152 	mutex_enter(&vhci->vhci_mutex);
7153 	scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
7154 	    &vhci->vhci_reset_notify_listf);
7155 	/* All reservations are cleared upon these resets. */
7156 	vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
7157 	mutex_exit(&vhci->vhci_mutex);
7158 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
7159 	    "pathclass for %s is now %s\n", guid, pclass2));
7160 	retval = MDI_SUCCESS;
7161 
7162 done:
7163 	vlun->svl_failover_status = retval;
7164 	if (flags == MDI_FAILOVER_ASYNC) {
7165 		VHCI_RELEASE_LUN(vlun);
7166 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7167 		    "releasing lun, as failover was ASYNC\n"));
7168 	} else {
7169 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7170 		    "NOT releasing lun, as failover was SYNC\n"));
7171 	}
7172 	return (retval);
7173 }
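
/*
 * A minimal sketch of the mdi_select_path() cursor idiom used
 * throughout the failover code above: the previously selected
 * pathinfo node seeds the next lookup and its hold is released once
 * the successor has been selected.  VHCI_EXAMPLES is a hypothetical
 * guard; the sketch is illustrative and not built into the driver.
 */
#ifdef VHCI_EXAMPLES
static void
vhci_example_walk_standby_paths(dev_info_t *cdip)
{
	mdi_pathinfo_t	*pip, *npip;
	int		sps;

	pip = npip = NULL;
	sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
	    NULL, &npip);
	while ((npip != NULL) && (sps == MDI_SUCCESS)) {
		/* npip is held here; operate on it, then advance */
		pip = npip;
		sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
		    pip, &npip);
		mdi_rele_path(pip);
	}
}
#endif	/* VHCI_EXAMPLES */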
7174 
7175 /*
7176  * vhci_client_attached is called after the successful attach of a
7177  * client devinfo node.
7178  */
7179 static void
7180 vhci_client_attached(dev_info_t *cdip)
7181 {
7182 	mdi_pathinfo_t	*pip;
7183 	int		circular;
7184 
7185 	/*
7186 	 * At this point the client has attached and its instance number is
7187 	 * valid, so we can set up kstats.  We need to do this here because it
7188 	 * is possible for paths to go online prior to client attach, in which
7189 	 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
7190 	 * was a noop.
7191 	 */
7192 	ndi_devi_enter(cdip, &circular);
7193 	for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
7194 	    pip = mdi_get_next_phci_path(cdip, pip))
7195 		vhci_kstat_create_pathinfo(pip);
7196 	ndi_devi_exit(cdip, circular);
7197 }
7198 
7199 /*
7200  * quiesce all of the online paths
7201  */
7202 static int
7203 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
7204     char *guid, char *active_pclass_ptr)
7205 {
7206 	scsi_vhci_priv_t	*svp;
7207 	char			*s_pclass = NULL;
7208 	mdi_pathinfo_t		*npip, *pip;
7209 	int			sps;
7210 
7211 	/* quiesce currently active paths */
7212 	s_pclass = NULL;
7213 	pip = npip = NULL;
7214 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
7215 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
7216 		return (1);
7217 	}
7218 	do {
7219 		if (mdi_prop_lookup_string(npip, "path-class",
7220 		    &s_pclass) != MDI_SUCCESS) {
7221 			mdi_rele_path(npip);
7222 			vhci_log(CE_NOTE, vdip, "!Failover operation failed "
7223 			    "for device %s (GUID %s) due to an internal "
7224 			    "error", ddi_node_name(cdip), guid);
7225 			return (1);
7226 		}
7227 		if (strcmp(s_pclass, active_pclass_ptr) == 0) {
7228 			/*
7229 			 * Quiesce the path.  Free s_pclass since
7230 			 * we don't need it anymore.
7231 			 */
7232 			VHCI_DEBUG(1, (CE_NOTE, NULL,
7233 			    "!vhci_failover(2)(%s): failing over "
7234 			    "from %s; quiescing path %p\n",
7235 			    guid, s_pclass, (void *)npip));
7236 			(void) mdi_prop_free(s_pclass);
7237 			svp = (scsi_vhci_priv_t *)
7238 			    mdi_pi_get_vhci_private(npip);
7239 			if (svp == NULL) {
7240 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7241 				    "!vhci_failover(2.5)(%s): no "
7242 				    "client priv! %p offlined?\n",
7243 				    guid, (void *)npip));
7244 				pip = npip;
7245 				sps = mdi_select_path(cdip, NULL,
7246 				    MDI_SELECT_ONLINE_PATH, pip, &npip);
7247 				mdi_rele_path(pip);
7248 				continue;
7249 			}
7250 			if (scsi_abort(&svp->svp_psd->sd_address, NULL)
7251 			    == 0) {
7252 				(void) vhci_recovery_reset(vlun,
7253 				    &svp->svp_psd->sd_address, FALSE,
7254 				    VHCI_DEPTH_TARGET);
7255 			}
7256 			mutex_enter(&svp->svp_mutex);
7257 			if (svp->svp_cmds == 0) {
7258 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7259 				    "!vhci_failover(3)(%s):"
7260 				    "quiesced path %p\n", guid, (void *)npip));
7261 			} else {
7262 				while (svp->svp_cmds != 0) {
7263 					cv_wait(&svp->svp_cv, &svp->svp_mutex);
7264 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7265 					    "!vhci_failover(3.cv)(%s):"
7266 					    "quiesced path %p\n", guid,
7267 					    (void *)npip));
7268 				}
7269 			}
7270 			mutex_exit(&svp->svp_mutex);
7271 		} else {
7272 			/*
7273 			 * make sure we free up the memory
7274 			 */
7275 			(void) mdi_prop_free(s_pclass);
7276 		}
7277 		pip = npip;
7278 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
7279 		    pip, &npip);
7280 		mdi_rele_path(pip);
7281 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7282 	return (0);
7283 }
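
/*
 * A minimal sketch of the drain performed above: svp_cmds counts
 * outstanding commands on the path and is protected by svp_mutex;
 * waiters block on svp_cv until the count reaches zero.
 * VHCI_EXAMPLES is a hypothetical guard; the sketch is not built
 * into the driver.
 */
#ifdef VHCI_EXAMPLES
static void
vhci_example_drain_path(scsi_vhci_priv_t *svp)
{
	mutex_enter(&svp->svp_mutex);
	while (svp->svp_cmds != 0)
		cv_wait(&svp->svp_cv, &svp->svp_mutex);
	mutex_exit(&svp->svp_mutex);
}
#endif	/* VHCI_EXAMPLES */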
7284 
7285 static struct scsi_vhci_lun *
7286 vhci_lun_lookup(dev_info_t *tgt_dip)
7287 {
7288 	return ((struct scsi_vhci_lun *)
7289 	    mdi_client_get_vhci_private(tgt_dip));
7290 }
7291 
7292 static struct scsi_vhci_lun *
7293 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
7294 {
7295 	struct scsi_vhci_lun *svl;
7296 
7297 	if ((svl = vhci_lun_lookup(tgt_dip)) != NULL) {
7298 		return (svl);
7299 	}
7300 
7301 	svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
7302 	svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP);
7303 	(void) strcpy(svl->svl_lun_wwn, guid);
7304 	mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
7305 	cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
7306 	sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
7307 	svl->svl_waiting_for_activepath = 1;
7308 	svl->svl_sector_size = 1;
7309 	mdi_client_set_vhci_private(tgt_dip, svl);
7310 	*didalloc = 1;
7311 	VHCI_DEBUG(1, (CE_NOTE, NULL,
7312 	    "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
7313 	    guid, (void *)svl));
7314 	return (svl);
7315 }
7316 
7317 static void
7318 vhci_lun_free(dev_info_t *tgt_dip)
7319 {
7320 	struct scsi_vhci_lun *dvlp;
7321 	char *guid;
7322 	struct scsi_device *sd;
7323 
7324 	/*
7325 	 * The scsi_device was set to driver private during child node
7326 	 * initialization in the scsi_hba_bus_ctl().
7327 	 */
7328 	sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip);
7329 
7330 	dvlp = (struct scsi_vhci_lun *)
7331 	    mdi_client_get_vhci_private(tgt_dip);
7332 	ASSERT(dvlp != NULL);
7333 
7334 	mdi_client_set_vhci_private(tgt_dip, NULL);
7335 
7336 	guid = dvlp->svl_lun_wwn;
7337 	ASSERT(guid != NULL);
7338 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));
7339 
7340 	mutex_enter(&dvlp->svl_mutex);
7341 	if (dvlp->svl_active_pclass != NULL) {
7342 		kmem_free(dvlp->svl_active_pclass,
7343 		    strlen(dvlp->svl_active_pclass)+1);
7344 	}
7345 	dvlp->svl_active_pclass = NULL;
7346 	mutex_exit(&dvlp->svl_mutex);
7347 
7348 	if (dvlp->svl_lun_wwn != NULL) {
7349 		kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1);
7350 	}
7351 	dvlp->svl_lun_wwn = NULL;
7352 
7353 	if (dvlp->svl_fops_name) {
7354 		kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1);
7355 	}
7356 	dvlp->svl_fops_name = NULL;
7357 
7358 	if (dvlp->svl_fops_ctpriv != NULL &&
7359 	    dvlp->svl_fops != NULL) {
7360 		dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
7361 	}
7362 
7363 	if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
7364 		taskq_destroy(dvlp->svl_taskq);
7365 
7366 	mutex_destroy(&dvlp->svl_mutex);
7367 	cv_destroy(&dvlp->svl_cv);
7368 	sema_destroy(&dvlp->svl_pgr_sema);
7369 	kmem_free(dvlp, sizeof (*dvlp));
7370 	/*
7371 	 * vhci_lun_free may be called before the tgt_dip
7372 	 * initialization, so check whether sd is NULL.
7373 	 */
7374 	if (sd != NULL)
7375 		scsi_device_hba_private_set(sd, NULL);
7376 }
7377 
7378 int
7379 vhci_do_scsi_cmd(struct scsi_pkt *pkt)
7380 {
7381 	int	err = 0;
7382 	int	retry_cnt = 0;
7383 	uint8_t	*sns, skey;
7384 
7385 #ifdef DEBUG
7386 	if (vhci_debug > 5) {
7387 		vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip,
7388 		    CE_WARN, "Vhci command", pkt->pkt_cdbp);
7389 	}
7390 #endif
7391 
7392 retry:
7393 	err = scsi_poll(pkt);
7394 	if (err) {
7395 		if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
7396 			if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
7397 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7398 				    "!v_s_do_s_c: RELEASE conflict\n"));
7399 				return (0);
7400 			}
7401 		}
7402 		if (retry_cnt++ < 6) {
7403 			VHCI_DEBUG(1, (CE_WARN, NULL,
7404 			    "!v_s_do_s_c:retry packet 0x%p "
7405 			    "status 0x%x reason %s",
7406 			    (void *)pkt, SCBP_C(pkt),
7407 			    scsi_rname(pkt->pkt_reason)));
7408 			if ((pkt->pkt_reason == CMD_CMPLT) &&
7409 			    (SCBP_C(pkt) == STATUS_CHECK) &&
7410 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
7411 				sns = (uint8_t *)
7412 				    &(((struct scsi_arq_status *)(uintptr_t)
7413 				    (pkt->pkt_scbp))->sts_sensedata);
7414 				skey = scsi_sense_key(sns);
7415 				VHCI_DEBUG(1, (CE_WARN, NULL,
7416 				    "!v_s_do_s_c:retry "
7417 				    "packet 0x%p  sense data %s", (void *)pkt,
7418 				    scsi_sname(skey)));
7419 			}
7420 			goto retry;
7421 		}
7422 		VHCI_DEBUG(1, (CE_WARN, NULL,
7423 		    "!v_s_do_s_c: failed transport 0x%p 0x%x",
7424 		    (void *)pkt, SCBP_C(pkt)));
7425 		return (0);
7426 	}
7427 
7428 	switch (pkt->pkt_reason) {
7429 		case CMD_TIMEOUT:
7430 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
7431 			    "out (pkt 0x%p)", (void *)pkt));
7432 			return (0);
7433 		case CMD_CMPLT:
7434 			switch (SCBP_C(pkt)) {
7435 				case STATUS_GOOD:
7436 					break;
7437 				case STATUS_CHECK:
7438 					if (pkt->pkt_state & STATE_ARQ_DONE) {
7439 						sns = (uint8_t *)&(((
7440 						    struct scsi_arq_status *)
7441 						    (uintptr_t)
7442 						    (pkt->pkt_scbp))->
7443 						    sts_sensedata);
7444 						skey = scsi_sense_key(sns);
7445 						if ((skey ==
7446 						    KEY_UNIT_ATTENTION) ||
7447 						    (skey ==
7448 						    KEY_NOT_READY)) {
7449 							/*
7450 							 * clear unit attn.
7451 							 */
7452 
7453 							VHCI_DEBUG(1,
7454 							    (CE_WARN, NULL,
7455 							    "!v_s_do_s_c: "
7456 							    "retry "
7457 							    "packet 0x%p sense "
7458 							    "data %s",
7459 							    (void *)pkt,
7460 							    scsi_sname
7461 							    (skey)));
7462 							goto retry;
7463 						}
7464 						VHCI_DEBUG(4, (CE_WARN, NULL,
7465 						    "!ARQ while "
7466 						    "transporting "
7467 						    "(pkt 0x%p)",
7468 						    (void *)pkt));
7469 						return (0);
7470 					}
7471 					return (0);
7472 				default:
7473 					VHCI_DEBUG(1, (CE_WARN, NULL,
7474 					    "!Bad status returned "
7475 					    "(pkt 0x%p, status %x)",
7476 					    (void *)pkt, SCBP_C(pkt)));
7477 					return (0);
7478 			}
7479 			break;
7480 		case CMD_INCOMPLETE:
7481 		case CMD_RESET:
7482 		case CMD_ABORTED:
7483 		case CMD_TRAN_ERR:
7484 			if (retry_cnt++ < 1) {
7485 				VHCI_DEBUG(1, (CE_WARN, NULL,
7486 				    "!v_s_do_s_c: retry packet 0x%p %s",
7487 				    (void *)pkt, scsi_rname(pkt->pkt_reason)));
7488 				goto retry;
7489 			}
7490 			/* FALLTHROUGH */
7491 		default:
7492 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
7493 			    "complete successfully (pkt 0x%p,"
7494 			    "reason %x)", (void *)pkt, pkt->pkt_reason));
7495 			return (0);
7496 	}
7497 	return (1);
7498 }
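
/*
 * A minimal sketch of the sense extraction used above and in
 * vhci_failover(): once STATE_ARQ_DONE is set, a scsi_arq_status
 * overlays pkt_scbp and the sense bytes live in sts_sensedata.
 * VHCI_EXAMPLES is a hypothetical guard; the sketch is not built
 * into the driver.
 */
#ifdef VHCI_EXAMPLES
static uint8_t
vhci_example_arq_sense_key(struct scsi_pkt *pkt)
{
	uint8_t	*sns;

	ASSERT(pkt->pkt_state & STATE_ARQ_DONE);
	sns = (uint8_t *)&(((struct scsi_arq_status *)(uintptr_t)
	    (pkt->pkt_scbp))->sts_sensedata);
	return (scsi_sense_key(sns));
}
#endif	/* VHCI_EXAMPLES */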
7499 
7500 static int
7501 vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
7502 {
7503 	mdi_pathinfo_t		*pip, *spip;
7504 	dev_info_t		*cdip;
7505 	struct scsi_vhci_priv	*svp;
7506 	mdi_pathinfo_state_t	pstate;
7507 	uint32_t		p_ext_state;
7508 	int			circular;
7509 
7510 	cdip = vlun->svl_dip;
7511 	pip = spip = NULL;
7512 	ndi_devi_enter(cdip, &circular);
7513 	pip = mdi_get_next_phci_path(cdip, NULL);
7514 	while (pip != NULL) {
7515 		(void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
7516 		if (pstate != MDI_PATHINFO_STATE_ONLINE) {
7517 			spip = pip;
7518 			pip = mdi_get_next_phci_path(cdip, spip);
7519 			continue;
7520 		}
7521 		mdi_hold_path(pip);
7522 		ndi_devi_exit(cdip, circular);
7523 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7524 		mutex_enter(&svp->svp_mutex);
7525 		while (svp->svp_cmds != 0) {
7526 			if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
7527 			    drv_usectohz(vhci_path_quiesce_timeout * 1000000),
7528 			    TR_CLOCK_TICK) == -1) {
7529 				mutex_exit(&svp->svp_mutex);
7530 				mdi_rele_path(pip);
7531 				VHCI_DEBUG(1, (CE_WARN, NULL,
7532 				    "Quiesce of the lun was not successful "
7533 				    "vlun: 0x%p.", (void *)vlun));
7534 				return (0);
7535 			}
7536 		}
7537 		mutex_exit(&svp->svp_mutex);
7538 		ndi_devi_enter(cdip, &circular);
7539 		spip = pip;
7540 		pip = mdi_get_next_phci_path(cdip, spip);
7541 		mdi_rele_path(spip);
7542 	}
7543 	ndi_devi_exit(cdip, circular);
7544 	return (1);
7545 }
7546 
7547 static int
7548 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
7549 {
7550 	scsi_vhci_lun_t		*vlun;
7551 	vhci_prout_t		*prout;
7552 	int			rval, success;
7553 	mdi_pathinfo_t		*pip, *npip;
7554 	scsi_vhci_priv_t	*osvp;
7555 	dev_info_t		*cdip;
7556 	uchar_t			cdb_1;
7557 	uchar_t			temp_res_key[MHIOC_RESV_KEY_SIZE];
7558 
7559 
7560 	/*
7561 	 * see if there are any other paths available; if none,
7562 	 * then there is nothing to do.
7563 	 */
7564 	cdip = svp->svp_svl->svl_dip;
7565 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7566 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7567 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7568 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7569 		    "%s%d: vhci_pgr_validate_and_register: first path\n",
7570 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7571 		return (1);
7572 	}
7573 
7574 	vlun = svp->svp_svl;
7575 	prout = &vlun->svl_prout;
7576 	ASSERT(vlun->svl_pgr_active != 0);
7577 
7578 	/*
7579 	 * While the path was busy/offlined, some other host might have
7580 	 * cleared this key.  Validate the key on some other path first;
7581 	 * if that fails, return failure.
7582 	 */
7583 
7584 	npip = pip;
7585 	pip = NULL;
7586 	success = 0;
7587 
7588 	/* Save the res key */
7589 	bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE);
7590 
7591 	/*
7592 	 * Sometimes the CDB from the application can be a Register_And_Ignore.
7593 	 * Instead of validation, such a cdb would result in a forced
7594 	 * registration.  Convert it to a normal cdb for validation, and
7595 	 * be sure to restore the cdb afterwards.
7596 	 */
7597 	cdb_1 = vlun->svl_cdb[1];
7598 	vlun->svl_cdb[1] &= 0xe0;
7599 
7600 	do {
7601 		osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
7602 		if (osvp == NULL) {
7603 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7604 			    "vhci_pgr_validate_and_register: no "
7605 			    "client priv! 0x%p offlined?\n",
7606 			    (void *)npip));
7607 			goto next_path_1;
7608 		}
7609 
7610 		if (osvp == svp) {
7611 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7612 			    "vhci_pgr_validate_and_register: same svp 0x%p"
7613 			    " npip 0x%p vlun 0x%p\n",
7614 			    (void *)svp, (void *)npip, (void *)vlun));
7615 			goto next_path_1;
7616 		}
7617 
7618 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7619 		    "vhci_pgr_validate_and_register: First validate on"
7620 		    " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
7621 		    " cdb1 %x\n", (void *)osvp, (void *)vlun,
7622 		    (void *)curthread, vlun->svl_cdb[1]));
7623 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");
7624 
7625 		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7626 
7627 		VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
7628 		    (void *)vlun));
7629 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7630 
7631 		rval = vhci_do_prout(osvp);
7632 		if (rval == 1) {
7633 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7634 			    "%s%d: vhci_pgr_validate_and_register: key"
7635 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7636 			    ddi_get_instance(cdip), (void *)curthread));
7637 			pip = npip;
7638 			success = 1;
7639 			break;
7640 		} else {
7641 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7642 			    "vhci_pgr_validate_and_register: First validation"
7643 			    " on osvp 0x%p failed %x\n", (void *)osvp, rval));
7644 			vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
7645 		}
7646 
7647 		/*
7648 		 * Try other paths
7649 		 */
7650 next_path_1:
7651 		pip = npip;
7652 		rval = mdi_select_path(cdip, NULL,
7653 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7654 		    pip, &npip);
7655 		mdi_rele_path(pip);
7656 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7657 
7658 
7659 	/* Be sure to restore original cdb */
7660 	vlun->svl_cdb[1] = cdb_1;
7661 
7662 	/* Restore the res_key */
7663 	bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7664 
7665 	/*
7666 	 * If the key could not be registered on any path the first time,
7667 	 * return success, as the online should still continue.
7668 	 */
7669 	if (success == 0) {
7670 		return (1);
7671 	}
7672 
7673 	ASSERT(pip != NULL);
7674 
7675 	/*
7676 	 * Force register on new path
7677 	 */
7678 	cdb_1 = vlun->svl_cdb[1];		/* store the cdb */
7679 
7680 	vlun->svl_cdb[1] &= 0xe0;
7681 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
7682 
7683 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");
7684 
7685 	bcopy(prout->active_service_key, prout->service_key,
7686 	    MHIOC_RESV_KEY_SIZE);
7687 	bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7688 
7689 	vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: ");
7690 
7691 	rval = vhci_do_prout(svp);
7692 	vlun->svl_cdb[1] = cdb_1;		/* restore the cdb */
7693 	if (rval != 1) {
7694 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7695 		    "vhci_pgr_validate_and_register: register on new"
7696 		    " path 0x%p svp 0x%p failed %x\n",
7697 		    (void *)pip, (void *)svp, rval));
7698 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
7699 		mdi_rele_path(pip);
7700 		return (0);
7701 	}
7702 
7703 	if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
7704 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7705 		    "vhci_pgr_validate_and_register: zero service key\n"));
7706 		mdi_rele_path(pip);
7707 		return (rval);
7708 	}
7709 
7710 	/*
7711 	 * While the key was force registered, some other host might have
7712 	 * cleared the key. Re-validate key on another pre-existing path
7713 	 * before declaring success.
7714 	 */
7715 	npip = pip;
7716 	pip = NULL;
7717 
7718 	/*
7719 	 * Sometimes the CDB from the application can be a Register and Ignore.
7720 	 * Instead of validation, it would result in a forced registration.
7721 	 * Convert it to a normal cdb for validation, and be sure to
7722 	 * restore the cdb afterwards.
7723 	 */
7724 	cdb_1 = vlun->svl_cdb[1];
7725 	vlun->svl_cdb[1] &= 0xe0;
7726 	success = 0;
7727 
7728 	do {
7729 		osvp = (scsi_vhci_priv_t *)
7730 		    mdi_pi_get_vhci_private(npip);
7731 		if (osvp == NULL) {
7732 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7733 			    "vhci_pgr_validate_and_register: no "
7734 			    "client priv! 0x%p offlined?\n",
7735 			    (void *)npip));
7736 			goto next_path_2;
7737 		}
7738 
7739 		if (osvp == svp) {
7740 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7741 			    "vhci_pgr_validate_and_register: same osvp 0x%p"
7742 			    " npip 0x%p vlun 0x%p\n",
7743 			    (void *)svp, (void *)npip, (void *)vlun));
7744 			goto next_path_2;
7745 		}
7746 
7747 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7748 		    "vhci_pgr_validate_and_register: Re-validation on"
7749 		    " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
7750 		    (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
7751 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7752 
7753 		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7754 
7755 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7756 
7757 		rval = vhci_do_prout(osvp);
7758 		if (rval == 1) {
7759 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7760 			    "%s%d: vhci_pgr_validate_and_register: key"
7761 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7762 			    ddi_get_instance(cdip), (void *)curthread));
7763 			pip = npip;
7764 			success = 1;
7765 			break;
7766 		} else {
7767 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7768 			    "vhci_pgr_validate_and_register: Re-validation on"
7769 			    " osvp 0x%p failed %x\n", (void *)osvp, rval));
7770 			vhci_print_prout_keys(vlun,
7771 			    "v_pgr_val_reg: reval failed: ");
7772 		}
7773 
7774 		/*
7775 		 * Try other paths
7776 		 */
7777 next_path_2:
7778 		pip = npip;
7779 		rval = mdi_select_path(cdip, NULL,
7780 		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7781 		    pip, &npip);
7782 		mdi_rele_path(pip);
7783 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7784 
7785 	/* Be sure to restore original cdb */
7786 	vlun->svl_cdb[1] = cdb_1;
7787 
7788 	if (success == 1) {
7789 		/* Successfully validated registration */
7790 		mdi_rele_path(pip);
7791 		return (1);
7792 	}
7793 
7794 	VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));
7795 
7796 	/*
7797 	 * The key is invalid; back out by registering a key value of 0.
7798 	 */
7799 	VHCI_DEBUG(4, (CE_NOTE, NULL,
7800 	    "vhci_pgr_validate_and_register: backout on"
7801 	    " svp 0x%p being done\n", (void *)svp));
7802 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7803 
7804 	bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7805 	bzero(prout->service_key, MHIOC_RESV_KEY_SIZE);
7806 
7807 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7808 
7809 	/*
7810 	 * Get a new path
7811 	 */
7812 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7813 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7814 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7815 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7816 		    "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
7817 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7818 		return (0);
7819 	}
7820 
7821 	if ((rval = vhci_do_prout(svp)) != 1) {
7822 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7823 		    "vhci_pgr_validate_and_register: backout on"
7824 		    " svp 0x%p failed\n", (void *)svp));
7825 		vhci_print_prout_keys(vlun, "backout failed");
7826 
7827 		VHCI_DEBUG(4, (CE_WARN, NULL,
7828 		    "%s%d: vhci_pgr_validate_and_register: key"
7829 		    " validation and backout failed", ddi_driver_name(cdip),
7830 		    ddi_get_instance(cdip)));
7831 		if (rval == VHCI_PGR_ILLEGALOP) {
7832 			VHCI_DEBUG(4, (CE_WARN, NULL,
7833 			    "%s%d: vhci_pgr_validate_and_register: key"
7834 			    " already cleared", ddi_driver_name(cdip),
7835 			    ddi_get_instance(cdip)));
7836 			rval = 1;
7837 		} else
7838 			rval = 0;
7839 	} else {
7840 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7841 		    "%s%d: vhci_pgr_validate_and_register: key"
7842 		    " validation failed, key backed out\n",
7843 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7844 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
7845 	}
7846 	mdi_rele_path(pip);
7847 
7848 	return (rval);
7849 }
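
/*
 * A minimal sketch of the PROUT service-action rewrite used above:
 * byte 1 of a PERSISTENT RESERVE OUT CDB carries the service action
 * in its low five bits, so masking with 0xe0 clears the action before
 * a new one (e.g. VHCI_PROUT_R_AND_IGNORE) is OR-ed in; the caller
 * restores the saved byte afterwards.  VHCI_EXAMPLES is a
 * hypothetical guard; the sketch is not built into the driver.
 */
#ifdef VHCI_EXAMPLES
static uchar_t
vhci_example_prout_set_action(uchar_t *cdb, uchar_t action)
{
	uchar_t	saved = cdb[1];

	/* clear the old service action, install the new one */
	cdb[1] = (saved & 0xe0) | (action & 0x1f);
	return (saved);		/* caller restores: cdb[1] = saved */
}
#endif	/* VHCI_EXAMPLES */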
7850 
7851 /*
7852  * taskq routine to dispatch a scsi cmd to vhci_scsi_start.  This ensures
7853  * that vhci_scsi_start is not called in interrupt context.
7854  * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we
7855  * need to complete the command if something goes wrong.
7856  */
7857 static void
7858 vhci_dispatch_scsi_start(void *arg)
7859 {
7860 	struct vhci_pkt *vpkt	= (struct vhci_pkt *)arg;
7861 	struct scsi_pkt *tpkt	= vpkt->vpkt_tgt_pkt;
7862 	int rval		= TRAN_BUSY;
7863 
7864 	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
7865 	    " scsi-2 reserve for 0x%p\n",
7866 	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7867 
7868 	/*
7869 	 * To prevent the taskq from being called recursively we set
7870 	 * the VHCI_PKT_THRU_TASKQ bit in vpkt_state.
7871 	 */
7872 	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;
7873 
7874 	/*
7875 	 * Wait for the transport to get ready to send packets;
7876 	 * if it times out, it will return something other than
7877 	 * TRAN_BUSY.  vhci_reserve_delay may need to be tuned for
7878 	 * other transports and is therefore a global.  Using delay()
7879 	 * is safe since this routine is called by taskq dispatch
7880 	 * and not during interrupt context.
7881 	 */
7882 	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
7883 	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
7884 		delay(drv_usectohz(vhci_reserve_delay));
7885 	}
7886 
7887 	switch (rval) {
7888 	case TRAN_ACCEPT:
7889 		return;
7890 
7891 	default:
7892 		/*
7893 		 * This pkt shall be retried, and to ensure another taskq
7894 		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
7895 		 * flag.
7896 		 */
7897 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
7898 
7899 		/* Ensure that the pkt is retried without a reset */
7900 		tpkt->pkt_reason = CMD_ABORTED;
7901 		tpkt->pkt_statistics |= STAT_ABORTED;
7902 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
7903 		    "TRAN_rval %d returned for dip 0x%p", rval,
7904 		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7905 		break;
7906 	}
7907 
7908 	/*
7909 	 * vpkt_org_vpkt should always be NULL here if the retry command
7910 	 * has been successfully dispatched.  If vpkt_org_vpkt != NULL at
7911 	 * this point, it is an error so restore the original vpkt and
7912 	 * return an error to the target driver so it can retry the
7913 	 * command as appropriate.
7914 	 */
7915 	if (vpkt->vpkt_org_vpkt != NULL) {
7916 		struct vhci_pkt		*new_vpkt = vpkt;
7917 		scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
7918 		    mdi_pi_get_vhci_private(vpkt->vpkt_path);
7919 
7920 		vpkt = vpkt->vpkt_org_vpkt;
7921 
7922 		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
7923 		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;
7924 
7925 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
7926 		    new_vpkt->vpkt_tgt_pkt);
7927 
7928 		tpkt = vpkt->vpkt_tgt_pkt;
7929 	}
7930 
7931 	scsi_hba_pkt_comp(tpkt);
7932 }
7933 
7934 static void
7935 vhci_initiate_auto_failback(void *arg)
7936 {
7937 	struct scsi_vhci_lun	*vlun = (struct scsi_vhci_lun *)arg;
7938 	dev_info_t		*vdip, *cdip;
7939 	int			held;
7940 
7941 	cdip = vlun->svl_dip;
7942 	vdip = ddi_get_parent(cdip);
7943 
7944 	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
7945 
7946 	/*
7947 	 * Perform a final check to see if the active path class is indeed
7948 	 * not the preferred path class.  In the time since the auto failback
7949 	 * was dispatched, an external failover could have been detected.
7950 	 * [Some other host could have detected this condition and triggered
7951 	 *  the auto failback before us.]
7952 	 * In such a case, going ahead with failover would negate the
7953 	 * whole purpose of auto failback.
7954 	 */
7955 	mutex_enter(&vlun->svl_mutex);
7956 	if (vlun->svl_active_pclass != NULL) {
7957 		char				*best_pclass;
7958 		struct scsi_failover_ops	*fo;
7959 
7960 		fo = vlun->svl_fops;
7961 
7962 		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
7963 		    vlun->svl_fops_ctpriv);
7964 		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
7965 			mutex_exit(&vlun->svl_mutex);
7966 			VHCI_RELEASE_LUN(vlun);
7967 			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
7968 			    "auto failback for %s as %s pathclass already "
7969 			    "active.\n", vlun->svl_lun_wwn, best_pclass));
7970 			return;
7971 		}
7972 	}
7973 	mutex_exit(&vlun->svl_mutex);
7974 	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
7975 	    == MDI_SUCCESS) {
7976 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7977 		    "succeeded for device %s (GUID %s)",
7978 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7979 	} else {
7980 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7981 		    "failed for device %s (GUID %s)",
7982 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7983 	}
7984 	VHCI_RELEASE_LUN(vlun);
7985 }
7986 
7987 #ifdef DEBUG
7988 static void
7989 vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
7990 {
7991 	vhci_clean_print(NULL, 5, "Current PGR Keys",
7992 	    (uchar_t *)prin, numkeys * 8);
7993 }
7994 #endif
7995 
7996 static void
7997 vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
7998 {
7999 	int			i;
8000 	vhci_prout_t		*prout;
8001 	char			buf1[4*MHIOC_RESV_KEY_SIZE + 1];
8002 	char			buf2[4*MHIOC_RESV_KEY_SIZE + 1];
8003 	char			buf3[4*MHIOC_RESV_KEY_SIZE + 1];
8004 	char			buf4[4*MHIOC_RESV_KEY_SIZE + 1];
8005 
8006 	prout = &vlun->svl_prout;
8007 
8008 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8009 		(void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
8010 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8011 		(void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]);
8012 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8013 		(void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
8014 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8015 		(void) sprintf(&buf4[4*i], "[%02x]",
8016 		    prout->active_service_key[i]);
8017 
8018 	/* Print everything in one go; otherwise the output gets jumbled */
8019 	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
8020 	    "res_key           : %s\n"
8021 	    "service_key       : %s\n"
8022 	    "active_res_key    : %s\n"
8023 	    "active_service_key: %s\n",
8024 	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
8025 }
8026 
8027 /*
8028  * Called from vhci_scsi_start to update the pHCI pkt with target packet.
8029  */
8030 static void
8031 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
8032 {
8034 	ASSERT(vpkt->vpkt_hba_pkt);
8035 
8036 	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
8037 	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;
8038 
8039 	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
8040 	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
8041 		/*
8042 		 * A polled command is requested or the HBA is in
8043 		 * the suspended state.
8044 		 */
8045 		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
8046 		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
8047 	} else {
8048 		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
8049 	}
8050 	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
8051 	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
8052 	    vpkt->vpkt_tgt_init_cdblen);
8053 	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;
8054 
8055 	/* Re-initialize the following pHCI packet state information */
8056 	vpkt->vpkt_hba_pkt->pkt_state = 0;
8057 	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
8058 	vpkt->vpkt_hba_pkt->pkt_reason = 0;
8059 }
8060 
8061 static int
8062 vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
8063     void *arg, void *result)
8064 {
8065 	int ret = DDI_SUCCESS;
8066 
8067 	/*
8068 	 * Generic processing in MPxIO framework
8069 	 */
8070 	ret = mdi_bus_power(parent, impl_arg, op, arg, result);
8071 
8072 	switch (ret) {
8073 	case MDI_SUCCESS:
8074 		ret = DDI_SUCCESS;
8075 		break;
8076 	case MDI_FAILURE:
8077 		ret = DDI_FAILURE;
8078 		break;
8079 	default:
8080 		break;
8081 	}
8082 
8083 	return (ret);
8084 }
8085 
8086 static int
8087 vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
8088     mdi_pathinfo_t *pip)
8089 {
8090 	dev_info_t		*cdip;
8091 	mdi_pathinfo_t		*npip = NULL;
8092 	scsi_vhci_priv_t	*svp = NULL;
8093 	struct scsi_address	*pap = NULL;
8094 	scsi_hba_tran_t		*hba = NULL;
8095 	int			sps;
8096 	int			mps_flag;
8097 	int			rval = 0;
8098 
8099 	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
8100 	if (pip) {
8101 		/*
8102 		 * If the call is from vhci_pathinfo_state_change,
8103 		 * then this path was busy and is becoming ready to accept IO.
8104 		 */
8105 		ASSERT(ap != NULL);
8106 		hba = ap->a_hba_tran;
8107 		ASSERT(hba != NULL);
8108 		rval = scsi_ifsetcap(ap, cap, val, whom);
8109 
8110 		VHCI_DEBUG(2, (CE_NOTE, NULL,
8111 		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
8112 		    (void *)pip, (void *)ap, rval));
8113 
8114 		return (rval);
8115 	}
8116 
8117 	/*
8118 	 * Set capability on all the pHCIs.
8119 	 * If any path is busy, then the capability would be set by
8120 	 * vhci_pathinfo_state_change.
8121 	 */
8122 
8123 	cdip = ADDR2DIP(ap);
8124 	ASSERT(cdip != NULL);
8125 	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
8126 	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
8127 		VHCI_DEBUG(2, (CE_WARN, NULL,
8128 		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
8129 		    (void *)cdip));
8130 		return (0);
8131 	}
8132 
8133 again:
8134 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
8135 	if (svp == NULL) {
8136 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8137 		    "priv is NULL, pip 0x%p", (void *)pip));
8138 		mdi_rele_path(pip);
8139 		return (rval);
8140 	}
8141 
8142 	if (svp->svp_psd == NULL) {
8143 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8144 		    "psd is NULL, pip 0x%p, svp 0x%p",
8145 		    (void *)pip, (void *)svp));
8146 		mdi_rele_path(pip);
8147 		return (rval);
8148 	}
8149 
8150 	pap = &svp->svp_psd->sd_address;
8151 	ASSERT(pap != NULL);
8152 	hba = pap->a_hba_tran;
8153 	ASSERT(hba != NULL);
8154 
8155 	if (hba->tran_setcap != NULL) {
8156 		rval = scsi_ifsetcap(pap, cap, val, whom);
8157 
8158 		VHCI_DEBUG(2, (CE_NOTE, NULL,
8159 		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
8160 		    (void *)pip, (void *)ap, rval));
8161 
8162 		/*
8163 		 * Select next path and issue the setcap, repeat
8164 		 * until all paths are exhausted
8165 		 */
8166 		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
8167 		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
8168 			mdi_rele_path(pip);
8169 			return (1);
8170 		}
8171 		mdi_rele_path(pip);
8172 		pip = npip;
8173 		goto again;
8174 	}
8175 	mdi_rele_path(pip);
8176 	return (rval);
8177 }
8178 
8179 static int
8180 vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8181     void *arg, dev_info_t **child)
8182 {
8183 	char *guid;
8184 
8185 	if (vhci_bus_config_debug)
8186 		flags |= NDI_DEVI_DEBUG;
8187 
8188 	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
8189 		guid = vhci_devnm_to_guid((char *)arg);
8190 	else
8191 		guid = NULL;
8192 
8193 	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
8194 	    == MDI_SUCCESS)
8195 		return (NDI_SUCCESS);
8196 	else
8197 		return (NDI_FAILURE);
8198 }
8199 
8200 static int
8201 vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8202     void *arg)
8203 {
8204 	if (vhci_bus_config_debug)
8205 		flags |= NDI_DEVI_DEBUG;
8206 
8207 	return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
8208 }
8209 
8210 /*
8211  * Take the original vhci_pkt, create a duplicate of the pkt for resending
8212  * as though it originated in ssd.
8213  */
8214 static struct scsi_pkt *
8215 vhci_create_retry_pkt(struct vhci_pkt *vpkt)
8216 {
8217 	struct vhci_pkt *new_vpkt = NULL;
8218 	struct scsi_pkt	*pkt = NULL;
8219 
8220 	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
8221 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8222 
8223 	/*
8224 	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
8225 	 */
8226 	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
8227 	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
8228 	    vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
8229 	if (pkt != NULL) {
8230 		new_vpkt = TGTPKT2VHCIPKT(pkt);
8231 
8232 		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
8233 		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
8234 		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
8235 		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;
8236 
8237 		pkt->pkt_resid = 0;
8238 		pkt->pkt_statistics = 0;
8239 		pkt->pkt_reason = 0;
8240 
8241 		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
8242 		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);
8243 
8244 		/*
8245 		 * Save a pointer to the original vhci_pkt
8246 		 */
8247 		new_vpkt->vpkt_org_vpkt = vpkt;
8248 	}
8249 
8250 	return (pkt);
8251 }
8252 
8253 /*
8254  * Copy the successful completion information from the hba packet into
8255  * the original target pkt from the upper layer.  Returns the original
8256  * vpkt and destroys the new vpkt from the internal retry.
8257  */
8258 static struct vhci_pkt *
8259 vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
8260 {
8261 	struct vhci_pkt		*ret_vpkt = NULL;
8262 	struct scsi_pkt		*tpkt = NULL;
8263 	struct scsi_pkt		*hba_pkt = NULL;
8264 	scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
8265 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8266 
8267 	ASSERT(vpkt->vpkt_org_vpkt != NULL);
8268 	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
8269 	    "completed successfully!\n"));
8270 
8271 	ret_vpkt = vpkt->vpkt_org_vpkt;
8272 	tpkt = ret_vpkt->vpkt_tgt_pkt;
8273 	hba_pkt = vpkt->vpkt_hba_pkt;
8274 
8275 	/*
8276 	 * Copy the good status into the target driver's packet
8277 	 */
8278 	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
8279 	tpkt->pkt_resid = hba_pkt->pkt_resid;
8280 	tpkt->pkt_state = hba_pkt->pkt_state;
8281 	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
8282 	tpkt->pkt_reason = hba_pkt->pkt_reason;
8283 
8284 	/*
8285 	 * Destroy the internally created vpkt for the retry
8286 	 */
8287 	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
8288 	    vpkt->vpkt_tgt_pkt);
8289 
8290 	return (ret_vpkt);
8291 }
8292 
8293 /* restart the request sense command */
8294 static void
8295 vhci_uscsi_restart_sense(void *arg)
8296 {
8297 	struct buf 	*rqbp;
8298 	struct buf 	*bp;
8299 	struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
8300 	mp_uscsi_cmd_t 	*mp_uscmdp;
8301 
8302 	VHCI_DEBUG(4, (CE_WARN, NULL,
8303 	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));
8304 
8305 	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
8306 		/* if it fails - need to wakeup the original command */
8307 		mp_uscmdp = rqpkt->pkt_private;
8308 		bp = mp_uscmdp->cmdbp;
8309 		rqbp = mp_uscmdp->rqbp;
8310 		ASSERT(mp_uscmdp && bp && rqbp);
8311 		scsi_free_consistent_buf(rqbp);
8312 		scsi_destroy_pkt(rqpkt);
8313 		bp->b_resid = bp->b_bcount;
8314 		bioerror(bp, EIO);
8315 		biodone(bp);
8316 	}
8317 }
8318 
8319 /*
8320  * auto-rqsense is not enabled so we have to retrieve the request sense
8321  * manually.
8322  */
8323 static int
8324 vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
8325 {
8326 	struct buf 		*rqbp, *cmdbp;
8327 	struct scsi_pkt 	*rqpkt;
8328 	int			rval = 0;
8329 
8330 	cmdbp = mp_uscmdp->cmdbp;
8331 	ASSERT(cmdbp != NULL);
8332 
8333 	VHCI_DEBUG(4, (CE_WARN, NULL,
8334 	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
8335 	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
8336 	/* set up the packet information and cdb */
8337 	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
8338 	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
8339 		return (-1);
8340 	}
8341 
8342 	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
8343 	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
8344 		scsi_free_consistent_buf(rqbp);
8345 		return (-1);
8346 	}
8347 
8348 	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
8349 	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
8350 
8351 	mp_uscmdp->rqbp = rqbp;
8352 	rqbp->b_private = mp_uscmdp;
8353 	rqpkt->pkt_flags |= FLAG_SENSING;
8354 	rqpkt->pkt_time = 60;
8355 	rqpkt->pkt_comp = vhci_uscsi_iodone;
8356 	rqpkt->pkt_private = mp_uscmdp;
8357 
8358 	/*
8359 	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8360 	 * selection is not based on path_instance.
8361 	 */
8362 	if (scsi_pkt_allocated_correctly(rqpkt))
8363 		rqpkt->pkt_path_instance = 0;
8364 
8365 	/* issue the request sense */
8366 	switch (scsi_transport(rqpkt)) {
8367 	case TRAN_ACCEPT:
8368 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8369 		    "transport accepted."));
8370 		break;
8371 	case TRAN_BUSY:
8372 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8373 		    "transport busy, setting timeout."));
8374 		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
8375 		    (drv_usectohz(5 * 1000000)));
8376 		break;
8377 	default:
8378 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8379 		    "transport failed"));
8380 		scsi_free_consistent_buf(rqbp);
8381 		scsi_destroy_pkt(rqpkt);
8382 		rval = -1;
8383 	}
8384 
8385 	return (rval);
8386 }
8387 
8388 /*
8389  * done routine for the mpapi uscsi command - this behaves as though
8390  * FLAG_DIAGNOSE is set, meaning there are no retries except for a manual
8391  * request sense.
8392  */
8393 void
8394 vhci_uscsi_iodone(struct scsi_pkt *pkt)
8395 {
8396 	struct buf 			*bp;
8397 	mp_uscsi_cmd_t 			*mp_uscmdp;
8398 	struct uscsi_cmd 		*uscmdp;
8399 	struct scsi_arq_status 		*arqstat;
8400 	int 				err;
8401 
8402 	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
8403 	uscmdp = mp_uscmdp->uscmdp;
8404 	bp = mp_uscmdp->cmdbp;
8405 	ASSERT(bp != NULL);
8406 	VHCI_DEBUG(4, (CE_WARN, NULL,
8407 	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
8408 	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
8409 	/* Save the status and the residual into the uscsi_cmd struct */
8410 	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
8411 	uscmdp->uscsi_resid = bp->b_resid;
8412 
8413 	/* return early on a fully successful command */
8414 	if (pkt->pkt_reason == CMD_CMPLT &&
8415 	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
8416 	    pkt->pkt_resid == 0) {
8417 		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8418 		scsi_destroy_pkt(pkt);
8419 		biodone(bp);
8420 		return;
8421 	}
8422 	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
8423 	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
8424 	    pkt->pkt_reason, pkt->pkt_resid,
8425 	    pkt->pkt_state, bp->b_bcount, bp->b_resid));
8426 
8427 	err = EIO;
8428 
8429 	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
8430 	if (pkt->pkt_reason != CMD_CMPLT) {
8431 		/*
8432 		 * The command did not complete.
8433 		 */
8434 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8435 		    "vhci_uscsi_iodone: command did not complete."
8436 		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
8437 		if (pkt->pkt_flags & FLAG_SENSING) {
8438 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8439 		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
8440 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
8441 			err = ETIMEDOUT;
8442 		}
8443 	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
8444 		/*
8445 		 * The auto-rqsense happened, and the packet has a filled-in
8446 		 * scsi_arq_status structure, pointed to by pkt_scbp.
8447 		 */
8448 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8449 		    "vhci_uscsi_iodone: received auto-requested sense"));
8450 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8451 			/* get the amount of data to copy into rqbuf */
8452 			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
8453 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8454 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8455 			uscmdp->uscsi_rqstatus =
8456 			    *((char *)&arqstat->sts_rqpkt_status);
8457 			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
8458 			    rqlen != 0) {
8459 				bcopy(&(arqstat->sts_sensedata),
8460 				    uscmdp->uscsi_rqbuf, rqlen);
8461 			}
8462 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8463 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8464 			    "vhci_uscsi_iodone: ARQ "
8465 			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
8466 			    "xfer: %d rqpkt_resid: %d\n",
8467 			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
8468 			    uscmdp->uscsi_rqlen, rqlen,
8469 			    arqstat->sts_rqpkt_resid));
8470 		}
8471 	} else if (pkt->pkt_flags & FLAG_SENSING) {
8472 		struct buf *rqbp;
8473 		struct scsi_status *rqstatus;
8474 
8475 		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
8476 		/* a manual request sense was done - get the information */
8477 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8478 			int rqlen = SENSE_LENGTH - pkt->pkt_resid;
8479 
8480 			rqbp = mp_uscmdp->rqbp;
8481 			/* get the amount of data to copy into rqbuf */
8482 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8483 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8484 			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
8485 			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
8486 				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
8487 				    rqlen);
8488 			}
8489 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8490 			scsi_free_consistent_buf(rqbp);
8491 		}
8492 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING "
8493 		    "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
8494 		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
8495 	} else {
8496 		struct scsi_status *status =
8497 		    (struct scsi_status *)pkt->pkt_scbp;
8498 		/*
8499 		 * Command completed and we're not getting sense. Check for
8500 		 * errors and decide what to do next.
8501 		 */
8502 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8503 		    "vhci_uscsi_iodone: command appears complete: reason: %x",
8504 		    pkt->pkt_reason));
8505 		if (status->sts_chk) {
8506 			/* need to manually get the request sense */
8507 			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
8508 				scsi_destroy_pkt(pkt);
8509 				return;
8510 			}
8511 		} else {
8512 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8513 			    "vhci_uscsi_iodone: appears complete"));
8514 			err = 0;
8515 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8516 			if (pkt->pkt_resid) {
8517 				bp->b_resid += pkt->pkt_resid;
8518 			}
8519 		}
8520 	}
8521 
8522 	if (err) {
8523 		if (bp->b_resid == 0)
8524 			bp->b_resid = bp->b_bcount;
8525 		bioerror(bp, err);
8526 		bp->b_flags |= B_ERROR;
8527 	}
8528 
8529 	scsi_destroy_pkt(pkt);
8530 	biodone(bp);
8531 
8532 	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
8533 }
8534 
8535 /*
8536  * start routine for the mpapi uscsi command
8537  */
8538 int
8539 vhci_uscsi_iostart(struct buf *bp)
8540 {
8541 	struct scsi_pkt 	*pkt;
8542 	struct uscsi_cmd	*uscmdp;
8543 	mp_uscsi_cmd_t 		*mp_uscmdp;
8544 	int			stat_size, rval;
8545 	int			retry = 0;
8546 
8547 	ASSERT(bp->b_private != NULL);
8548 
8549 	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
8550 	uscmdp = mp_uscmdp->uscmdp;
8551 	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8552 		stat_size = SENSE_LENGTH;
8553 	} else {
8554 		stat_size = 1;
8555 	}
8556 
8557 	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
8558 	    stat_size, 0, 0, SLEEP_FUNC, NULL);
8559 	if (pkt == NULL) {
8560 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8561 		    "vhci_uscsi_iostart: rval: EINVAL"));
8562 		bp->b_resid = bp->b_bcount;
8563 		uscmdp->uscsi_resid = bp->b_bcount;
8564 		bioerror(bp, EINVAL);
8565 		biodone(bp);
8566 		return (EINVAL);
8567 	}
8568 
8569 	pkt->pkt_time = uscmdp->uscsi_timeout;
8570 	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
8571 	pkt->pkt_comp = vhci_uscsi_iodone;
8572 	pkt->pkt_private = mp_uscmdp;
8573 	if (uscmdp->uscsi_flags & USCSI_SILENT)
8574 		pkt->pkt_flags |= FLAG_SILENT;
8575 	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
8576 		pkt->pkt_flags |= FLAG_ISOLATE;
8577 	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
8578 		pkt->pkt_flags |= FLAG_DIAGNOSE;
8579 	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
8580 		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
8581 	}
8582 	VHCI_DEBUG(4, (CE_WARN, NULL,
8583 	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
8584 	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
8585 	    " stat_size: %d",
8586 	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
8587 	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
8588 	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));
8589 
8590 	/*
8591 	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8592 	 * selection is not based on path_instance.
8593 	 */
8594 	if (scsi_pkt_allocated_correctly(pkt))
8595 		pkt->pkt_path_instance = 0;
8596 
8597 	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
8598 	    retry < vhci_uscsi_retry_count) {
8599 		delay(drv_usectohz(vhci_uscsi_delay));
8600 		retry++;
8601 	}
8602 	if (retry >= vhci_uscsi_retry_count) {
8603 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8604 		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
8605 	}
8606 	switch (rval) {
8607 	case TRAN_ACCEPT:
8608 		rval =  0;
8609 		break;
8610 
8611 	default:
8612 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8613 		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
8614 		    rval, bp->b_bcount, bp->b_resid));
8615 		bp->b_resid = bp->b_bcount;
8616 		uscmdp->uscsi_resid = bp->b_bcount;
8617 		bioerror(bp, EIO);
8618 		scsi_destroy_pkt(pkt);
8619 		biodone(bp);
8620 		rval = EIO;
8621 		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8622 		break;
8623 	}
8624 	VHCI_DEBUG(4, (CE_NOTE, NULL,
8625 	    "vhci_uscsi_iostart: exit: rval: %d", rval));
8626 	return (rval);
8627 }
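
/*
 * A minimal sketch of the bounded TRAN_BUSY retry used above (and, in
 * unbounded form, by vhci_dispatch_scsi_start()): delay() between
 * attempts is legal because neither caller runs in interrupt context.
 * VHCI_EXAMPLES is a hypothetical guard; the sketch is not built into
 * the driver.
 */
#ifdef VHCI_EXAMPLES
static int
vhci_example_transport_retry(struct scsi_pkt *pkt)
{
	int	rval;
	int	retry = 0;

	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
	    (retry++ < vhci_uscsi_retry_count))
		delay(drv_usectohz(vhci_uscsi_delay));
	return (rval);		/* TRAN_ACCEPT on success */
}
#endif	/* VHCI_EXAMPLES */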
8628 
8629 /* ARGSUSED */
8630 static struct scsi_failover_ops *
8631 vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd,
8632     void **ctprivp, char **fo_namep)
8633 {
8634 	struct scsi_failover_ops	*sfo;
8635 	char				*sfo_name;
8636 	char				*override;
8637 	struct scsi_failover		*sf;
8638 
8639 	ASSERT(psd && psd->sd_inq);
8640 	if ((psd == NULL) || (psd->sd_inq == NULL)) {
8641 		VHCI_DEBUG(1, (CE_NOTE, NULL,
8642 		    "!vhci_dev_fo:return NULL no scsi_device or inquiry"));
8643 		return (NULL);
8644 	}
8645 
8646 	/*
8647 	 * Determine if device is supported under scsi_vhci, and select
8648 	 * failover module.
8649 	 *
8650 	 * See if there is a scsi_vhci.conf file override for this device's
8651 	 * VID/PID. The following values can be returned:
8652 	 *
8653 	 * NULL		If NULL is returned then there is no scsi_vhci.conf
8654 	 *		override.  For NULL, we determine the failover_ops for
8655 	 *		this device by checking the sfo_device_probe entry
8656 	 *		point for each 'fops' module, in order.
8657 	 *
8658 	 *		NOTE: Correct operation may depend on module ordering
8659 	 *		of 'specific' (failover modules that are completely
8660 	 *		VID/PID table based) to 'generic' (failover modules
8661 	 *		that based on T10 standards like TPGS).  Currently,
8662 	 *		the value of 'ddi-forceload' in scsi_vhci.conf is used
8663 	 *		to establish the module list and probe order.
8664 	 *
8665 	 * "NONE"	If value "NONE" is returned then there is a
8666 	 *		scsi_vhci.conf VID/PID override to indicate the device
8667 	 *		should not be supported under scsi_vhci (even if there
8668 	 *		is an 'fops' module supporting the device).
8669 	 *
8670 	 * "<other>"	If another value is returned then that value is the
8671 	 *		name of the 'fops' module that should be used.
8672 	 */
8673 	sfo = NULL;	/* "NONE" */
8674 	override = scsi_get_device_type_string(
8675 	    "scsi-vhci-failover-override", vdip, psd);
8676 	if (override == NULL) {
8677 		/* NULL: default: select based on sfo_device_probe results */
8678 		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
8679 			if ((sf->sf_sfo == NULL) ||
8680 			    sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
8681 			    ctprivp) == SFO_DEVICE_PROBE_PHCI)
8682 				continue;
8683 
8684 			/* found failover module, supported under scsi_vhci */
8685 			sfo = sf->sf_sfo;
8686 			if (fo_namep && (*fo_namep == NULL)) {
8687 				sfo_name = i_ddi_strdup(sfo->sfo_name,
8688 				    KM_SLEEP);
8689 				*fo_namep = sfo_name;
8690 			}
8691 			break;
8692 		}
8693 	} else if (strcasecmp(override, "NONE")) {
8694 		/* !"NONE": select based on driver.conf specified name */
8695 		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
8696 			if ((sf->sf_sfo == NULL) ||
8697 			    (sf->sf_sfo->sfo_name == NULL) ||
8698 			    strcmp(override, sf->sf_sfo->sfo_name))
8699 				continue;
8700 
8701 			/*
8702 			 * NOTE: If sfo_device_probe() has side-effects,
8703 			 * including setting *ctprivp, these are not going
8704 			 * to occur with override config.
8705 			 */
8706 
8707 			/* found failover module, supported under scsi_vhci */
8708 			sfo = sf->sf_sfo;
8709 			if (fo_namep && (*fo_namep == NULL)) {
8710 				sfo_name = kmem_alloc(strlen("conf ") +
8711 				    strlen(sfo->sfo_name) + 1, KM_SLEEP);
8712 				(void) sprintf(sfo_name, "conf %s",
8713 				    sfo->sfo_name);
8714 				*fo_namep = sfo_name;
8715 			}
8716 			break;
8717 		}
8718 	}
8719 	if (override)
8720 		kmem_free(override, strlen(override) + 1);
8721 	return (sfo);
8722 }
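
/*
 * The block comment in vhci_dev_fo() above describes the
 * "scsi-vhci-failover-override" lookup.  As an illustrative sketch
 * (the VID/PID strings below are made up, and scsi_vhci(7D) is the
 * authority on the exact syntax), a scsi_vhci.conf fragment that
 * pairs VID/PID strings with either an 'fops' module name or "NONE"
 * might look like:
 *
 *	scsi-vhci-failover-override =
 *		"ACME    RAIDBOX ",	"f_tpgs",
 *		"ACME    JBODDISK",	"NONE";
 *
 * The INQUIRY VID field is eight bytes, so the vendor portion of each
 * string is blank-padded to eight characters.
 */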
8723 
8724 /*
8725  * Determine whether the device described by cinfo should be enumerated
8726  * under the vHCI or the pHCI - if there is a failover ops then the device
8727  * is supported under vHCI.  By agreement with SCSA, cinfo is a pointer
8728  * to a scsi_device structure associated with a decorated pHCI probe node.
8729  */
8730 /* ARGSUSED */
8731 int
8732 vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo)
8733 {
8734 	struct scsi_device	*psd = (struct scsi_device *)cinfo;
8735 
8736 	return (vhci_dev_fo(vdip, psd, NULL, NULL) ? MDI_SUCCESS : MDI_FAILURE);
8737 }
8738 
8739 
8740 #ifdef DEBUG
8741 extern struct scsi_key_strings scsi_cmds[];
8742 
8743 static char *
8744 vhci_print_scsi_cmd(char cmd)
8745 {
8746 	char tmp[64];
8747 	char *cpnt;
8748 
8749 	cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
8750 	/* tmp goes out of scope on return, so never hand it to the caller */
8751 	if (cpnt == tmp) {
8752 		cpnt = "Unknown Command";
8753 	}
8754 	return (cpnt);
8755 }
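
/*
 * A minimal userland sketch of the stack-lifetime hazard the check in
 * vhci_print_scsi_cmd() guards against.  lookup() is a hypothetical
 * stand-in for scsi_cmd_name(): it returns a static table string when
 * it can decode the command, and otherwise formats into the caller's
 * scratch buffer.
 */
#include <stdio.h>

static char *
lookup(int code, char *scratch, size_t len)
{
	if (code == 0x28)
		return ("read(10)");	/* static storage: safe to return */
	(void) snprintf(scratch, len, "<undecoded 0x%x>", code);
	return (scratch);		/* points into the caller's buffer */
}

static char *
name_of(int code)
{
	char	tmp[64];
	char	*cpnt = lookup(code, tmp, sizeof (tmp));

	/* tmp dies when this function returns, so never pass it back */
	if (cpnt == tmp)
		cpnt = "Unknown Command";
	return (cpnt);
}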
8756 
8757 extern uchar_t	scsi_cdb_size[];
8758 
8759 static void
8760 vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
8761 {
8762 	int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
8763 	char buf[256];
8764 
8765 	if (level == CE_NOTE) {
8766 		vhci_log(level, dip, "path cmd %s\n",
8767 		    vhci_print_scsi_cmd(*cdb));
8768 		return;
8769 	}
8770 
8771 	(void) sprintf(buf, "%s for cmd(%s)", title, vhci_print_scsi_cmd(*cdb));
8772 	vhci_clean_print(dip, level, buf, cdb, len);
8773 }
8774 
8775 static void
8776 vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
8777     int len)
8778 {
8779 	int	i;
8780 	int	c;
8781 	char	*format;
8782 	char	buf[256];
8783 	uchar_t	byte;
8784 
8785 	(void) sprintf(buf, "%s:\n", title);
8786 	vhci_log(level, dev, "%s", buf);
8787 	level = CE_CONT;
8788 	for (i = 0; i < len; ) {
8789 		buf[0] = 0;
8790 		for (c = 0; c < 8 && i < len; c++, i++) {
8791 			byte = (uchar_t)data[i];
8792 			if (byte < 0x10)
8793 				format = "0x0%x ";
8794 			else
8795 				format = "0x%x ";
8796 			(void) sprintf(&buf[(int)strlen(buf)], format, byte);
8797 		}
8798 		(void) sprintf(&buf[(int)strlen(buf)], "\n");
8799 
8800 		vhci_log(level, dev, "%s\n", buf);
8801 	}
8802 }
8803 #endif
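
/*
 * A minimal userland sketch of the hex-dump layout produced by
 * vhci_clean_print() above: up to eight bytes per output line, each
 * rendered as a 0x-prefixed two-digit hex value.  dump_bytes() is a
 * hypothetical name; this is not part of the driver.
 */
#include <stdio.h>
#include <string.h>

static void
dump_bytes(const unsigned char *data, int len)
{
	char	buf[256];
	int	i, c;

	for (i = 0; i < len; ) {
		buf[0] = '\0';
		for (c = 0; c < 8 && i < len; c++, i++) {
			/* "%02x" zero-pads, matching the 0x0%x branch above */
			(void) snprintf(&buf[strlen(buf)],
			    sizeof (buf) - strlen(buf), "0x%02x ", data[i]);
		}
		(void) printf("%s\n", buf);
	}
}
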
8804 static void
8805 vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun)
8806 {
8807 	char			*svl_wwn;
8808 	mpapi_item_list_t	*ilist;
8809 	mpapi_lu_data_t		*ld;
8810 
8811 	if (vlun == NULL) {
8812 		return;
8813 	} else {
8814 		svl_wwn = vlun->svl_lun_wwn;
8815 	}
8816 
8817 	ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;
8818 
8819 	while (ilist != NULL) {
8820 		ld = (mpapi_lu_data_t *)(ilist->item->idata);
8821 		if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn,
8822 		    strlen(svl_wwn)) == 0)) {
8823 			ld->valid = 0;
8824 			VHCI_DEBUG(6, (CE_WARN, NULL,
8825 			    "vhci_invalidate_mpapi_lu: "
8826 			    "Invalidated LU(%s)", svl_wwn));
8827 			return;
8828 		}
8829 		ilist = ilist->next;
8830 	}
8831 	VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: "
8832 	    "Could not find LU(%s) to invalidate.", svl_wwn));
8833 }
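
/*
 * A minimal sketch, with hypothetical types, of the invalidation walk
 * in vhci_invalidate_mpapi_lu() above: a linear scan of a singly
 * linked list that clears a validity flag on the first entry whose
 * key matches the given WWN prefix.
 */
#include <string.h>

typedef struct item {
	struct item	*next;
	const char	*key;		/* cf. ld->prop.name */
	int		valid;
} item_t;

static void
invalidate_by_prefix(item_t *head, const char *wwn)
{
	item_t	*ip;

	for (ip = head; ip != NULL; ip = ip->next) {
		if (strncmp(ip->key, wwn, strlen(wwn)) == 0) {
			ip->valid = 0;	/* invalidate the first match only */
			return;
		}
	}
}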
8834