xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c (revision 28c5054df6dda607af9c25186be1c9644cb5bab5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 /*
25  * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
26  * Copyright (c) 2016 by Delphix. All rights reserved.
27  * Copyright 2023 Oxide Computer Company
28  * Copyright 2024 RackTop Systems, Inc.
29  */
30 
31 /*
32  * Multiplexed I/O SCSI vHCI implementation
33  */
34 
35 #include <sys/conf.h>
36 #include <sys/file.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/scsi/scsi.h>
40 #include <sys/scsi/impl/scsi_reset_notify.h>
41 #include <sys/scsi/impl/services.h>
42 #include <sys/sunmdi.h>
43 #include <sys/mdi_impldefs.h>
44 #include <sys/scsi/adapters/scsi_vhci.h>
45 #include <sys/disp.h>
46 #include <sys/byteorder.h>
47 
48 extern uintptr_t scsi_callback_id;
49 extern ddi_dma_attr_t scsi_alloc_attr;
50 
51 #ifdef	DEBUG
52 int	vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
53 #endif
54 
55 /* retry count for vhci_do_prout when a NOT READY status is returned */
56 int vhci_prout_not_ready_retry = 180;
57 
58 /*
59  * These values are defined to support the internal retry of
60  * SCSI packets for better sense code handling.
61  */
62 #define	VHCI_CMD_CMPLT	0
63 #define	VHCI_CMD_RETRY	1
64 #define	VHCI_CMD_ERROR	-1
65 
66 #define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
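/* Descriptive note: 0x47 is the SCSI additional sense code for PARITY ERROR */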
67 #define	VHCI_SCSI_PERR		0x47
68 #define	VHCI_PGR_ILLEGALOP	-2
69 #define	VHCI_NUM_UPDATE_TASKQ	8
70 /* changed to 132 to accommodate HDS */
71 
72 /*
73  * Version Macros
74  */
75 #define	VHCI_NAME_VERSION	"SCSI VHCI Driver"
76 char		vhci_version_name[] = VHCI_NAME_VERSION;
77 
78 int		vhci_first_time = 0;
79 clock_t		vhci_to_ticks = 0;
80 int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
81 kcondvar_t	vhci_cv;
82 kmutex_t	vhci_global_mutex;
83 void		*vhci_softstate = NULL; /* for soft state */
84 
85 /*
86  * Delay (in microseconds) before retrying the reserve command
87  */
88 int		vhci_reserve_delay = 100000;
89 static int	vhci_path_quiesce_timeout = 60;
90 static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];
91 
92 /* uscsi delay for a TRAN_BUSY */
93 static int vhci_uscsi_delay = 100000;
94 static int vhci_uscsi_retry_count = 180;
95 /* uscsi_restart_sense timeout id in case it needs to get canceled */
96 static timeout_id_t vhci_restart_timeid = 0;
97 
98 static int	vhci_bus_config_debug = 0;
99 
100 /*
101  * Bidirectional map of 'target-port' to port id <pid> for support of
102  * iostat(8) '-Xx' and '-Yx' output.
103  */
104 static kmutex_t		vhci_targetmap_mutex;
105 static uint_t		vhci_targetmap_pid = 1;
106 static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
107 static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */
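/*
 * A minimal sketch (an assumption modeled on the registration done later
 * in this file, not a verbatim excerpt) of how the two maps are kept
 * consistent.  Both inserts happen under vhci_targetmap_mutex so readers
 * never observe one side without the other; 'tport_dup' is a hypothetical
 * name for a durable copy of the 'target-port' string:
 *
 *	mutex_enter(&vhci_targetmap_mutex);
 *	pid = vhci_targetmap_pid++;
 *	(void) mod_hash_insert(vhci_targetmap_byport,
 *	    (mod_hash_key_t)tport_dup, (mod_hash_val_t)(intptr_t)pid);
 *	(void) mod_hash_insert(vhci_targetmap_bypid,
 *	    (mod_hash_key_t)(uintptr_t)pid, (mod_hash_val_t)tport_dup);
 *	mutex_exit(&vhci_targetmap_mutex);
 */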
108 
109 /*
110  * functions exported by scsi_vhci struct cb_ops
111  */
112 static int vhci_open(dev_t *, int, int, cred_t *);
113 static int vhci_close(dev_t, int, int, cred_t *);
114 static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
115 
116 /*
117  * functions exported by scsi_vhci struct dev_ops
118  */
119 static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
120 static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
121 static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);
122 
123 /*
124  * functions exported by scsi_vhci scsi_hba_tran_t transport table
125  */
126 static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
127     scsi_hba_tran_t *, struct scsi_device *);
128 static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
129     struct scsi_device *);
130 static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
131 static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
132 static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
133 static int vhci_scsi_reset(struct scsi_address *, int);
134 static int vhci_scsi_reset_target(struct scsi_address *, int level,
135     uint8_t select_path);
136 static int vhci_scsi_reset_bus(struct scsi_address *);
137 static int vhci_scsi_getcap(struct scsi_address *, char *, int);
138 static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
139 static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
140 static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
141     mdi_pathinfo_t *pip);
142 static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
143     struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
144 static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
145 static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
146 static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
147 static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
148     caddr_t);
149 static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
150 static int vhci_scsi_get_name(struct scsi_device *, char *, int);
151 static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
152     void *, void *);
153 static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
154     void *, dev_info_t **);
155 static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t,
156     void *);
157 static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *,
158     void **, char **);
159 
160 /*
161  * functions registered with the mpxio framework via mdi_vhci_ops_t
162  */
163 static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
164 static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
165 static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
166     mdi_pathinfo_state_t, uint32_t, int);
167 static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
168 static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
169 static int vhci_failover(dev_info_t *, dev_info_t *, int);
170 static void vhci_client_attached(dev_info_t *);
171 static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *);
172 
173 static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
174 static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
175 static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
176 static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
177 static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
178 static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
179 static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
180 static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
181 static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
182 static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
183 static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
184     int, caddr_t);
185 static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
186     uint_t, sv_iocdata_t *, int, caddr_t);
187 static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
188 static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
189     sv_iocdata_t *, int, caddr_t);
190 static int vhci_handle_ext_fo(struct scsi_pkt *, int);
191 static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
192 static int vhci_quiesce_lun(struct scsi_vhci_lun *);
193 static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
194 static void vhci_dispatch_scsi_start(void *);
195 static void vhci_efo_done(void *);
196 static void vhci_initiate_auto_failback(void *);
197 static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
198 static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
199     struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
200 static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
201 static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
202     scsi_vhci_lun_t *, char *, char *);
203 
204 static char *vhci_devnm_to_guid(char *);
205 static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
206     int, int (*func)(caddr_t));
207 static void vhci_intr(struct scsi_pkt *);
208 static int vhci_do_prout(scsi_vhci_priv_t *);
209 static void vhci_run_cmd(void *);
210 static int vhci_do_prin(struct vhci_pkt **);
211 static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
212 static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
213 static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
214 static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
215 static void vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd);
216 static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
217     uint8_t, uint8_t);
218 void vhci_update_pathstates(void *);
219 
220 #ifdef DEBUG
221 static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
222 static void vhci_print_cdb(dev_info_t *dip, uint_t level,
223     char *title, uchar_t *cdb);
224 static void vhci_clean_print(dev_info_t *dev, uint_t level,
225     char *title, uchar_t *data, int len);
226 #endif
227 static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
228 static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
229 static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *);
230 
231 /*
232  * MP-API related functions
233  */
234 extern int vhci_mpapi_init(struct scsi_vhci *);
235 extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
236 extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
237 extern void vhci_update_mpapi_data(struct scsi_vhci *,
238     scsi_vhci_lun_t *, mdi_pathinfo_t *);
239 extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
240     uint8_t, void*);
241 extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
242 extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
243     scsi_vhci_lun_t *);
244 
245 #define	VHCI_DMA_MAX_XFER_CAP	INT_MAX
246 
247 #define	VHCI_MAX_PGR_RETRIES	3
248 
249 /*
250  * Macros for the device-type mpxio options
251  */
252 #define	LOAD_BALANCE_OPTIONS		"load-balance-options"
253 #define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
254 #define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
255 #define	DEVICE_TYPE_STR			"device-type"
256 #define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')
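/*
 * Illustrative scsi_vhci.conf fragment for the properties named above
 * (a sketch only; the vendor/product strings are placeholders):
 *
 *	device-type-mpxio-options-list =
 *	    "device-type=VENDOR  PRODUCT",
 *	    "load-balance-options=logical-block-options";
 *	logical-block-options =
 *	    "load-balance=logical-block", "region-size=18";
 */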
257 
258 static struct cb_ops vhci_cb_ops = {
259 	vhci_open,			/* open */
260 	vhci_close,			/* close */
261 	nodev,				/* strategy */
262 	nodev,				/* print */
263 	nodev,				/* dump */
264 	nodev,				/* read */
265 	nodev,				/* write */
266 	vhci_ioctl,			/* ioctl */
267 	nodev,				/* devmap */
268 	nodev,				/* mmap */
269 	nodev,				/* segmap */
270 	nochpoll,			/* chpoll */
271 	ddi_prop_op,			/* cb_prop_op */
272 	0,				/* streamtab */
273 	D_NEW | D_MP,			/* cb_flag */
274 	CB_REV,				/* rev */
275 	nodev,				/* aread */
276 	nodev				/* awrite */
277 };
278 
279 static struct dev_ops vhci_ops = {
280 	DEVO_REV,
281 	0,
282 	vhci_getinfo,
283 	nulldev,		/* identify */
284 	nulldev,		/* probe */
285 	vhci_attach,		/* attach and detach are mandatory */
286 	vhci_detach,
287 	nodev,			/* reset */
288 	&vhci_cb_ops,		/* cb_ops */
289 	NULL,			/* bus_ops */
290 	NULL,			/* power */
291 	ddi_quiesce_not_needed,	/* quiesce */
292 };
293 
294 extern struct mod_ops mod_driverops;
295 
296 static struct modldrv modldrv = {
297 	&mod_driverops,
298 	vhci_version_name,	/* module name */
299 	&vhci_ops
300 };
301 
302 static struct modlinkage modlinkage = {
303 	MODREV_1,
304 	&modldrv,
305 	NULL
306 };
307 
308 static mdi_vhci_ops_t vhci_opinfo = {
309 	MDI_VHCI_OPS_REV,
310 	vhci_pathinfo_init,		/* Pathinfo node init callback */
311 	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
312 	vhci_pathinfo_state_change,	/* Pathinfo node state change */
313 	vhci_failover,			/* failover callback */
314 	vhci_client_attached,		/* client attached callback	*/
315 	vhci_is_dev_supported		/* is device supported by mdi */
316 };
317 
318 /*
319  * The scsi_failover table defines an ordered set of 'fops' modules supported
320  * by scsi_vhci.  Currently, initialize this table from the 'ddi-forceload'
321  * property specified in scsi_vhci.conf.
322  */
323 static struct scsi_failover {
324 	ddi_modhandle_t			sf_mod;
325 	struct scsi_failover_ops	*sf_sfo;
326 } *scsi_failover_table;
327 static uint_t	scsi_nfailover;
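/*
 * Illustrative 'ddi-forceload' entry from scsi_vhci.conf (a sketch; the
 * exact set of failover modules shipped varies by distribution):
 *
 *	ddi-forceload =
 *	    "misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *	    "misc/scsi_vhci/scsi_vhci_f_sym",
 *	    "misc/scsi_vhci/scsi_vhci_f_tpgs";
 */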
328 
329 int
330 _init(void)
331 {
332 	int	rval;
333 
334 	/*
335 	 * Initialize the soft state so that ddi_soft_state_zalloc() can
336 	 * be used later; do this before registering with the transport.
337 	 */
338 	if ((rval = ddi_soft_state_init(&vhci_softstate,
339 	    sizeof (struct scsi_vhci), 1)) != 0) {
340 		VHCI_DEBUG(1, (CE_NOTE, NULL,
341 		    "!_init: soft state init failed\n"));
342 		return (rval);
343 	}
344 
345 	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
346 		VHCI_DEBUG(1, (CE_NOTE, NULL,
347 		    "!_init: scsi hba init failed\n"));
348 		ddi_soft_state_fini(&vhci_softstate);
349 		return (rval);
350 	}
351 
352 	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
353 	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);
354 
355 	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
356 	vhci_targetmap_byport = mod_hash_create_strhash(
357 	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
358 	vhci_targetmap_bypid = mod_hash_create_idhash(
359 	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);
360 
361 	if ((rval = mod_install(&modlinkage)) != 0) {
362 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
363 		if (vhci_targetmap_bypid)
364 			mod_hash_destroy_idhash(vhci_targetmap_bypid);
365 		if (vhci_targetmap_byport)
366 			mod_hash_destroy_strhash(vhci_targetmap_byport);
367 		mutex_destroy(&vhci_targetmap_mutex);
368 		cv_destroy(&vhci_cv);
369 		mutex_destroy(&vhci_global_mutex);
370 		scsi_hba_fini(&modlinkage);
371 		ddi_soft_state_fini(&vhci_softstate);
372 	}
373 	return (rval);
374 }
375 
376 
377 /*
378  * the system is done with us as a driver, so clean up
379  */
380 int
381 _fini(void)
382 {
383 	int rval;
384 
385 	/*
386 	 * don't start cleaning up until we know that the module remove
387 	 * has worked  -- if this works, then we know that each instance
388 	 * has successfully been DDI_DETACHed
389 	 */
390 	if ((rval = mod_remove(&modlinkage)) != 0) {
391 		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
392 		return (rval);
393 	}
394 
395 	if (vhci_targetmap_bypid)
396 		mod_hash_destroy_idhash(vhci_targetmap_bypid);
397 	if (vhci_targetmap_byport)
398 		mod_hash_destroy_strhash(vhci_targetmap_byport);
399 	mutex_destroy(&vhci_targetmap_mutex);
400 	cv_destroy(&vhci_cv);
401 	mutex_destroy(&vhci_global_mutex);
402 	scsi_hba_fini(&modlinkage);
403 	ddi_soft_state_fini(&vhci_softstate);
404 
405 	return (rval);
406 }
407 
408 int
409 _info(struct modinfo *modinfop)
410 {
411 	return (mod_info(&modlinkage, modinfop));
412 }
413 
414 /*
415  * Lookup scsi_failover by "short name" of failover module.
416  */
417 struct scsi_failover_ops *
418 vhci_failover_ops_by_name(char *name)
419 {
420 	struct scsi_failover	*sf;
421 
422 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
423 		if (sf->sf_sfo == NULL)
424 			continue;
425 		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
426 			return (sf->sf_sfo);
427 	}
428 	return (NULL);
429 }
430 
431 /*
432  * Load all scsi_failover_ops 'fops' modules.
433  */
434 static void
435 vhci_failover_modopen(struct scsi_vhci *vhci)
436 {
437 	char			**module;
438 	int			i;
439 	struct scsi_failover	*sf;
440 	char			**dt;
441 	int			e;
442 
443 	if (scsi_failover_table)
444 		return;
445 
446 	/* Get the list of modules from scsi_vhci.conf */
447 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
448 	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
449 	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
450 		cmn_err(CE_WARN, "scsi_vhci: "
451 		    "scsi_vhci.conf is missing 'ddi-forceload'");
452 		return;
453 	}
454 	if (scsi_nfailover == 0) {
455 		cmn_err(CE_WARN, "scsi_vhci: "
456 		    "scsi_vhci.conf has empty 'ddi-forceload'");
457 		ddi_prop_free(module);
458 		return;
459 	}
460 
461 	/* allocate failover table based on number of modules */
462 	scsi_failover_table = (struct scsi_failover *)
463 	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
464 	    KM_SLEEP);
465 
466 	/* loop over modules specified in scsi_vhci.conf and open each module */
467 	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
468 		if (module[i] == NULL)
469 			continue;
470 
471 		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
472 		if (sf->sf_mod == NULL) {
473 			/*
474 			 * A module returns EEXIST if other software is
475 			 * supporting the intended function: for example
476 			 * the scsi_vhci_f_sum_emc module returns EEXIST
477 			 * from _init if EMC powerpath software is installed.
478 			 */
479 			if (e != EEXIST)
480 				cmn_err(CE_WARN, "scsi_vhci: unable to open "
481 				    "module '%s', error %d", module[i], e);
482 			continue;
483 		}
484 		sf->sf_sfo = ddi_modsym(sf->sf_mod,
485 		    "scsi_vhci_failover_ops", &e);
486 		if (sf->sf_sfo == NULL) {
487 			cmn_err(CE_WARN, "scsi_vhci: "
488 			    "unable to import 'scsi_failover_ops' from '%s', "
489 			    "error %d", module[i], e);
490 			(void) ddi_modclose(sf->sf_mod);
491 			sf->sf_mod = NULL;
492 			continue;
493 		}
494 
495 		/* register vid/pid of devices supported with mpapi */
496 		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
497 			vhci_mpapi_add_dev_prod(vhci, *dt);
498 		sf++;
499 	}
500 
501 	/* verify that at least the "well-known" modules were there */
502 	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
503 		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
504 		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
505 		    "'ddi-forceload'");
506 	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
507 		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
508 		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
509 		    "'ddi-forceload'");
510 
511 	/* call sfo_init for modules that need it */
512 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
513 		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
514 			sf->sf_sfo->sfo_init();
515 	}
516 
517 	ddi_prop_free(module);
518 }
519 
520 /*
521  * unload all loaded scsi_failover_ops modules
522  */
523 static void
524 vhci_failover_modclose()
525 {
526 	struct scsi_failover	*sf;
527 
528 	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
529 		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
530 			continue;
531 		(void) ddi_modclose(sf->sf_mod);
532 		sf->sf_mod = NULL;
533 		sf->sf_sfo = NULL;
534 	}
535 
536 	if (scsi_failover_table && scsi_nfailover)
537 		kmem_free(scsi_failover_table,
538 		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
539 	scsi_failover_table = NULL;
540 	scsi_nfailover = 0;
541 }
542 
543 /* ARGSUSED */
544 static int
545 vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
546 {
547 	struct scsi_vhci	*vhci;
548 
549 	if (otype != OTYP_CHR) {
550 		return (EINVAL);
551 	}
552 
553 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
554 	if (vhci == NULL) {
555 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
556 		return (ENXIO);
557 	}
558 
559 	mutex_enter(&vhci->vhci_mutex);
560 	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
561 		mutex_exit(&vhci->vhci_mutex);
562 		vhci_log(CE_NOTE, vhci->vhci_dip,
563 		    "!vhci%d: Already open\n", getminor(*devp));
564 		return (EBUSY);
565 	}
566 
567 	vhci->vhci_state |= VHCI_STATE_OPEN;
568 	mutex_exit(&vhci->vhci_mutex);
569 	return (0);
570 }
571 
572 
573 /* ARGSUSED */
574 static int
575 vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
576 {
577 	struct scsi_vhci	*vhci;
578 
579 	if (otype != OTYP_CHR) {
580 		return (EINVAL);
581 	}
582 
583 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
584 	if (vhci == NULL) {
585 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
586 		return (ENXIO);
587 	}
588 
589 	mutex_enter(&vhci->vhci_mutex);
590 	vhci->vhci_state &= ~VHCI_STATE_OPEN;
591 	mutex_exit(&vhci->vhci_mutex);
592 
593 	return (0);
594 }
595 
596 /* ARGSUSED */
597 static int
598 vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
599     cred_t *credp, int *rval)
600 {
601 	if (IS_DEVCTL(cmd)) {
602 		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
603 	} else if (cmd == MP_CMD) {
604 		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
605 	} else {
606 		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
607 	}
608 }
609 
610 /*
611  * attach the module
612  */
613 static int
614 vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
615 {
616 	int			rval = DDI_FAILURE;
617 	int			scsi_hba_attached = 0;
618 	int			vhci_attached = 0;
619 	int			mutex_initted = 0;
620 	int			instance;
621 	struct scsi_vhci	*vhci;
622 	scsi_hba_tran_t		*tran;
623 	char			cache_name_buf[64];
624 	char			*data;
625 
626 	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));
627 
628 	instance = ddi_get_instance(dip);
629 
630 	switch (cmd) {
631 	case DDI_ATTACH:
632 		break;
633 
634 	case DDI_RESUME:
635 	case DDI_PM_RESUME:
636 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
637 		    "implemented\n"));
638 		return (rval);
639 
640 	default:
641 		VHCI_DEBUG(1, (CE_NOTE, NULL,
642 		    "!vhci_attach: unknown ddi command\n"));
643 		return (rval);
644 	}
645 
646 	/*
647 	 * Allocate vhci data structure.
648 	 */
649 	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
650 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
651 		    "soft state alloc failed\n"));
652 		return (DDI_FAILURE);
653 	}
654 
655 	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
656 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
657 		    "bad soft state\n"));
658 		ddi_soft_state_free(vhci_softstate, instance);
659 		return (DDI_FAILURE);
660 	}
661 
662 	/* Allocate packet cache */
663 	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
664 	    "vhci%d_cache", instance);
665 
666 	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
667 	mutex_initted++;
668 
669 	/*
670 	 * Allocate a transport structure
671 	 */
672 	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
673 	ASSERT(tran != NULL);
674 
675 	vhci->vhci_tran		= tran;
676 	vhci->vhci_dip		= dip;
677 	vhci->vhci_instance	= instance;
678 
679 	tran->tran_hba_private	= vhci;
680 	tran->tran_tgt_init	= vhci_scsi_tgt_init;
681 	tran->tran_tgt_probe	= NULL;
682 	tran->tran_tgt_free	= vhci_scsi_tgt_free;
683 
684 	tran->tran_start	= vhci_scsi_start;
685 	tran->tran_abort	= vhci_scsi_abort;
686 	tran->tran_reset	= vhci_scsi_reset;
687 	tran->tran_getcap	= vhci_scsi_getcap;
688 	tran->tran_setcap	= vhci_scsi_setcap;
689 	tran->tran_init_pkt	= vhci_scsi_init_pkt;
690 	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
691 	tran->tran_dmafree	= vhci_scsi_dmafree;
692 	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
693 	tran->tran_reset_notify = vhci_scsi_reset_notify;
694 
695 	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
696 	tran->tran_get_name	= vhci_scsi_get_name;
697 	tran->tran_bus_reset	= NULL;
698 	tran->tran_quiesce	= NULL;
699 	tran->tran_unquiesce	= NULL;
700 
701 	/*
702 	 * register event notification routines with scsa
703 	 */
704 	tran->tran_get_eventcookie = NULL;
705 	tran->tran_add_eventcall = NULL;
706 	tran->tran_remove_eventcall = NULL;
707 	tran->tran_post_event	= NULL;
708 
709 	tran->tran_bus_power	= vhci_scsi_bus_power;
710 
711 	tran->tran_bus_config	= vhci_scsi_bus_config;
712 	tran->tran_bus_unconfig	= vhci_scsi_bus_unconfig;
713 
714 	/*
715 	 * Attach this instance with the mpxio framework
716 	 */
717 	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
718 	    != MDI_SUCCESS) {
719 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
720 		    "mdi_vhci_register failed\n"));
721 		goto attach_fail;
722 	}
723 	vhci_attached++;
724 
725 	/*
726 	 * Attach this instance of the hba.
727 	 *
728 	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
729 	 * driver, it has nothing to do with DMA. However, when calling
730 	 * scsi_hba_attach_setup() we need to pass something valid in the
731 	 * dma attributes parameter. So we just use scsi_alloc_attr.
732 	 * SCSA itself seems to care only about the dma_attr_minxfer and
733 	 * dma_attr_burstsizes fields of the DMA attributes structure.
734 	 * It expects those fields to be non-zero.
735 	 */
736 	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
737 	    SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
738 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
739 		    "hba attach failed\n"));
740 		goto attach_fail;
741 	}
742 	scsi_hba_attached++;
743 
744 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
745 	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
746 		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
747 		    " ddi_create_minor_node failed\n"));
748 		goto attach_fail;
749 	}
750 
751 	/*
752 	 * Set pm-want-child-notification property for
753 	 * power management of the phci and client
754 	 */
755 	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
756 	    "pm-want-child-notification?", NULL, 0) != DDI_PROP_SUCCESS) {
757 		cmn_err(CE_WARN,
758 		    "%s%d failed to create pm-want-child-notification? prop",
759 		    ddi_driver_name(dip), ddi_get_instance(dip));
760 		goto attach_fail;
761 	}
762 
763 	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
764 	vhci->vhci_update_pathstates_taskq =
765 	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
766 	    MINCLSYSPRI, 1, 4, 0);
767 	ASSERT(vhci->vhci_taskq);
768 	ASSERT(vhci->vhci_update_pathstates_taskq);
769 
770 	/*
771 	 * Set appropriate configuration flags based on options set in
772 	 * conf file.
773 	 */
774 	vhci->vhci_conf_flags = 0;
775 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
776 	    "auto-failback", &data) == DDI_SUCCESS) {
777 		if (strcmp(data, "enable") == 0)
778 			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
779 		ddi_prop_free(data);
780 	}
781 
782 	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
783 		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
784 		    "disabled through scsi_vhci.conf file.");
785 
786 	/*
787 	 * Allocate an mpapi private structure
788 	 */
789 	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
790 	if (vhci_mpapi_init(vhci) != 0) {
791 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
792 		    "vhci_mpapi_init() failed"));
793 	}
794 
795 	vhci_failover_modopen(vhci);		/* load failover modules */
796 
797 	ddi_report_dev(dip);
798 	return (DDI_SUCCESS);
799 
800 attach_fail:
801 	if (vhci_attached)
802 		(void) mdi_vhci_unregister(dip, 0);
803 
804 	if (scsi_hba_attached)
805 		(void) scsi_hba_detach(dip);
806 
807 	if (vhci->vhci_tran)
808 		scsi_hba_tran_free(vhci->vhci_tran);
809 
810 	if (mutex_initted) {
811 		mutex_destroy(&vhci->vhci_mutex);
812 	}
813 
814 	ddi_soft_state_free(vhci_softstate, instance);
815 	return (DDI_FAILURE);
816 }
817 
818 
819 /*ARGSUSED*/
820 static int
821 vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
822 {
823 	int			instance = ddi_get_instance(dip);
824 	scsi_hba_tran_t		*tran;
825 	struct scsi_vhci	*vhci;
826 
827 	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));
828 
829 	if ((tran = ddi_get_driver_private(dip)) == NULL)
830 		return (DDI_FAILURE);
831 
832 	vhci = TRAN2HBAPRIVATE(tran);
833 	if (!vhci) {
834 		return (DDI_FAILURE);
835 	}
836 
837 	switch (cmd) {
838 	case DDI_DETACH:
839 		break;
840 
841 	case DDI_SUSPEND:
842 	case DDI_PM_SUSPEND:
843 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not yet "
844 		    "implemented\n"));
845 		return (DDI_FAILURE);
846 
847 	default:
848 		VHCI_DEBUG(1, (CE_NOTE, NULL,
849 		    "!vhci_detach: unknown ddi command\n"));
850 		return (DDI_FAILURE);
851 	}
852 
853 	(void) mdi_vhci_unregister(dip, 0);
854 	(void) scsi_hba_detach(dip);
855 	scsi_hba_tran_free(tran);
856 
857 	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
858 	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
859 		cmn_err(CE_WARN,
860 		    "%s%d unable to remove prop pm-want-child-notification?",
861 		    ddi_driver_name(dip), ddi_get_instance(dip));
862 	}
863 	if (vhci_restart_timeid != 0) {
864 		(void) untimeout(vhci_restart_timeid);
865 	}
866 	vhci_restart_timeid = 0;
867 
868 	mutex_destroy(&vhci->vhci_mutex);
869 	vhci->vhci_dip = NULL;
870 	vhci->vhci_tran = NULL;
871 	taskq_destroy(vhci->vhci_taskq);
872 	taskq_destroy(vhci->vhci_update_pathstates_taskq);
873 	ddi_remove_minor_node(dip, NULL);
874 	ddi_soft_state_free(vhci_softstate, instance);
875 
876 	vhci_failover_modclose();		/* unload failover modules */
877 	return (DDI_SUCCESS);
878 }
879 
880 /*
881  * vhci_getinfo()
882  * Given the device number, return the devinfo pointer or the
883  * instance number.
884  * Note: DDI_INFO_DEVT2INSTANCE always succeeds, even before attach.
885  */
886 
887 /*ARGSUSED*/
888 static int
889 vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
890 {
891 	struct scsi_vhci	*vhcip;
892 	int			instance = MINOR2INST(getminor((dev_t)arg));
893 
894 	switch (cmd) {
895 	case DDI_INFO_DEVT2DEVINFO:
896 		vhcip = ddi_get_soft_state(vhci_softstate, instance);
897 		if (vhcip != NULL)
898 			*result = vhcip->vhci_dip;
899 		else {
900 			*result = NULL;
901 			return (DDI_FAILURE);
902 		}
903 		break;
904 
905 	case DDI_INFO_DEVT2INSTANCE:
906 		*result = (void *)(uintptr_t)instance;
907 		break;
908 
909 	default:
910 		return (DDI_FAILURE);
911 	}
912 
913 	return (DDI_SUCCESS);
914 }
915 
916 /*ARGSUSED*/
917 static int
918 vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
919     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
920 {
921 	char			*guid;
922 	scsi_vhci_lun_t		*vlun;
923 	struct scsi_vhci	*vhci;
924 	clock_t			from_ticks;
925 	mdi_pathinfo_t		*pip;
926 	int			rval;
927 
928 	ASSERT(hba_dip != NULL);
929 	ASSERT(tgt_dip != NULL);
930 
931 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
932 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
933 		/*
934 		 * This must be the .conf node without GUID property.
935 		 * The node under fp already inserts a delay, so we
936 		 * just return from here. We rely on this delay to have
937 		 * all dips be posted to the ndi hotplug thread's newdev
938 		 * list. This is necessary for the deferred attach
939 		 * mechanism to work, and for opens() done soon after boot to
940 		 * succeed.
941 		 */
942 		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
943 		    "property failed"));
944 		return (DDI_NOT_WELL_FORMED);
945 	}
946 
947 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
948 		/*
949 		 * This must be a .conf node with the GUID property. We don't
950 		 * merge properties by ndi_merge_node() here because the
951 		 * devi_addr_buf of a .conf node is always "" according to the
952 		 * implementation of vhci_scsi_get_name_bus_addr().
953 		 */
954 		ddi_set_name_addr(tgt_dip, NULL);
955 		return (DDI_FAILURE);
956 	}
957 
958 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
959 	ASSERT(vhci != NULL);
960 
961 	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
962 	    "!tgt_init: called for %s (instance %d)\n",
963 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));
964 
965 	vlun = vhci_lun_lookup(tgt_dip);
966 
967 	mutex_enter(&vhci_global_mutex);
968 
969 	from_ticks = ddi_get_lbolt();
970 	if (vhci_to_ticks == 0) {
971 		vhci_to_ticks = from_ticks +
972 		    drv_usectohz(vhci_init_wait_timeout);
973 	}
974 
975 #if DEBUG
976 	if (vlun) {
977 		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
978 		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
979 		    "from_ticks %lx to_ticks %lx",
980 		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
981 	} else {
982 		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
983 		    "vhci_scsi_tgt_init: guid %s : vlun not found "
984 		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
985 		    vhci_to_ticks));
986 	}
987 #endif
988 
989 	rval = mdi_select_path(tgt_dip, NULL,
990 	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
991 	if (rval == MDI_SUCCESS) {
992 		mdi_rele_path(pip);
993 	}
994 
995 	/*
996 	 * Wait for the following conditions :
997 	 *	1. no vlun available yet
998 	 *	2. no path established
999 	 *	3. timer did not expire
1000 	 */
1001 	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
1002 	    (rval != MDI_SUCCESS)) {
1003 		if (vlun && vlun->svl_not_supported) {
1004 			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
1005 			    "vlun 0x%p lun guid %s not supported!",
1006 			    (void *)vlun, guid));
1007 			mutex_exit(&vhci_global_mutex);
1008 			ddi_prop_free(guid);
1009 			return (DDI_NOT_WELL_FORMED);
1010 		}
1011 		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
1012 			vhci_first_time = 1;
1013 		}
1014 		if (vhci_first_time == 1) {
1015 			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
1016 			    "no wait for %s. from_tick %lx, to_tick %lx",
1017 			    guid, from_ticks, vhci_to_ticks));
1018 			mutex_exit(&vhci_global_mutex);
1019 			ddi_prop_free(guid);
1020 			return (DDI_NOT_WELL_FORMED);
1021 		}
1022 
1023 		if (cv_timedwait(&vhci_cv,
1024 		    &vhci_global_mutex, vhci_to_ticks) == -1) {
1025 			/* Timed out */
1026 #ifdef DEBUG
1027 			if (vlun == NULL) {
1028 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1029 				    "tgt_init: no vlun for %s!", guid));
1030 			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
1031 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1032 				    "tgt_init: client path count is "
1033 				    "zero for %s!", guid));
1034 			} else {
1035 				VHCI_DEBUG(1, (CE_WARN, hba_dip,
1036 				    "tgt_init: client path not "
1037 				    "available yet for %s!", guid));
1038 			}
1039 #endif /* DEBUG */
1040 			mutex_exit(&vhci_global_mutex);
1041 			ddi_prop_free(guid);
1042 			return (DDI_NOT_WELL_FORMED);
1043 		}
1044 		vlun = vhci_lun_lookup(tgt_dip);
1045 		rval = mdi_select_path(tgt_dip, NULL,
1046 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
1047 		    NULL, &pip);
1048 		if (rval == MDI_SUCCESS) {
1049 			mdi_rele_path(pip);
1050 		}
1051 		from_ticks = ddi_get_lbolt();
1052 	}
1053 	mutex_exit(&vhci_global_mutex);
1054 
1055 	ASSERT(vlun != NULL);
1056 	ddi_prop_free(guid);
1057 
1058 	scsi_device_hba_private_set(sd, vlun);
1059 
1060 	return (DDI_SUCCESS);
1061 }
1062 
1063 /*ARGSUSED*/
1064 static void
1065 vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1066     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1067 {
1068 	struct scsi_vhci_lun *dvlp;
1069 	ASSERT(mdi_client_get_path_count(tgt_dip) <= 0);
1070 	dvlp = (struct scsi_vhci_lun *)scsi_device_hba_private_get(sd);
1071 	ASSERT(dvlp != NULL);
1072 
1073 	vhci_lun_free(dvlp, sd);
1074 }
1075 
1076 /*
1077  * a PGR register command has started; copy the info we need
1078  */
1079 int
1080 vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
1081 {
1082 	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
1083 	void			*addr;
1084 
1085 	if (!vpkt->vpkt_tgt_init_bp)
1086 		return (TRAN_BADPKT);
1087 
1088 	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
1089 	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
1090 	if (addr == NULL)
1091 		return (TRAN_BUSY);
1092 
1093 	mutex_enter(&vlun->svl_mutex);
1094 
1095 	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");
1096 
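	/*
	 * Descriptive note: copy only the caller-supplied portion of the
	 * PROUT parameter list.  The final two MHIOC_RESV_KEY_SIZE areas
	 * of vhci_prout_t are excluded because (by the structure layout,
	 * an assumption rather than a verified invariant) they are
	 * scratch space maintained by scsi_vhci itself.
	 */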
1097 	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
1098 	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
1099 	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));
1100 
1101 	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");
1102 
1103 	vlun->svl_time = pkt->pkt_time;
1104 	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
1105 	vlun->svl_first_path = vpkt->vpkt_path;
1106 	mutex_exit(&vlun->svl_mutex);
1107 	return (0);
1108 }
1109 
1110 /*
1111  * Function name : vhci_scsi_start()
1112  *
1113  * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
1114  *					  or other fatal failure
1115  *					  preventing packet transportation
1116  *		   TRAN_BUSY		- request queue is full
1117  *		   TRAN_ACCEPT		- pkt has been submitted to phci
1118  *					  (or is held in the waitQ)
1119  * Description	 : Implements SCSA's tran_start() entry point for
1120  *		   packet transport
1121  *
1122  */
1123 static int
1124 vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1125 {
1126 	int			rval = TRAN_ACCEPT;
1127 	int			instance, held;
1128 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1129 	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
1130 	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
1131 	int			flags = 0;
1132 	scsi_vhci_priv_t	*svp, *svp_resrv;
1133 	dev_info_t		*cdip;
1134 	client_lb_t		lbp;
1135 	int			restore_lbp = 0;
1136 	/* set if pkt is SCSI-II RESERVE cmd */
1137 	int			pkt_reserve_cmd = 0;
1138 	int			reserve_failed = 0;
1139 	int			resrv_instance = 0;
1140 	mdi_pathinfo_t		*pip;
1141 	struct scsi_pkt		*rel_pkt;
1142 
1143 	ASSERT(vhci != NULL);
1144 	ASSERT(vpkt != NULL);
1145 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
1146 	cdip = ADDR2DIP(ap);
1147 
1148 	/*
1149 	 * Block IOs if LUN is held or QUIESCED for IOs.
1150 	 */
1151 	if ((VHCI_LUN_IS_HELD(vlun)) ||
1152 	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1153 		return (TRAN_BUSY);
1154 	}
1155 
1156 	/*
1157 	 * The vhci_lun needs to be quiesced before a SCSI-II RESERVE
1158 	 * command can be issued.  This may require a cv_timedwait, which
1159 	 * is dangerous to perform in an interrupt context.  So if this
1160 	 * is a RESERVE command, a taskq is dispatched to service it.
1161 	 * The taskq calls vhci_scsi_start again, this time guaranteed
1162 	 * not to be in an interrupt context.
1163 	 */
1164 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
1165 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
1166 		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
1167 			if (taskq_dispatch(vhci->vhci_taskq,
1168 			    vhci_dispatch_scsi_start, (void *) vpkt,
1169 			    KM_NOSLEEP) != TASKQID_INVALID) {
1170 				return (TRAN_ACCEPT);
1171 			} else {
1172 				return (TRAN_BUSY);
1173 			}
1174 		}
1175 
1176 		/*
1177 		 * Here we ensure that simultaneous SCSI-II RESERVE cmds are
1178 		 * not serviced concurrently for a lun.
1179 		 */
1180 		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
1181 		if (!held) {
1182 			return (TRAN_BUSY);
1183 		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
1184 		    VLUN_QUIESCED_FLG) {
1185 			VHCI_RELEASE_LUN(vlun);
1186 			return (TRAN_BUSY);
1187 		}
1188 
1189 		/*
1190 		 * To ensure that no IOs occur for this LUN for the duration
1191 		 * of this pkt, set the VLUN_QUIESCED_FLG.
1192 		 * In case this routine needs to exit on error make sure that
1193 		 * this flag is cleared.
1194 		 */
1195 		vlun->svl_flags |= VLUN_QUIESCED_FLG;
1196 		pkt_reserve_cmd = 1;
1197 
1198 		/*
1199 		 * if this is a SCSI-II RESERVE command, set load balancing
1200 		 * policy to be ALTERNATE PATH to ensure that all subsequent
1201 		 * IOs are routed on the same path.  This is because if commands
1202 		 * are routed across multiple paths then IOs on paths other than
1203 		 * the one on which the RESERVE was executed will get a
1204 		 * RESERVATION CONFLICT.
1205 		 */
1206 		lbp = mdi_get_lb_policy(cdip);
1207 		if (lbp != LOAD_BALANCE_NONE) {
1208 			if (vhci_quiesce_lun(vlun) != 1) {
1209 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1210 				VHCI_RELEASE_LUN(vlun);
1211 				return (TRAN_FATAL_ERROR);
1212 			}
1213 			vlun->svl_lb_policy_save = lbp;
1214 			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
1215 			    MDI_SUCCESS) {
1216 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1217 				VHCI_RELEASE_LUN(vlun);
1218 				return (TRAN_FATAL_ERROR);
1219 			}
1220 			restore_lbp = 1;
1221 		}
1222 
1223 		VHCI_DEBUG(2, (CE_NOTE, vhci->vhci_dip,
1224 		    "!vhci_scsi_start: sending SCSI-2 RESERVE, vlun 0x%p, "
1225 		    "svl_resrv_pip 0x%p, svl_flags: %x, lb_policy %x",
1226 		    (void *)vlun, (void *)vlun->svl_resrv_pip, vlun->svl_flags,
1227 		    mdi_get_lb_policy(cdip)));
1228 
1229 		/*
1230 		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
1231 		 * To narrow this window where a reserve command may be sent
1232 		 * down an inactive path the path states first need to be
1233 		 * updated.  Before calling vhci_update_pathstates reset
1234 		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
1235 		 * for this lun.  This shall prevent an unnecessary reset
1236 		 * from being sent out.  Also remember currently reserved path
1237 		 * just for a case the new reservation will go to another path.
1238 		 */
1239 		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
1240 			resrv_instance = mdi_pi_get_path_instance(
1241 			    vlun->svl_resrv_pip);
1242 		}
1243 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
1244 		vhci_update_pathstates((void *)vlun);
1245 	}
1246 
1247 	instance = ddi_get_instance(vhci->vhci_dip);
1248 
1249 	/*
1250 	 * If the command is PRIN with action of zero, then the cmd
1251 	 * is reading PR keys which requires filtering on completion.
1252 	 * Data cache sync must be guaranteed.
1253 	 */
1254 	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
1255 	    (vpkt->vpkt_org_vpkt == NULL)) {
1256 		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
1257 	}
1258 
1259 	/*
1260 	 * Do not defer bind for PKT_DMA_PARTIAL
1261 	 */
1262 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1263 
1264 		/* This is a non pkt_dma_partial case */
1265 		if ((rval = vhci_bind_transport(
1266 		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
1267 		    != TRAN_ACCEPT) {
1268 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1269 			    "!vhci%d %x: failed to bind transport: "
1270 			    "vlun 0x%p pkt_reserved %x restore_lbp %x, "
1271 			    "lbp %x", instance, rval, (void *)vlun,
1272 			    pkt_reserve_cmd, restore_lbp, lbp));
1273 			if (restore_lbp)
1274 				(void) mdi_set_lb_policy(cdip, lbp);
1275 			if (pkt_reserve_cmd)
1276 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1277 			return (rval);
1278 		}
1279 		VHCI_DEBUG(8, (CE_NOTE, NULL,
1280 		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
1281 	}
1282 	ASSERT(vpkt->vpkt_hba_pkt != NULL);
1283 	ASSERT(vpkt->vpkt_path != NULL);
1284 
1285 	/*
1286 	 * This is the chance to adjust the pHCI's pkt and other information
1287 	 * from target driver's pkt.
1288 	 */
1289 	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
1290 	    (void *)vpkt));
1291 	vhci_update_pHCI_pkt(vpkt, pkt);
1292 
1293 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
1294 		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
1295 			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1296 			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
1297 			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
1298 			    (void *)vlun, (void *)vpkt->vpkt_path,
1299 			    (void *)vlun->svl_resrv_pip,
1300 			    mdi_get_lb_policy(cdip)));
1301 			reserve_failed = 1;
1302 		}
1303 	}
1304 
1305 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
1306 	if (svp == NULL || reserve_failed) {
1307 		if (pkt_reserve_cmd) {
1308 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1309 			    "!vhci_bind returned null svp vlun 0x%p",
1310 			    (void *)vlun));
1311 			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1312 			if (restore_lbp)
1313 				(void) mdi_set_lb_policy(cdip, lbp);
1314 		}
1315 pkt_cleanup:
1316 		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1317 			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1318 			vpkt->vpkt_hba_pkt = NULL;
1319 			if (vpkt->vpkt_path) {
1320 				mdi_rele_path(vpkt->vpkt_path);
1321 				vpkt->vpkt_path = NULL;
1322 			}
1323 		}
1324 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1325 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1326 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1327 			sema_v(&vlun->svl_pgr_sema);
1328 		}
1329 		return (TRAN_BUSY);
1330 	}
1331 
1332 	if ((resrv_instance != 0) && (resrv_instance !=
1333 	    mdi_pi_get_path_instance(vpkt->vpkt_path))) {
1334 		/*
1335 		 * This is an attempt to reserve vpkt->vpkt_path.  But the
1336 		 * previously reserved path referred to by resrv_instance might
1337 		 * still be reserved.  Hence we will send a release command
1338 		 * there in order to avoid a reservation conflict.
1339 		 */
1340 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, "!vhci_scsi_start: "
1341 		    "conflicting reservation on another path, vlun 0x%p, "
1342 		    "reserved instance %d, new instance: %d, pip: 0x%p",
1343 		    (void *)vlun, resrv_instance,
1344 		    mdi_pi_get_path_instance(vpkt->vpkt_path),
1345 		    (void *)vpkt->vpkt_path));
1346 
1347 		/*
1348 		 * In rare cases, the path referred to by resrv_instance could
1349 		 * disappear in the meantime. Calling mdi_select_path() below
1350 		 * is an attempt to find out if the path still exists. It also
1351 		 * ensures that the path will be held when the release is sent.
1352 		 */
1353 		rval = mdi_select_path(cdip, NULL, MDI_SELECT_PATH_INSTANCE,
1354 		    (void *)(intptr_t)resrv_instance, &pip);
1355 
1356 		if ((rval == MDI_SUCCESS) && (pip != NULL)) {
1357 			svp_resrv = (scsi_vhci_priv_t *)
1358 			    mdi_pi_get_vhci_private(pip);
1359 			rel_pkt = scsi_init_pkt(&svp_resrv->svp_psd->sd_address,
1360 			    NULL, NULL, CDB_GROUP0,
1361 			    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC,
1362 			    NULL);
1363 
1364 			if (rel_pkt == NULL) {
1365 				char	*p_path;
1366 
1367 				/*
1368 				 * This is very unlikely.
1369 				 * scsi_init_pkt(SLEEP_FUNC) does not fail
1370 				 * because of resources. But in theory it could
1371 				 * fail for some other reason. There is no easy
1372 				 * way to recover, though. Log a warning
1373 				 * and return.
1374 				 */
1375 				p_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1376 				vhci_log(CE_WARN, vhci->vhci_dip, "!Sending "
1377 				    "RELEASE(6) to %s failed, a potential "
1378 				    "reservation conflict ahead.",
1379 				    ddi_pathname(mdi_pi_get_phci(pip), p_path));
1380 				kmem_free(p_path, MAXPATHLEN);
1381 
1382 				if (restore_lbp)
1383 					(void) mdi_set_lb_policy(cdip, lbp);
1384 
1385 				/* no need to check pkt_reserve_cmd here */
1386 				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1387 				return (TRAN_FATAL_ERROR);
1388 			}
1389 
1390 			rel_pkt->pkt_cdbp[0] = SCMD_RELEASE;
1391 			rel_pkt->pkt_time = 60;
1392 
1393 			/*
1394 			 * Ignore the return value.  If it fails, the path
1395 			 * most likely is no longer reserved
1396 			 * anyway.
1397 			 */
1398 			(void) vhci_do_scsi_cmd(rel_pkt);
1399 			VHCI_DEBUG(1, (CE_NOTE, NULL,
1400 			    "!vhci_scsi_start: path 0x%p, issued SCSI-2"
1401 			    " RELEASE\n", (void *)pip));
1402 			scsi_destroy_pkt(rel_pkt);
1403 			mdi_rele_path(pip);
1404 		}
1405 	}
1406 
1407 	VHCI_INCR_PATH_CMDCOUNT(svp);
1408 
1409 	/*
1410 	 * Ensure that no other IOs raced ahead while a RESERVE cmd was
1411 	 * QUIESCING the same lun.
1412 	 */
1413 	if ((!pkt_reserve_cmd) &&
1414 	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1415 		VHCI_DECR_PATH_CMDCOUNT(svp);
1416 		goto pkt_cleanup;
1417 	}
1418 
1419 	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
1420 	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1421 		/*
1422 		 * currently this thread only handles running PGR
1423 		 * commands, so don't bother creating it unless
1424 		 * something interesting is going to happen (like
1425 		 * either a PGR out, or a PGR in with enough space
1426 		 * to hold the keys that are getting returned)
1427 		 */
1428 		mutex_enter(&vlun->svl_mutex);
1429 		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
1430 		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1431 			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
1432 			    1, MINCLSYSPRI, 1, 4, 0);
1433 			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
1434 		}
1435 		mutex_exit(&vlun->svl_mutex);
1436 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1437 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1438 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1439 			if ((rval = vhci_pgr_register_start(vlun, pkt)) != 0) {
1440 				/* an error */
1441 				sema_v(&vlun->svl_pgr_sema);
1442 				return (rval);
1443 			}
1444 		}
1445 	}
1446 
1447 	/*
1448 	 * SCSI-II RESERVE cmd is not expected in polled mode.
1449 	 * If this changes it needs to be handled for the polled scenario.
1450 	 */
1451 	flags = vpkt->vpkt_hba_pkt->pkt_flags;
1452 
1453 	/*
1454 	 * Set the path_instance *before* sending the scsi_pkt down the path
1455 	 * to mpxio's pHCI so that additional path abstractions at a pHCI
1456 	 * level (like maybe iSCSI at some point in the future) can update
1457 	 * the path_instance.
1458 	 */
1459 	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
1460 		vpkt->vpkt_hba_pkt->pkt_path_instance =
1461 		    mdi_pi_get_path_instance(vpkt->vpkt_path);
1462 
1463 	rval = scsi_transport(vpkt->vpkt_hba_pkt);
1464 	if (rval == TRAN_ACCEPT) {
1465 		if (flags & FLAG_NOINTR) {
1466 			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
1467 			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;
1468 
1469 			ASSERT(tpkt != NULL);
1470 			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
1471 			tpkt->pkt_resid = pkt->pkt_resid;
1472 			tpkt->pkt_state = pkt->pkt_state;
1473 			tpkt->pkt_statistics = pkt->pkt_statistics;
1474 			tpkt->pkt_reason = pkt->pkt_reason;
1475 
1476 			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
1477 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
1478 				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
1479 				    vpkt->vpkt_tgt_init_scblen);
1480 			}
1481 
1482 			VHCI_DECR_PATH_CMDCOUNT(svp);
1483 			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1484 				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1485 				vpkt->vpkt_hba_pkt = NULL;
1486 				if (vpkt->vpkt_path) {
1487 					mdi_rele_path(vpkt->vpkt_path);
1488 					vpkt->vpkt_path = NULL;
1489 				}
1490 			}
1491 			/*
1492 			 * This path will not automatically retry pkts
1493 			 * internally, therefore, vpkt_org_vpkt should
1494 			 * never be set.
1495 			 */
1496 			ASSERT(vpkt->vpkt_org_vpkt == NULL);
1497 			scsi_hba_pkt_comp(tpkt);
1498 		}
1499 		return (rval);
1500 	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1501 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1502 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1503 		/* the command exited with bad status */
1504 		sema_v(&vlun->svl_pgr_sema);
1505 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
1506 		/* the command exited with bad status */
1507 		sema_v(&vlun->svl_pgr_sema);
1508 	} else if (pkt_reserve_cmd) {
1509 		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1510 		    "!vhci_scsi_start: reserve failed vlun 0x%p",
1511 		    (void *)vlun));
1512 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1513 		if (restore_lbp)
1514 			(void) mdi_set_lb_policy(cdip, lbp);
1515 	}
1516 
1517 	ASSERT(vpkt->vpkt_hba_pkt != NULL);
1518 	VHCI_DECR_PATH_CMDCOUNT(svp);
1519 
1520 	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
1521 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1522 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1523 		vpkt->vpkt_hba_pkt = NULL;
1524 		if (vpkt->vpkt_path) {
1525 			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
1526 			mdi_rele_path(vpkt->vpkt_path);
1527 			vpkt->vpkt_path = NULL;
1528 		}
1529 	}
1530 	return (TRAN_BUSY);
1531 }
1532 
1533 /*
1534  * Function name : vhci_scsi_reset()
1535  *
1536  * Return Values : 0 - reset failed
1537  *		   1 - reset succeeded
1538  */
1539 
1540 /* ARGSUSED */
1541 static int
1542 vhci_scsi_reset(struct scsi_address *ap, int level)
1543 {
1544 	int rval = 0;
1545 
1546 	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
1547 	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
1548 		return (vhci_scsi_reset_target(ap, level, TRUE));
1549 	} else if (level == RESET_ALL) {
1550 		return (vhci_scsi_reset_bus(ap));
1551 	}
1552 
1553 	return (rval);
1554 }
1555 
1556 /*
1557  * vhci_recovery_reset:
1558  *	Issues reset to the device
1559  * Input:
1560  *	vlun - vhci lun pointer of the device
1561  *	ap - address of the device
1562  *	select_path:
1563  *		If select_path is FALSE, then the address specified in ap is
1564  *		the path on which reset will be issued.
1565  *		If select_path is TRUE, then path is obtained by calling
1566  *		mdi_select_path.
1567  *
1568  *	recovery_depth:
1569  *		Caller can specify the level of reset.
1570  *		VHCI_DEPTH_LUN -
1571  *			Issues LUN RESET if device supports lun reset.
1572  *		VHCI_DEPTH_TARGET -
1573  *			If Lun Reset fails or the device does not support
1574  *			Lun Reset, issues TARGET RESET
1575  *		VHCI_DEPTH_ALL -
1576  *			If Lun Reset fails or the device does not support
1577  *			Lun Reset, issues TARGET RESET.
1578  *			If TARGET RESET does not succeed, issues Bus Reset.
1579  */
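/*
 * Illustrative call (hedged; modeled on how this driver's error-recovery
 * path typically escalates after a failed command):
 *
 *	(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
 *	    TRUE, VHCI_DEPTH_TARGET);
 */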
1580 
1581 static int
1582 vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
1583     uint8_t select_path, uint8_t recovery_depth)
1584 {
1585 	int	ret = 0;
1586 
1587 	ASSERT(ap != NULL);
1588 
1589 	if (vlun && vlun->svl_support_lun_reset == 1) {
1590 		ret = vhci_scsi_reset_target(ap, RESET_LUN,
1591 		    select_path);
1592 	}
1593 
1594 	recovery_depth--;
1595 
1596 	if ((ret == 0) && recovery_depth) {
1597 		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
1598 		    select_path);
1599 		recovery_depth--;
1600 	}
1601 
1602 	if ((ret == 0) && recovery_depth) {
1603 		(void) scsi_reset(ap, RESET_ALL);
1604 	}
1605 
1606 	return (ret);
1607 }
1608 
1609 /*
1610  * Note: The scsi_address passed to this routine could be the scsi_address
1611  * for the virtual device or the physical device. No assumptions should be
1612  * made in this routine about the contents of the ap structure.
1613  * Further, note that the child dip would be the dip of the ssd node regardless
1614  * of the scsi_address passed in.
1615  */
1616 static int
1617 vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
1618 {
1619 	dev_info_t		*vdip, *cdip;
1620 	mdi_pathinfo_t		*pip = NULL;
1621 	mdi_pathinfo_t		*npip = NULL;
1622 	int			rval = -1;
1623 	scsi_vhci_priv_t	*svp = NULL;
1624 	struct scsi_address	*pap = NULL;
1625 	scsi_hba_tran_t		*hba = NULL;
1626 	int			sps;
1627 	struct scsi_vhci	*vhci = NULL;
1628 
1629 	if (select_path != TRUE) {
1630 		ASSERT(ap != NULL);
1631 		if (level == RESET_LUN) {
1632 			hba = ap->a_hba_tran;
1633 			ASSERT(hba != NULL);
1634 			return (hba->tran_reset(ap, RESET_LUN));
1635 		}
1636 		return (scsi_reset(ap, level));
1637 	}
1638 
1639 	cdip = ADDR2DIP(ap);
1640 	ASSERT(cdip != NULL);
1641 	vdip = ddi_get_parent(cdip);
1642 	ASSERT(vdip != NULL);
1643 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
1644 	ASSERT(vhci != NULL);
1645 
1646 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
1647 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
1648 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1649 		    "Unable to get a path, dip 0x%p", (void *)cdip));
1650 		return (0);
1651 	}
1652 again:
1653 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
1654 	if (svp == NULL) {
1655 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1656 		    "priv is NULL, pip 0x%p", (void *)pip));
1657 		mdi_rele_path(pip);
1658 		return (0);
1659 	}
1660 
1661 	if (svp->svp_psd == NULL) {
1662 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1663 		    "psd is NULL, pip 0x%p, svp 0x%p",
1664 		    (void *)pip, (void *)svp));
1665 		mdi_rele_path(pip);
1666 		return (0);
1667 	}
1668 
1669 	pap = &svp->svp_psd->sd_address;
1670 	hba = pap->a_hba_tran;
1671 
1672 	ASSERT(pap != NULL);
1673 	ASSERT(hba != NULL);
1674 
1675 	if (hba->tran_reset != NULL) {
1676 		if (hba->tran_reset(pap, level) == 0) {
1677 			vhci_log(CE_WARN, vdip, "!%s%d: "
1678 			    "path %s, reset %d failed",
1679 			    ddi_driver_name(cdip), ddi_get_instance(cdip),
1680 			    mdi_pi_spathname(pip), level);
1681 
1682 			/*
1683 			 * Select next path and issue the reset, repeat
1684 			 * until all paths are exhausted
1685 			 */
1686 			sps = mdi_select_path(cdip, NULL,
1687 			    MDI_SELECT_ONLINE_PATH, pip, &npip);
1688 			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
1689 				mdi_rele_path(pip);
1690 				return (0);
1691 			}
1692 			mdi_rele_path(pip);
1693 			pip = npip;
1694 			goto again;
1695 		}
1696 		mdi_rele_path(pip);
1697 		mutex_enter(&vhci->vhci_mutex);
1698 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
1699 		    &vhci->vhci_reset_notify_listf);
1700 		mutex_exit(&vhci->vhci_mutex);
1701 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
1702 		    "reset %d sent down pip:%p for cdip:%p\n", level,
1703 		    (void *)pip, (void *)cdip));
1704 		return (1);
1705 	}
1706 	mdi_rele_path(pip);
1707 	return (0);
1708 }
1709 
1710 
1711 /* ARGSUSED */
1712 static int
1713 vhci_scsi_reset_bus(struct scsi_address *ap)
1714 {
1715 	return (1);
1716 }
1717 
1718 
1719 /*
1720  * called by vhci_getcap and vhci_setcap to get and set (respectively)
1721  * SCSI capabilities
1722  */
1723 /* ARGSUSED */
1724 static int
1725 vhci_commoncap(struct scsi_address *ap, char *cap,
1726     int val, int tgtonly, int doset)
1727 {
1728 	struct scsi_vhci		*vhci = ADDR2VHCI(ap);
1729 	struct scsi_vhci_lun		*vlun = ADDR2VLUN(ap);
1730 	int			cidx;
1731 	int			rval = 0;
1732 
1733 	if (cap == NULL) {
1734 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1735 		    "!vhci_commoncap: invalid arg"));
1736 		return (rval);
1737 	}
1738 
1739 	if (vlun == NULL) {
1740 		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1741 		    "!vhci_commoncap: vlun is null"));
1742 		return (rval);
1743 	}
1744 
1745 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
1746 		return (UNDEFINED);
1747 	}
1748 
1749 	/*
1750 	 * Process setcap request.
1751 	 */
1752 	if (doset) {
1753 		/*
1754 		 * At present, we can only set binary (0/1) values
1755 		 */
1756 		switch (cidx) {
1757 		case SCSI_CAP_ARQ:
1758 			if (val == 0) {
1759 				rval = 0;
1760 			} else {
1761 				rval = 1;
1762 			}
1763 			break;
1764 
1765 		case SCSI_CAP_LUN_RESET:
1766 			if (tgtonly == 0) {
1767 				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1768 				    "scsi_vhci_setcap: "
1769 				    "Returning error since whom = 0"));
1770 				rval = -1;
1771 				break;
1772 			}
1773 			/*
1774 			 * Set the capability accordingly.
1775 			 */
1776 			mutex_enter(&vlun->svl_mutex);
1777 			vlun->svl_support_lun_reset = val;
1778 			rval = val;
1779 			mutex_exit(&vlun->svl_mutex);
1780 			break;
1781 
1782 		case SCSI_CAP_SECTOR_SIZE:
1783 			mutex_enter(&vlun->svl_mutex);
1784 			vlun->svl_sector_size = val;
1785 			vlun->svl_setcap_done = 1;
1786 			mutex_exit(&vlun->svl_mutex);
1787 			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);
1788 
1789 			/* Always return success */
1790 			rval = 1;
1791 			break;
1792 
1793 		default:
1794 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1795 			    "!vhci_setcap: unsupported %d", cidx));
1796 			rval = UNDEFINED;
1797 			break;
1798 		}
1799 
1800 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1801 		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
1802 		    "0x%x/0x%x/0x%x/%d\n",
1803 		    cap, val, tgtonly, doset, rval));
1804 
1805 	} else {
1806 		/*
1807 		 * Process getcap request.
1808 		 */
1809 		switch (cidx) {
1810 		case SCSI_CAP_DMA_MAX:
1811 			/*
1812 			 * For X86 this capability is caught in scsi_ifgetcap().
1813 			 * XXX Should this be getting the value from the pHCI?
1814 			 */
1815 			rval = (int)VHCI_DMA_MAX_XFER_CAP;
1816 			break;
1817 
1818 		case SCSI_CAP_INITIATOR_ID:
1819 			rval = 0x00;
1820 			break;
1821 
1822 		case SCSI_CAP_ARQ:
1823 		case SCSI_CAP_RESET_NOTIFICATION:
1824 		case SCSI_CAP_TAGGED_QING:
1825 			rval = 1;
1826 			break;
1827 
1828 		case SCSI_CAP_SCSI_VERSION:
1829 			rval = 3;
1830 			break;
1831 
1832 		case SCSI_CAP_INTERCONNECT_TYPE:
1833 			rval = INTERCONNECT_FABRIC;
1834 			break;
1835 
1836 		case SCSI_CAP_LUN_RESET:
1837 			/*
1838 			 * scsi_vhci will always return success for LUN reset.
1839 			 * When a request to reset the LUN arrives through
1840 			 * the scsi_reset entry point, an attempt will be
1841 			 * made to issue the reset through all of the
1842 			 * possible paths.
1843 			 */
1844 			mutex_enter(&vlun->svl_mutex);
1845 			rval = vlun->svl_support_lun_reset;
1846 			mutex_exit(&vlun->svl_mutex);
1847 			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
1848 			    "scsi_vhci_getcap:"
1849 			    "Getting the Lun reset capability %d", rval));
1850 			break;
1851 
1852 		case SCSI_CAP_SECTOR_SIZE:
1853 			mutex_enter(&vlun->svl_mutex);
1854 			rval = vlun->svl_sector_size;
1855 			mutex_exit(&vlun->svl_mutex);
1856 			break;
1857 
1858 		case SCSI_CAP_CDB_LEN:
1859 			rval = VHCI_SCSI_CDB_SIZE;
1860 			break;
1861 
1862 		case SCSI_CAP_DMA_MAX_ARCH:
1863 			/*
1864 			 * For X86 this capability is caught in scsi_ifgetcap().
1865 			 * XXX Should this be getting the value from the pHCI?
1866 			 */
1867 			rval = 0;
1868 			break;
1869 
1870 		default:
1871 			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1872 			    "!vhci_getcap: unsupported %d", cidx));
1873 			rval = UNDEFINED;
1874 			break;
1875 		}
1876 
1877 		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1878 		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
1879 		    "0x%x/0x%x/0x%x/%d\n",
1880 		    cap, val, tgtonly, doset, rval));
1881 	}
1882 	return (rval);
1883 }
1884 
1885 
1886 /*
1887  * Function name : vhci_scsi_getcap()
1888  *
1889  */
1890 static int
1891 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
1892 {
1893 	return (vhci_commoncap(ap, cap, 0, whom, 0));
1894 }
1895 
1896 static int
1897 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1898 {
1899 	return (vhci_commoncap(ap, cap, value, whom, 1));
1900 }
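
/*
 * Editor's note: a hedged usage sketch, not part of the driver.  A target
 * driver layered on scsi_vhci reaches vhci_commoncap() above through the
 * standard scsi_ifgetcap(9F)/scsi_ifsetcap(9F) interfaces; "lun-reset" is
 * the capability string corresponding to SCSI_CAP_LUN_RESET.  Note that
 * vhci_commoncap() rejects a set request with whom == 0.
 */
static void
example_enable_lun_reset(struct scsi_address *ap)
{
	/* if the capability is known but currently off, try to enable it */
	if (scsi_ifgetcap(ap, "lun-reset", 1) == 0)
		(void) scsi_ifsetcap(ap, "lun-reset", 1, 1);
}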
1901 
1902 /*
1903  * Function name : vhci_scsi_abort()
1904  */
1905 /* ARGSUSED */
1906 static int
1907 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1908 {
1909 	return (0);
1910 }
1911 
1912 /*
1913  * Function name : vhci_scsi_init_pkt
1914  *
1915  * Return Values : pointer to scsi_pkt, or NULL
1916  */
1917 /* ARGSUSED */
1918 static struct scsi_pkt *
1919 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1920     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1921     int flags, int (*callback)(caddr_t), caddr_t arg)
1922 {
1923 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
1924 	struct vhci_pkt		*vpkt;
1925 	int			rval;
1926 	int			newpkt = 0;
1927 	struct scsi_pkt		*pktp;
1928 
1929 
1930 	if (pkt == NULL) {
1931 		if (cmdlen > VHCI_SCSI_CDB_SIZE) {
1932 			if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) ||
1933 			    ((flags & VHCI_SCSI_OSD_PKT_FLAGS) !=
1934 			    VHCI_SCSI_OSD_PKT_FLAGS)) {
1935 				VHCI_DEBUG(1, (CE_NOTE, NULL,
1936 				    "!init pkt: cdb size not supported\n"));
1937 				return (NULL);
1938 			}
1939 		}
1940 
1941 		pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
1942 		    ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
1943 		    arg);
1944 
1945 		if (pktp == NULL) {
1946 			return (NULL);
1947 		}
1948 
1949 		/* Get the vhci's private structure */
1950 		vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
1951 		ASSERT(vpkt);
1952 
1953 		/* Save the target driver's packet */
1954 		vpkt->vpkt_tgt_pkt = pktp;
1955 
1956 		/*
1957 		 * Save pkt_tgt_init_pkt fields if deferred binding
1958 		 * is needed or for other purposes.
1959 		 */
1960 		vpkt->vpkt_tgt_init_pkt_flags = flags;
1961 		vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
1962 		vpkt->vpkt_state = VHCI_PKT_IDLE;
1963 		vpkt->vpkt_tgt_init_cdblen = cmdlen;
1964 		vpkt->vpkt_tgt_init_scblen = statuslen;
1965 		newpkt = 1;
1966 	} else { /* pkt not NULL */
1967 		vpkt = pkt->pkt_ha_private;
1968 	}
1969 
1970 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
1971 	    "vpkt %p flags %x\n", (void *)vpkt, flags));
1972 
1973 	/* Clear any stale error flags */
1974 	if (bp) {
1975 		bioerror(bp, 0);
1976 	}
1977 
1978 	vpkt->vpkt_tgt_init_bp = bp;
1979 
1980 	if (flags & PKT_DMA_PARTIAL) {
1981 
1982 		/*
1983 		 * Immediate binding is needed.
1984 		 * Target driver may not set this flag in next invocation.
1985 		 * The target driver may not set this flag in the next
1986 		 * invocation; vhci has to remember that it was set during
1987 		 * the first invocation of vhci_scsi_init_pkt.
1988 		vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
1989 	}
1990 
1991 	if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
1992 
1993 		/*
1994 		 * Re-initialize some of the target driver packet state
1995 		 * information.
1996 		 */
1997 		vpkt->vpkt_tgt_pkt->pkt_state = 0;
1998 		vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
1999 		vpkt->vpkt_tgt_pkt->pkt_reason = 0;
2000 
2001 		/*
2002 		 * Bind a vpkt->vpkt_path for this IO at init time.
2003 		 * If an IO error happens later, the target driver will clear
2004 		 * this vpkt->vpkt_path binding before re-initializing the IO.
2005 		 */
2006 		VHCI_DEBUG(8, (CE_NOTE, NULL,
2007 		    "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
2008 		    (void *)vpkt, newpkt));
2009 		if (pkt && vpkt->vpkt_hba_pkt) {
2010 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2011 			    "v_s_i_p calling update_pHCI_pkt resid %ld\n",
2012 			    pkt->pkt_resid));
2013 			vhci_update_pHCI_pkt(vpkt, pkt);
2014 		}
2015 		if (callback == SLEEP_FUNC) {
2016 			rval = vhci_bind_transport(
2017 			    ap, vpkt, flags, callback);
2018 		} else {
2019 			rval = vhci_bind_transport(
2020 			    ap, vpkt, flags, NULL_FUNC);
2021 		}
2022 		VHCI_DEBUG(8, (CE_NOTE, NULL,
2023 		    "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
2024 		    (void *)vpkt, rval));
2025 		if (bp) {
2026 			if (rval == TRAN_FATAL_ERROR) {
2027 				/*
2028 				 * No paths available. Could not bind
2029 				 * any pHCI. Setting EFAULT as a way
2030 				 * to indicate no DMA is mapped.
2031 				 */
2032 				bioerror(bp, EFAULT);
2033 			} else {
2034 				/*
2035 				 * Do not indicate any pHCI errors to
2036 				 * target driver otherwise.
2037 				 */
2038 				bioerror(bp, 0);
2039 			}
2040 		}
2041 		if (rval != TRAN_ACCEPT) {
2042 			VHCI_DEBUG(8, (CE_NOTE, NULL,
2043 			    "vhci_scsi_init_pkt: "
2044 			    "v_b_t failed 0x%p newpkt %x\n",
2045 			    (void *)vpkt, newpkt));
2046 			if (newpkt) {
2047 				scsi_hba_pkt_free(ap,
2048 				    vpkt->vpkt_tgt_pkt);
2049 			}
2050 			return (NULL);
2051 		}
2052 		ASSERT(vpkt->vpkt_hba_pkt != NULL);
2053 		ASSERT(vpkt->vpkt_path != NULL);
2054 
2055 		/* Update the resid for the target driver */
2056 		vpkt->vpkt_tgt_pkt->pkt_resid =
2057 		    vpkt->vpkt_hba_pkt->pkt_resid;
2058 	}
2059 
2060 	return (vpkt->vpkt_tgt_pkt);
2061 }
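
/*
 * Editor's note: a minimal sketch (hypothetical names, completion and
 * error handling elided) of the PKT_DMA_PARTIAL contract implemented
 * above, seen from a target driver.  The first scsi_init_pkt() call binds
 * as much of bp as the DMA engine allows; after each window completes,
 * calling scsi_init_pkt() again with the same pkt slides the DMA window,
 * and pkt_resid reports what remains unbound.
 */
static void
example_partial_dma(struct scsi_address *ap, struct buf *bp)
{
	struct scsi_pkt	*pkt;

	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
	    sizeof (struct scsi_arq_status), 0, PKT_DMA_PARTIAL,
	    SLEEP_FUNC, NULL);
	while (pkt != NULL) {
		/* ... transport the current window and wait for it ... */
		if (pkt->pkt_resid == 0)
			break;		/* the whole buffer has been moved */
		/* re-binding with the same pkt slides the DMA window */
		pkt = scsi_init_pkt(ap, pkt, bp, CDB_GROUP1,
		    sizeof (struct scsi_arq_status), 0, PKT_DMA_PARTIAL,
		    SLEEP_FUNC, NULL);
	}
	if (pkt != NULL)
		scsi_destroy_pkt(pkt);
}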
2062 
2063 /*
2064  * Function name : vhci_scsi_destroy_pkt
2065  *
2066  * Return Values : none
2067  */
2068 static void
2069 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2070 {
2071 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2072 
2073 	VHCI_DEBUG(8, (CE_NOTE, NULL,
2074 	    "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
2075 
2076 	vpkt->vpkt_tgt_init_pkt_flags = 0;
2077 	if (vpkt->vpkt_hba_pkt) {
2078 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2079 		vpkt->vpkt_hba_pkt = NULL;
2080 	}
2081 	if (vpkt->vpkt_path) {
2082 		mdi_rele_path(vpkt->vpkt_path);
2083 		vpkt->vpkt_path = NULL;
2084 	}
2085 
2086 	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
2087 	scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
2088 }
2089 
2090 /*
2091  * Function name : vhci_scsi_dmafree()
2092  *
2093  * Return Values : none
2094  */
2095 /*ARGSUSED*/
2096 static void
2097 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2098 {
2099 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2100 
2101 	VHCI_DEBUG(6, (CE_NOTE, NULL,
2102 	    "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
2103 
2104 	ASSERT(vpkt != NULL);
2105 	if (vpkt->vpkt_hba_pkt) {
2106 		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2107 		vpkt->vpkt_hba_pkt = NULL;
2108 	}
2109 	if (vpkt->vpkt_path) {
2110 		mdi_rele_path(vpkt->vpkt_path);
2111 		vpkt->vpkt_path = NULL;
2112 	}
2113 }
2114 
2115 /*
2116  * Function name : vhci_scsi_sync_pkt()
2117  *
2118  * Return Values : none
2119  */
2120 /*ARGSUSED*/
2121 static void
2122 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2123 {
2124 	struct vhci_pkt	*vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2125 
2126 	ASSERT(vpkt != NULL);
2127 	if (vpkt->vpkt_hba_pkt) {
2128 		scsi_sync_pkt(vpkt->vpkt_hba_pkt);
2129 	}
2130 }
2131 
2132 /*
2133  * routine for reset notification setup, to register or cancel.
2134  */
2135 static int
2136 vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
2137     void (*callback)(caddr_t), caddr_t arg)
2138 {
2139 	struct scsi_vhci *vhci = ADDR2VHCI(ap);
2140 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
2141 	    &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
2142 }
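
/*
 * Editor's note: a hedged sketch of the consumer side, not part of the
 * driver.  A target driver registers for reset notification through
 * scsi_reset_notify(9F), which lands in vhci_scsi_reset_notify() above;
 * passing SCSI_RESET_CANCEL undoes the registration.  Names here are
 * illustrative.
 */
static void
example_reset_callback(caddr_t arg)
{
	/* a reset was observed; invalidate cached device state here */
}

static void
example_register_reset_notify(struct scsi_address *ap, caddr_t arg)
{
	(void) scsi_reset_notify(ap, SCSI_RESET_NOTIFY,
	    example_reset_callback, arg);
}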
2143 
2144 static int
2145 vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
2146     char *name, int len, int bus_addr)
2147 {
2148 	dev_info_t		*cdip;
2149 	char			*guid;
2150 	scsi_vhci_lun_t		*vlun;
2151 
2152 	ASSERT(sd != NULL);
2153 	ASSERT(name != NULL);
2154 
2155 	*name = 0;
2156 	cdip = sd->sd_dev;
2157 
2158 	ASSERT(cdip != NULL);
2159 
2160 	if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS)
2161 		return (1);
2162 
2163 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
2164 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS)
2165 		return (1);
2166 
2167 	/*
2168 	 * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>".
2169 	 *	<guid>		bus_addr argument == 0
2170 	 *	<bus_addr>	bus_addr argument != 0
2171 	 * Since the <guid> is already provided with unit-address, we just
2172  * provide the failover module in <bus_addr> to keep the output shorter.
2173 	 */
2174 	vlun = ADDR2VLUN(&sd->sd_address);
2175 	if (bus_addr == 0) {
2176 		/* report the guid:  */
2177 		(void) snprintf(name, len, "g%s", guid);
2178 	} else if (vlun && vlun->svl_fops_name) {
2179 		/* report the name of the failover module */
2180 		(void) snprintf(name, len, "%s", vlun->svl_fops_name);
2181 	}
2182 
2183 	ddi_prop_free(guid);
2184 	return (1);
2185 }
2186 
2187 static int
2188 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
2189 {
2190 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
2191 }
2192 
2193 static int
2194 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
2195 {
2196 	return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
2197 }
2198 
2199 /*
2200  * Return a pointer to the guid part of the devnm.
2201  * devnm format is "nodename@busaddr", busaddr format is "gGUID".
2202  */
2203 static char *
2204 vhci_devnm_to_guid(char *devnm)
2205 {
2206 	char *cp = devnm;
2207 
2208 	if (devnm == NULL)
2209 		return (NULL);
2210 
2211 	while (*cp != '\0' && *cp != '@')
2212 		cp++;
2213 	if (*cp == '@' && *(cp + 1) == 'g')
2214 		return (cp + 2);
2215 	return (NULL);
2216 }
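
/*
 * Editor's note: an illustrative call, not part of the driver.  Given a
 * devnm such as "disk@g600a0b8000267ca8" (GUID value made up for the
 * example), vhci_devnm_to_guid() returns a pointer to the text following
 * "@g", i.e. "600a0b8000267ca8", or NULL if no "@g" separator is found.
 */
static char *
example_guid_lookup(void)
{
	static char devnm[] = "disk@g600a0b8000267ca8";

	return (vhci_devnm_to_guid(devnm));
}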
2217 
2218 static int
2219 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
2220     int (*func)(caddr_t))
2221 {
2222 	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
2223 	dev_info_t		*cdip = ADDR2DIP(ap);
2224 	mdi_pathinfo_t		*pip = NULL;
2225 	mdi_pathinfo_t		*npip = NULL;
2226 	scsi_vhci_priv_t	*svp = NULL;
2227 	struct scsi_device	*psd = NULL;
2228 	struct scsi_address	*address = NULL;
2229 	struct scsi_pkt		*pkt = NULL;
2230 	int			rval = -1;
2231 	int			pgr_sema_held = 0;
2232 	int			held;
2233 	int			mps_flag = MDI_SELECT_ONLINE_PATH;
2234 	struct scsi_vhci_lun	*vlun;
2235 	int			path_instance = 0;
2236 
2237 	vlun = ADDR2VLUN(ap);
2238 	ASSERT(vlun != 0);
2239 
2240 	if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
2241 	    (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2242 	    VHCI_PROUT_REGISTER) ||
2243 	    ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2244 	    VHCI_PROUT_R_AND_IGNORE))) {
2245 		if (!sema_tryp(&vlun->svl_pgr_sema))
2246 			return (TRAN_BUSY);
2247 		pgr_sema_held = 1;
2248 		if (vlun->svl_first_path != NULL) {
2249 			rval = mdi_select_path(cdip, NULL,
2250 			    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2251 			    NULL, &pip);
2252 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2253 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2254 				    "vhci_bind_transport: path select fail\n"));
2255 			} else {
2256 				npip = pip;
2257 				do {
2258 					if (npip == vlun->svl_first_path) {
2259 						VHCI_DEBUG(4, (CE_NOTE, NULL,
2260 						    "vhci_bind_transport: "
2261 						    "valid first path 0x%p\n",
2262 						    (void *)
2263 						    vlun->svl_first_path));
2264 						pip = vlun->svl_first_path;
2265 						goto bind_path;
2266 					}
2267 					pip = npip;
2268 					rval = mdi_select_path(cdip, NULL,
2269 					    MDI_SELECT_ONLINE_PATH |
2270 					    MDI_SELECT_STANDBY_PATH,
2271 					    pip, &npip);
2272 					mdi_rele_path(pip);
2273 				} while ((rval == MDI_SUCCESS) &&
2274 				    (npip != NULL));
2275 			}
2276 		}
2277 
2278 		if (vlun->svl_first_path) {
2279 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2280 			    "vhci_bind_transport: invalid first path 0x%p\n",
2281 			    (void *)vlun->svl_first_path));
2282 			vlun->svl_first_path = NULL;
2283 		}
2284 	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2285 		if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2286 			if (!sema_tryp(&vlun->svl_pgr_sema))
2287 				return (TRAN_BUSY);
2288 		}
2289 		pgr_sema_held = 1;
2290 	}
2291 
2292 	/*
2293 	 * If the path is already bound for the PKT_DMA_PARTIAL case,
2294 	 * try to use the same path.
2295 	 */
2296 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2297 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2298 		    "vhci_bind_transport: PKT_DMA_PARTIAL "
2299 		    "vpkt 0x%p, path 0x%p\n",
2300 		    (void *)vpkt, (void *)vpkt->vpkt_path));
2301 		pip = vpkt->vpkt_path;
2302 		goto bind_path;
2303 	}
2304 
2305 	/*
2306 	 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
2307 	 * indicates that mdi_select_path should be called to select a
2308 	 * specific instance.
2309 	 *
2310 	 * NB: Condition pkt_path_instance reference on proper allocation.
2311 	 */
2312 	if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
2313 	    scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
2314 		path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2315 	}
2316 
2317 	/*
2318 	 * If reservation is active bind the transport directly to the pip
2319 	 * with the reservation.
2320 	 */
2321 	if (vpkt->vpkt_hba_pkt == NULL) {
2322 		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2323 			if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2324 				pip = vlun->svl_resrv_pip;
2325 				mdi_hold_path(pip);
2326 				vlun->svl_waiting_for_activepath = 0;
2327 				rval = MDI_SUCCESS;
2328 				goto bind_path;
2329 			} else {
2330 				if (pgr_sema_held) {
2331 					sema_v(&vlun->svl_pgr_sema);
2332 				}
2333 				return (TRAN_BUSY);
2334 			}
2335 		}
2336 try_again:
2337 		rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2338 		    path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2339 		    (void *)(intptr_t)path_instance, &pip);
2340 		if (rval == MDI_BUSY) {
2341 			if (pgr_sema_held) {
2342 				sema_v(&vlun->svl_pgr_sema);
2343 			}
2344 			return (TRAN_BUSY);
2345 		} else if (rval == MDI_DEVI_ONLINING) {
2346 			/*
2347 			 * if we are here then we are in the midst of
2348 			 * an attach/probe of the client device.
2349 			 * We attempt to bind to ONLINE path if available,
2350 			 * else it is OK to bind to a STANDBY path (instead
2351 			 * of triggering a failover) because IO associated
2352 			 * with attach/probe (e.g. INQUIRY, block 0 read)
2353 			 * are completed by targets even on passive paths.
2354 			 * If no ONLINE paths are available, it is important
2355 			 * to set svl_waiting_for_activepath for two
2356 			 * reasons: (1) avoid sense analysis in the
2357 			 * "external failure detection" codepath in
2358 			 * vhci_intr().  Failure to do so will result in
2359 			 * infinite loop (unless an ONLINE path becomes
2360 			 * available at some point) (2) avoid
2361 			 * unnecessary failover (see "---Waiting For Active
2362 			 * Path---" comment below).
2363 			 */
2364 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2365 			    "state\n", (void *)cdip));
2366 			pip = NULL;
2367 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2368 			    mps_flag, NULL, &pip);
2369 			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2370 				if (vlun->svl_waiting_for_activepath == 0) {
2371 					vlun->svl_waiting_for_activepath = 1;
2372 					vlun->svl_wfa_time = gethrtime();
2373 				}
2374 				mps_flag |= MDI_SELECT_STANDBY_PATH;
2375 				rval = mdi_select_path(cdip,
2376 				    vpkt->vpkt_tgt_init_bp,
2377 				    mps_flag, NULL, &pip);
2378 				if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2379 					if (pgr_sema_held) {
2380 						sema_v(&vlun->svl_pgr_sema);
2381 					}
2382 					return (TRAN_FATAL_ERROR);
2383 				}
2384 				goto bind_path;
2385 			}
2386 		} else if ((rval == MDI_FAILURE) ||
2387 		    ((rval == MDI_NOPATH) && (path_instance))) {
2388 			if (pgr_sema_held) {
2389 				sema_v(&vlun->svl_pgr_sema);
2390 			}
2391 			return (TRAN_FATAL_ERROR);
2392 		}
2393 
2394 		if ((pip == NULL) || (rval == MDI_NOPATH)) {
2395 			while (vlun->svl_waiting_for_activepath) {
2396 				/*
2397 				 * ---Waiting For Active Path---
2398 				 * This device was discovered across a
2399 				 * passive path; let's wait for a little
2400 				 * bit, hopefully an active path will
2401 				 * show up obviating the need for a
2402 				 * failover
2403 				 */
2404 				if ((gethrtime() - vlun->svl_wfa_time) >=
2405 				    (60 * NANOSEC)) {
2406 					vlun->svl_waiting_for_activepath = 0;
2407 				} else {
2408 					drv_usecwait(1000);
2409 					if (vlun->svl_waiting_for_activepath
2410 					    == 0) {
2411 						/*
2412 						 * an active path has come
2413 						 * online!
2414 						 */
2415 						goto try_again;
2416 					}
2417 				}
2418 			}
2419 			VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2420 			if (!held) {
2421 				VHCI_DEBUG(4, (CE_NOTE, NULL,
2422 				    "!Lun not held\n"));
2423 				if (pgr_sema_held) {
2424 					sema_v(&vlun->svl_pgr_sema);
2425 				}
2426 				return (TRAN_BUSY);
2427 			}
2428 			/*
2429 			 * now that the LUN is stable, one last check
2430 			 * to make sure no other changes sneaked in
2431 			 * (like a path coming online or a
2432 			 * failover initiated by another thread)
2433 			 */
2434 			pip = NULL;
2435 			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2436 			    0, NULL, &pip);
2437 			if (pip != NULL) {
2438 				VHCI_RELEASE_LUN(vlun);
2439 				vlun->svl_waiting_for_activepath = 0;
2440 				goto bind_path;
2441 			}
2442 
2443 			/*
2444 			 * Check if there is an ONLINE path OR a STANDBY path
2445 			 * available. If none is available, do not attempt
2446 			 * to do a failover, just return a fatal error at this
2447 			 * point.
2448 			 */
2449 			npip = NULL;
2450 			rval = mdi_select_path(cdip, NULL,
2451 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2452 			    NULL, &npip);
2453 			if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2454 				/*
2455 				 * No paths available, just return a FATAL error.
2456 				 */
2457 				VHCI_RELEASE_LUN(vlun);
2458 				if (pgr_sema_held) {
2459 					sema_v(&vlun->svl_pgr_sema);
2460 				}
2461 				return (TRAN_FATAL_ERROR);
2462 			}
2463 			mdi_rele_path(npip);
2464 			if (!(vpkt->vpkt_state & VHCI_PKT_IN_FAILOVER)) {
2465 				VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2466 				    "mdi_failover\n"));
2467 				rval = mdi_failover(vhci->vhci_dip, cdip,
2468 				    MDI_FAILOVER_ASYNC);
2469 			} else {
2470 				rval = vlun->svl_failover_status;
2471 			}
2472 			if (rval == MDI_FAILURE) {
2473 				VHCI_RELEASE_LUN(vlun);
2474 				if (pgr_sema_held) {
2475 					sema_v(&vlun->svl_pgr_sema);
2476 				}
2477 				return (TRAN_FATAL_ERROR);
2478 			} else if (rval == MDI_BUSY) {
2479 				VHCI_RELEASE_LUN(vlun);
2480 				if (pgr_sema_held) {
2481 					sema_v(&vlun->svl_pgr_sema);
2482 				}
2483 				return (TRAN_BUSY);
2484 			} else {
2485 				if (pgr_sema_held) {
2486 					sema_v(&vlun->svl_pgr_sema);
2487 				}
2488 				vpkt->vpkt_state |= VHCI_PKT_IN_FAILOVER;
2489 				return (TRAN_BUSY);
2490 			}
2491 		}
2492 		vlun->svl_waiting_for_activepath = 0;
2493 bind_path:
2494 		vpkt->vpkt_path = pip;
2495 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2496 		ASSERT(svp != NULL);
2497 
2498 		psd = svp->svp_psd;
2499 		ASSERT(psd != NULL);
2500 		address = &psd->sd_address;
2501 	} else {
2502 		pkt = vpkt->vpkt_hba_pkt;
2503 		address = &pkt->pkt_address;
2504 	}
2505 
2506 	/* Verify match of specified path_instance and selected path_instance */
2507 	ASSERT((path_instance == 0) ||
2508 	    (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));
2509 
2510 	/*
2511 	 * For the PKT_DMA_PARTIAL case, call the pHCI's scsi_init_pkt whenever
2512 	 * target driver calls vhci_scsi_init_pkt.
2513 	 */
2514 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2515 	    vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2516 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2517 		    "vhci_bind_transport: PKT_DMA_PARTIAL "
2518 		    "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2519 		    (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2520 		pkt = vpkt->vpkt_hba_pkt;
2521 		address = &pkt->pkt_address;
2522 	}
2523 
2524 	if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2525 		pkt = scsi_init_pkt(address, pkt,
2526 		    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2527 		    vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL);
2528 
2529 		if (pkt == NULL) {
2530 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2531 			    "!bind transport: 0x%p 0x%p 0x%p\n",
2532 			    (void *)vhci, (void *)psd, (void *)vpkt));
2533 			if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2534 				MDI_PI_ERRSTAT(vpkt->vpkt_path,
2535 				    MDI_PI_TRANSERR);
2536 				mdi_rele_path(vpkt->vpkt_path);
2537 				vpkt->vpkt_path = NULL;
2538 			}
2539 			if (pgr_sema_held) {
2540 				sema_v(&vlun->svl_pgr_sema);
2541 			}
2542 			/*
2543 			 * Consider it a fatal error if b_error is
2544 			 * set as a result of DMA binding failure
2545 			 * vs. a condition of being temporarily out of
2546 			 * some resource
2547 			 */
2548 			if (vpkt->vpkt_tgt_init_bp == NULL ||
2549 			    geterror(vpkt->vpkt_tgt_init_bp))
2550 				return (TRAN_FATAL_ERROR);
2551 			else
2552 				return (TRAN_BUSY);
2553 		}
2554 	}
2555 
2556 	pkt->pkt_private = vpkt;
2557 	vpkt->vpkt_hba_pkt = pkt;
2558 	return (TRAN_ACCEPT);
2559 }
2560 
2561 
2562 /*PRINTFLIKE3*/
2563 void
2564 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2565 {
2566 	char		buf[256];
2567 	va_list		ap;
2568 
2569 	va_start(ap, fmt);
2570 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
2571 	va_end(ap);
2572 
2573 	scsi_log(dip, "scsi_vhci", level, buf);
2574 }
2575 
2576 /* do a PGR out with the information we've saved away */
2577 static int
2578 vhci_do_prout(scsi_vhci_priv_t *svp)
2579 {
2580 
2581 	struct scsi_pkt			*new_pkt;
2582 	struct buf			*bp;
2583 	scsi_vhci_lun_t			*vlun = svp->svp_svl;
2584 	int				rval, retry, nr_retry, ua_retry;
2585 	uint8_t				*sns, skey;
2586 
2587 	bp = getrbuf(KM_SLEEP);
2588 	bp->b_flags = B_WRITE;
2589 	bp->b_resid = 0;
2590 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2591 	bp->b_bcount = vlun->svl_bcount;
2592 
2593 	VHCI_INCR_PATH_CMDCOUNT(svp);
2594 
2595 	new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
2596 	    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
2597 	    SLEEP_FUNC, NULL);
2598 	if (new_pkt == NULL) {
2599 		VHCI_DECR_PATH_CMDCOUNT(svp);
2600 		freerbuf(bp);
2601 		cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
2602 		return (0);
2603 	}
2604 	mutex_enter(&vlun->svl_mutex);
2605 	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2606 	bp->b_bcount = vlun->svl_bcount;
2607 	bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
2608 	    sizeof (vlun->svl_cdb));
2609 	new_pkt->pkt_time = vlun->svl_time;
2610 	mutex_exit(&vlun->svl_mutex);
2611 	new_pkt->pkt_flags = FLAG_NOINTR;
2612 
2613 	ua_retry = nr_retry = retry = 0;
2614 again:
2615 	rval = vhci_do_scsi_cmd(new_pkt);
2616 	if (rval != 1) {
2617 		if ((new_pkt->pkt_reason == CMD_CMPLT) &&
2618 		    (SCBP_C(new_pkt) == STATUS_CHECK) &&
2619 		    (new_pkt->pkt_state & STATE_ARQ_DONE)) {
2620 			sns = (uint8_t *)
2621 			    &(((struct scsi_arq_status *)(uintptr_t)
2622 			    (new_pkt->pkt_scbp))->sts_sensedata);
2623 			skey = scsi_sense_key(sns);
2624 			if ((skey == KEY_UNIT_ATTENTION) ||
2625 			    (skey == KEY_NOT_READY)) {
2626 				int max_retry;
2627 				struct scsi_failover_ops *fops;
2628 				fops = vlun->svl_fops;
2629 				rval = fops->sfo_analyze_sense(svp->svp_psd,
2630 				    sns, vlun->svl_fops_ctpriv);
2631 				if (rval == SCSI_SENSE_NOT_READY) {
2632 					max_retry = vhci_prout_not_ready_retry;
2633 					retry = nr_retry++;
2634 					delay(1 * drv_usectohz(1000000));
2635 				} else {
2636 					/* chk for state change and update */
2637 					if (rval == SCSI_SENSE_STATE_CHANGED) {
2638 						int held;
2639 						VHCI_HOLD_LUN(vlun,
2640 						    VH_NOSLEEP, held);
2641 						if (!held) {
2642 							rval = TRAN_BUSY;
2643 						} else {
2644 							/* chk for alua first */
2645 							vhci_update_pathstates(
2646 							    (void *)vlun);
2647 						}
2648 					}
2649 					retry = ua_retry++;
2650 					max_retry = VHCI_MAX_PGR_RETRIES;
2651 				}
2652 				if (retry < max_retry) {
2653 					VHCI_DEBUG(4, (CE_WARN, NULL,
2654 					    "!vhci_do_prout retry 0x%x "
2655 					    "(0x%x 0x%x 0x%x)",
2656 					    SCBP_C(new_pkt),
2657 					    new_pkt->pkt_cdbp[0],
2658 					    new_pkt->pkt_cdbp[1],
2659 					    new_pkt->pkt_cdbp[2]));
2660 					goto again;
2661 				}
2662 				rval = 0;
2663 				VHCI_DEBUG(4, (CE_WARN, NULL,
2664 				    "!vhci_do_prout 0x%x "
2665 				    "(0x%x 0x%x 0x%x)",
2666 				    SCBP_C(new_pkt),
2667 				    new_pkt->pkt_cdbp[0],
2668 				    new_pkt->pkt_cdbp[1],
2669 				    new_pkt->pkt_cdbp[2]));
2670 			} else if (skey == KEY_ILLEGAL_REQUEST)
2671 				rval = VHCI_PGR_ILLEGALOP;
2672 		}
2673 	} else {
2674 		rval = 1;
2675 	}
2676 	scsi_destroy_pkt(new_pkt);
2677 	VHCI_DECR_PATH_CMDCOUNT(svp);
2678 	freerbuf(bp);
2679 	return (rval);
2680 }
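
/*
 * Editor's note: a hedged helper (hypothetical, not part of the driver)
 * showing the decode used repeatedly in this file.  Per SPC-3, the
 * PERSISTENT RESERVE OUT service action is carried in the low five bits
 * of CDB byte 1; scsi_vhci treats REGISTER and REGISTER AND IGNORE
 * EXISTING KEY specially so the registration can be replicated on every
 * path (see vhci_run_cmd() below).
 */
static int
example_prout_needs_replication(const uchar_t *cdbp)
{
	uchar_t sa = cdbp[1] & 0x1f;

	return (cdbp[0] == SCMD_PROUT &&
	    (sa == VHCI_PROUT_REGISTER || sa == VHCI_PROUT_R_AND_IGNORE));
}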
2681 
2682 static void
2683 vhci_run_cmd(void *arg)
2684 {
2685 	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
2686 	struct scsi_pkt		*tpkt;
2687 	scsi_vhci_priv_t	*svp;
2688 	mdi_pathinfo_t		*pip, *npip;
2689 	scsi_vhci_lun_t		*vlun;
2690 	dev_info_t		*cdip;
2691 	scsi_vhci_priv_t	*nsvp;
2692 	int			fail = 0;
2693 	int			rval;
2694 	struct vhci_pkt		*vpkt;
2695 	uchar_t			cdb_1;
2696 	vhci_prout_t		*prout;
2697 
2698 	vpkt = (struct vhci_pkt *)pkt->pkt_private;
2699 	tpkt = vpkt->vpkt_tgt_pkt;
2700 	pip = vpkt->vpkt_path;
2701 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2702 	if (svp == NULL) {
2703 		tpkt->pkt_reason = CMD_TRAN_ERR;
2704 		tpkt->pkt_statistics = STAT_ABORTED;
2705 		goto done;
2706 	}
2707 	vlun = svp->svp_svl;
2708 	prout = &vlun->svl_prout;
2709 	if (SCBP_C(pkt) != STATUS_GOOD)
2710 		fail++;
2711 	cdip = vlun->svl_dip;
2712 	pip = npip = NULL;
2713 	rval = mdi_select_path(cdip, NULL,
2714 	    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH, NULL, &npip);
2715 	if ((rval != MDI_SUCCESS) || (npip == NULL)) {
2716 		VHCI_DEBUG(4, (CE_NOTE, NULL,
2717 		    "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
2718 		tpkt->pkt_reason = CMD_TRAN_ERR;
2719 		tpkt->pkt_statistics = STAT_ABORTED;
2720 		goto done;
2721 	}
2722 
2723 	cdb_1 = vlun->svl_cdb[1];
2724 	vlun->svl_cdb[1] &= 0xe0;
2725 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
2726 
2727 	do {
2728 		nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
2729 		if (nsvp == NULL) {
2730 			VHCI_DEBUG(4, (CE_NOTE, NULL,
2731 			    "vhci_run_cmd: no "
2732 			    "client priv! 0x%p offlined?\n",
2733 			    (void *)npip));
2734 			goto next_path;
2735 		}
2736 		if (vlun->svl_first_path == npip) {
2737 			goto next_path;
2738 		} else {
2739 			if (vhci_do_prout(nsvp) != 1)
2740 				fail++;
2741 		}
2742 next_path:
2743 		pip = npip;
2744 		rval = mdi_select_path(cdip, NULL,
2745 		    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2746 		    pip, &npip);
2747 		mdi_rele_path(pip);
2748 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
2749 
2750 	vlun->svl_cdb[1] = cdb_1;
2751 
2752 	if (fail) {
2753 		VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
2754 		    "couldn't be replicated on all paths",
2755 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
2756 		vhci_print_prout_keys(vlun, "vhci_run_cmd: ");
2757 
2758 		if (SCBP_C(pkt) != STATUS_GOOD) {
2759 			tpkt->pkt_reason = CMD_TRAN_ERR;
2760 			tpkt->pkt_statistics = STAT_ABORTED;
2761 		}
2762 	} else {
2763 		vlun->svl_pgr_active = 1;
2764 		vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");
2765 
2766 		bcopy((const void *)prout->service_key,
2767 		    (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
2768 		bcopy((const void *)prout->res_key,
2769 		    (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);
2770 
2771 		vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
2772 	}
2773 done:
2774 	if (SCBP_C(pkt) == STATUS_GOOD)
2775 		vlun->svl_first_path = NULL;
2776 
2777 	if (svp)
2778 		VHCI_DECR_PATH_CMDCOUNT(svp);
2779 
2780 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
2781 		scsi_destroy_pkt(pkt);
2782 		vpkt->vpkt_hba_pkt = NULL;
2783 		if (vpkt->vpkt_path) {
2784 			mdi_rele_path(vpkt->vpkt_path);
2785 			vpkt->vpkt_path = NULL;
2786 		}
2787 	}
2788 
2789 	sema_v(&vlun->svl_pgr_sema);
2790 	/*
2791 	 * The PROUT commands are not included in the automatic retry
2792 	 * mechanism, therefore, vpkt_org_vpkt should never be set here.
2793 	 */
2794 	ASSERT(vpkt->vpkt_org_vpkt == NULL);
2795 	scsi_hba_pkt_comp(tpkt);
2796 }
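
/*
 * Editor's note: the path-walk idiom used by vhci_run_cmd() above (and by
 * vhci_scsi_reset_target()) shown in isolation, with illustrative names.
 * A non-NULL "start" pip asks mdi_select_path() for the next candidate;
 * every pip it returns is held and must be dropped with mdi_rele_path().
 */
static void
example_walk_paths(dev_info_t *cdip)
{
	mdi_pathinfo_t	*pip = NULL, *npip = NULL;
	int		rval;

	rval = mdi_select_path(cdip, NULL,
	    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH, NULL, &npip);
	while ((rval == MDI_SUCCESS) && (npip != NULL)) {
		/* ... operate on npip ... */
		pip = npip;
		rval = mdi_select_path(cdip, NULL,
		    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
		    pip, &npip);
		mdi_rele_path(pip);
	}
}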
2797 
2798 /*
2799  * Get the keys registered with this target.  Since we will have
2800  * registered the same key with multiple initiators, strip out
2801  * any duplicate keys.
2802  *
2803  * The pointers which will be used to filter the registered keys from
2804  * the device will be stored in filter_prin and filter_pkt.  If the
2805  * allocation length of the buffer was sufficient for the number of
2806  * parameter data bytes available to be returned by the device then the
2807  * key filtering will use the keylist returned from the original
2808  * request.  If the allocation length of the buffer was not sufficient,
2809  * then the filtering will use the keylist returned from the request
2810  * that is resent below.
2811  *
2812  * If the device returns an additional length field that is greater than
2813  * the allocation length of the buffer, then allocate a new buffer which
2814  * can accommodate the number of parameter data bytes available to be
2815  * returned.  Resend the scsi PRIN command, filter out the duplicate
2816  * keys and return as many of the unique keys found as were originally
2817  * requested and set the additional length field equal to the data bytes
2818  * of unique reservation keys available to be returned.
2819  *
2820  * If the device returns an additional length field that is less than or
2821  * equal to the allocation length of the buffer, then all the available
2822  * keys registered were returned by the device.  Filter out the
2823  * duplicate keys and return all of the unique keys found and set the
2824  * additional length field equal to the data bytes of the reservation
2825  * keys to be returned.
2826  */
2827 
2828 #define	VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation))
2829 
2830 static int
2831 vhci_do_prin(struct vhci_pkt **intr_vpkt)
2832 {
2833 	scsi_vhci_priv_t *svp;
2834 	struct vhci_pkt *vpkt = *intr_vpkt;
2835 	vhci_prin_readkeys_t *prin;
2836 	scsi_vhci_lun_t *vlun;
2837 	struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address);
2838 
2839 	struct buf		*new_bp = NULL;
2840 	struct scsi_pkt		*new_pkt = NULL;
2841 	struct vhci_pkt		*new_vpkt = NULL;
2842 	uint32_t		needed_length;
2843 	int			rval = VHCI_CMD_CMPLT;
2844 	uint32_t		prin_length = 0;
2845 	uint32_t		svl_prin_length = 0;
2846 
2847 	ASSERT(vpkt->vpkt_path);
2848 	svp = mdi_pi_get_vhci_private(vpkt->vpkt_path);
2849 	ASSERT(svp);
2850 	vlun = svp->svp_svl;
2851 	ASSERT(vlun);
2852 
2853 	/*
2854 	 * If the caller only asked for an amount of data that would not
2855 	 * be enough to include any key data it is likely that they will
2856 	 * be enough to include any key data, it is likely that they will
2857 	 * from this header. Doing recovery on this would be a duplication
2858 	 * of efforts.
2859 	 */
2860 	if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) {
2861 		rval = VHCI_CMD_CMPLT;
2862 		goto exit;
2863 	}
2864 
2865 	if (vpkt->vpkt_org_vpkt == NULL) {
2866 		/*
2867 		 * Can fail as sleep is not allowed.
2868 		 */
2869 		prin = (vhci_prin_readkeys_t *)
2870 		    bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2871 	} else {
2872 		/*
2873 		 * The retry buf doesn't need to be mapped in.
2874 		 */
2875 		prin = (vhci_prin_readkeys_t *)
2876 		    vpkt->vpkt_tgt_init_bp->b_un.b_daddr;
2877 	}
2878 
2879 	if (prin == NULL) {
2880 		VHCI_DEBUG(5, (CE_WARN, NULL,
2881 		    "vhci_do_prin: bp_mapin_common failed."));
2882 		rval = VHCI_CMD_ERROR;
2883 		goto fail;
2884 	}
2885 
2886 	prin_length = BE_32(prin->length);
2887 
2888 	/*
2889 	 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2890 	 * information to be transferred exceeds the maximum value
2891 	 * that the ALLOCATION LENGTH field is capable of specifying,
2892 	 * the device server shall...terminate the command with CHECK
2893 	 * CONDITION status".  The ALLOCATION LENGTH field of the
2894 	 * PERSISTENT RESERVE IN command is 2 bytes. We should never
2895 	 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
2896 	 * so if we do, then it is an error!
2897 	 */
2898 
2899 
2900 	if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) {
2901 		VHCI_DEBUG(5, (CE_NOTE, NULL,
2902 		    "vhci_do_prin: Device returned invalid "
2903 		    "length 0x%x\n", prin_length));
2904 		rval = VHCI_CMD_ERROR;
2905 		goto fail;
2906 	}
2907 	needed_length = prin_length + VHCI_PRIN_HEADER_SZ;
2908 
2909 	/*
2910 	 * If prin->length is greater than the byte count allocated in the
2911 	 * original buffer, then resend the request with enough buffer
2912 	 * allocated to get all of the available registered keys.
2913 	 */
2914 	if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) &&
2915 	    (vpkt->vpkt_org_vpkt == NULL)) {
2916 
2917 		new_pkt = vhci_create_retry_pkt(vpkt);
2918 		if (new_pkt == NULL) {
2919 			rval = VHCI_CMD_ERROR;
2920 			goto fail;
2921 		}
2922 		new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2923 
2924 		/*
2925 		 * This is the buf into whose buffer
2926 		 * the device will return the prin
2927 		 * readkeys.
2928 		 */
2929 		new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
2930 		    NULL, needed_length, B_READ, NULL_FUNC, NULL);
2931 		if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) {
2932 			if (new_bp) {
2933 				scsi_free_consistent_buf(new_bp);
2934 			}
2935 			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2936 			rval = VHCI_CMD_ERROR;
2937 			goto fail;
2938 		}
2939 		new_bp->b_bcount = needed_length;
2940 		new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8);
2941 		new_pkt->pkt_cdbp[8] = (uchar_t)needed_length;
2942 
2943 		rval = VHCI_CMD_RETRY;
2944 
2945 		new_vpkt->vpkt_tgt_init_bp = new_bp;
2946 	}
2947 
2948 	if (rval == VHCI_CMD_RETRY) {
2949 
2950 		/*
2951 		 * There were more keys than the original request asked for.
2952 		 */
2953 		mdi_pathinfo_t *path_holder = vpkt->vpkt_path;
2954 
2955 		/*
2956 		 * Release the old path because it does not matter which path
2957 		 * this command is sent down.  This allows the normal bind
2958 		 * transport mechanism to be used.
2959 		 */
2960 		if (vpkt->vpkt_path != NULL) {
2961 			mdi_rele_path(vpkt->vpkt_path);
2962 			vpkt->vpkt_path = NULL;
2963 		}
2964 
2965 		/*
2966 		 * Dispatch the retry command
2967 		 */
2968 		if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2969 		    (void *) new_vpkt, KM_NOSLEEP) == TASKQID_INVALID) {
2970 			if (path_holder) {
2971 				vpkt->vpkt_path = path_holder;
2972 				mdi_hold_path(path_holder);
2973 			}
2974 			scsi_free_consistent_buf(new_bp);
2975 			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2976 			rval = VHCI_CMD_ERROR;
2977 			goto fail;
2978 		}
2979 
2980 		/*
2981 		 * If we return VHCI_CMD_RETRY, that means the caller
2982 		 * is going to bail and wait for the reissued command
2983 		 * to complete.  In that case, we need to decrement
2984 		 * the path command count right now.  In any other
2985 		 * case, it'll be decremented by the caller.
2986 		 */
2987 		VHCI_DECR_PATH_CMDCOUNT(svp);
2988 		goto exit;
2989 
2990 	}
2991 
2992 	if (rval == VHCI_CMD_CMPLT) {
2993 		/*
2994 		 * Either the original request got all of the keys, or this
2995 		 * is the completion of the retry (recovery) packet.
2996 		 */
2997 		int new;
2998 		int old;
2999 		int num_keys = prin_length / MHIOC_RESV_KEY_SIZE;
3000 
3001 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
3002 		    num_keys));
3003 
3004 #ifdef DEBUG
3005 		VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
3006 		if (vhci_debug == 5)
3007 			vhci_print_prin_keys(prin, num_keys);
3008 		VHCI_DEBUG(5, (CE_NOTE, NULL,
3009 		    "vhci_do_prin: MPxIO old keys:\n"));
3010 		if (vhci_debug == 5)
3011 			vhci_print_prin_keys(&vlun->svl_prin, num_keys);
3012 #endif
3013 
3014 		/*
3015 		 * Filter out all duplicate keys returned from the device
3016 		 * We know that we use a different key for every host, so we
3017 		 * can simply strip out duplicates. Otherwise we would need to
3018 		 * do more bookkeeping to figure out which keys to strip out.
3019 		 */
3020 
3021 		new = 0;
3022 
3023 		/*
3024 		 * If we got at least 1 key copy it.
3025 		 */
3026 		if (num_keys > 0) {
3027 			vlun->svl_prin.keylist[0] = prin->keylist[0];
3028 			new++;
3029 		}
3030 
3031 		/*
3032 		 * find next unique key.
3033 		 */
3034 		for (old = 1; old < num_keys; old++) {
3035 			int j;
3036 			int match = 0;
3037 
3038 			if (new >= VHCI_NUM_RESV_KEYS)
3039 				break;
3040 			for (j = 0; j < new; j++) {
3041 				if (bcmp(&prin->keylist[old],
3042 				    &vlun->svl_prin.keylist[j],
3043 				    sizeof (mhioc_resv_key_t)) == 0) {
3044 					match = 1;
3045 					break;
3046 				}
3047 			}
3048 			if (!match) {
3049 				vlun->svl_prin.keylist[new] =
3050 				    prin->keylist[old];
3051 				new++;
3052 			}
3053 		}
3054 
3055 		/* Stored Big Endian */
3056 		vlun->svl_prin.generation = prin->generation;
3057 		svl_prin_length = new * sizeof (mhioc_resv_key_t);
3058 		/* Stored Big Endian */
3059 		vlun->svl_prin.length = BE_32(svl_prin_length);
3060 		svl_prin_length += VHCI_PRIN_HEADER_SZ;
3061 
3062 		/*
3063 		 * If we arrived at this point after issuing a retry, make sure
3064 		 * that we put everything back the way it originally was so
3065 		 * that the target driver can complete the command correctly.
3066 		 */
3067 		if (vpkt->vpkt_org_vpkt != NULL) {
3068 			new_bp = vpkt->vpkt_tgt_init_bp;
3069 
3070 			scsi_free_consistent_buf(new_bp);
3071 
3072 			vpkt = vhci_sync_retry_pkt(vpkt);
3073 			*intr_vpkt = vpkt;
3074 
3075 			/*
3076 			 * Make sure the original buffer is mapped into kernel
3077 			 * space before we try to copy the filtered keys into
3078 			 * it.
3079 			 */
3080 			prin = (vhci_prin_readkeys_t *)bp_mapin_common(
3081 			    vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
3082 		}
3083 
3084 		/*
3085 		 * Now copy the desired number of prin keys into the original
3086 		 * target buffer.
3087 		 */
3088 		if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) {
3089 			/*
3090 			 * It is safe to return all of the available unique
3091 			 * keys
3092 			 */
3093 			bcopy(&vlun->svl_prin, prin, svl_prin_length);
3094 		} else {
3095 			/*
3096 			 * Not all of the available keys were requested by the
3097 			 * original command.
3098 			 */
3099 			bcopy(&vlun->svl_prin, prin,
3100 			    vpkt->vpkt_tgt_init_bp->b_bcount);
3101 		}
3102 #ifdef DEBUG
3103 		VHCI_DEBUG(5, (CE_NOTE, NULL,
3104 		    "vhci_do_prin: To Application:\n"));
3105 		if (vhci_debug == 5)
3106 			vhci_print_prin_keys(prin, new);
3107 		VHCI_DEBUG(5, (CE_NOTE, NULL,
3108 		    "vhci_do_prin: MPxIO new keys:\n"));
3109 		if (vhci_debug == 5)
3110 			vhci_print_prin_keys(&vlun->svl_prin, new);
3111 #endif
3112 	}
3113 fail:
3114 	if (rval == VHCI_CMD_ERROR) {
3115 		/*
3116 		 * If we arrived at this point after issuing a
3117 		 * retry, make sure that we put everything back
3118 		 * the way it originally was so that ssd can
3119 		 * complete the command correctly.
3120 		 */
3121 
3122 		if (vpkt->vpkt_org_vpkt != NULL) {
3123 			new_bp = vpkt->vpkt_tgt_init_bp;
3124 			if (new_bp != NULL) {
3125 				scsi_free_consistent_buf(new_bp);
3126 			}
3127 
3128 			new_vpkt = vpkt;
3129 			vpkt = vpkt->vpkt_org_vpkt;
3130 
3131 			vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3132 			    new_vpkt->vpkt_tgt_pkt);
3133 		}
3134 
3135 		/*
3136 		 * Mark this command completion as having an error so that
3137 		 * ssd will retry the command.
3138 		 */
3139 
3140 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3141 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3142 
3143 		rval = VHCI_CMD_CMPLT;
3144 	}
3145 exit:
3146 	/*
3147 	 * Make sure that the semaphore is only released once.
3148 	 */
3149 	if (rval == VHCI_CMD_CMPLT) {
3150 		sema_v(&vlun->svl_pgr_sema);
3151 	}
3152 
3153 	return (rval);
3154 }
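
/*
 * Editor's note: the duplicate-key filter above in miniature (hypothetical
 * helper, not part of the driver).  Because every host registers a
 * distinct key, a simple O(n^2) compare-and-pack suffices; unique keys are
 * packed at the front of the array and the count of unique keys returned.
 */
static int
example_dedup_keys(mhioc_resv_key_t *keys, int nkeys)
{
	int	new = 0, old, j;

	for (old = 0; old < nkeys; old++) {
		int match = 0;

		for (j = 0; j < new; j++) {
			if (bcmp(&keys[old], &keys[j],
			    sizeof (mhioc_resv_key_t)) == 0) {
				match = 1;
				break;
			}
		}
		if (!match)
			keys[new++] = keys[old];
	}
	return (new);
}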
3155 
3156 static void
3157 vhci_intr(struct scsi_pkt *pkt)
3158 {
3159 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3160 	struct scsi_pkt		*tpkt;
3161 	scsi_vhci_priv_t	*svp;
3162 	scsi_vhci_lun_t		*vlun;
3163 	int			rval, held;
3164 	struct scsi_failover_ops	*fops;
3165 	uint8_t			*sns, skey, asc, ascq;
3166 	mdi_pathinfo_t		*lpath;
3167 	static char		*timeout_err = "Command Timeout";
3168 	static char		*parity_err = "Parity Error";
3169 	char			*err_str = NULL;
3170 	dev_info_t		*vdip, *cdip;
3171 	char			*cpath;
3172 
3173 	ASSERT(vpkt != NULL);
3174 	tpkt = vpkt->vpkt_tgt_pkt;
3175 	ASSERT(tpkt != NULL);
3176 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3177 	ASSERT(svp != NULL);
3178 	vlun = svp->svp_svl;
3179 	ASSERT(vlun != NULL);
3180 	lpath = vpkt->vpkt_path;
3181 
3182 	/*
3183 	 * sync up the target driver's pkt with the pkt that
3184 	 * we actually used
3185 	 */
3186 	*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
3187 	tpkt->pkt_resid = pkt->pkt_resid;
3188 	tpkt->pkt_state = pkt->pkt_state;
3189 	tpkt->pkt_statistics = pkt->pkt_statistics;
3190 	tpkt->pkt_reason = pkt->pkt_reason;
3191 
3192 	/* Return path_instance information back to the target driver. */
3193 	if (scsi_pkt_allocated_correctly(tpkt)) {
3194 		if (scsi_pkt_allocated_correctly(pkt)) {
3195 			/*
3196 			 * If both packets were correctly allocated,
3197 			 * return path returned by pHCI.
3198 			 */
3199 			tpkt->pkt_path_instance = pkt->pkt_path_instance;
3200 		} else {
3201 			/* Otherwise return path of pHCI we used */
3202 			tpkt->pkt_path_instance =
3203 			    mdi_pi_get_path_instance(lpath);
3204 		}
3205 	}
3206 
3207 	if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3208 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3209 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3210 		if ((SCBP_C(pkt) != STATUS_GOOD) ||
3211 		    (pkt->pkt_reason != CMD_CMPLT)) {
3212 			sema_v(&vlun->svl_pgr_sema);
3213 		}
3214 	} else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3215 		if (pkt->pkt_reason != CMD_CMPLT ||
3216 		    (SCBP_C(pkt) != STATUS_GOOD)) {
3217 			sema_v(&vlun->svl_pgr_sema);
3218 		}
3219 	}
3220 
3221 	switch (pkt->pkt_reason) {
3222 	case CMD_CMPLT:
3223 		/*
3224 		 * cmd completed successfully, check for scsi errors
3225 		 */
3226 		switch (*(pkt->pkt_scbp)) {
3227 		case STATUS_CHECK:
3228 			if (pkt->pkt_state & STATE_ARQ_DONE) {
3229 				sns = (uint8_t *)
3230 				    &(((struct scsi_arq_status *)(uintptr_t)
3231 				    (pkt->pkt_scbp))->sts_sensedata);
3232 				skey = scsi_sense_key(sns);
3233 				asc = scsi_sense_asc(sns);
3234 				ascq = scsi_sense_ascq(sns);
3235 				fops = vlun->svl_fops;
3236 				ASSERT(fops != NULL);
3237 				VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3238 				    "Received sns key %x  esc %x  escq %x\n",
3239 				    skey, asc, ascq));
3240 
3241 				if (vlun->svl_waiting_for_activepath == 1) {
3242 					/*
3243 					 * if we are here it means we are
3244 					 * in the midst of a probe/attach
3245 					 * through a passive path; this
3246 					 * case is exempt from sense analysis
3247 					 * for detection of ext. failover
3248 					 * because that would unnecessarily
3249 					 * increase attach time.
3250 					 */
3251 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3252 					    vpkt->vpkt_tgt_init_scblen);
3253 					break;
3254 				}
3255 				if (asc == VHCI_SCSI_PERR) {
3256 					/*
3257 					 * parity error
3258 					 */
3259 					err_str = parity_err;
3260 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3261 					    vpkt->vpkt_tgt_init_scblen);
3262 					break;
3263 				}
3264 				rval = fops->sfo_analyze_sense(svp->svp_psd,
3265 				    sns, vlun->svl_fops_ctpriv);
3266 				if ((rval == SCSI_SENSE_NOFAILOVER) ||
3267 				    (rval == SCSI_SENSE_UNKNOWN) ||
3268 				    (rval == SCSI_SENSE_NOT_READY)) {
3269 					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3270 					    vpkt->vpkt_tgt_init_scblen);
3271 					break;
3272 				} else if (rval == SCSI_SENSE_STATE_CHANGED) {
3273 					struct scsi_vhci	*vhci;
3274 					vhci = ADDR2VHCI(&tpkt->pkt_address);
3275 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3276 					if (!held) {
3277 						/*
3278 						 * looks like some other thread
3279 						 * has already detected this
3280 						 * condition
3281 						 */
3282 						tpkt->pkt_state &=
3283 						    ~STATE_ARQ_DONE;
3284 						*(tpkt->pkt_scbp) =
3285 						    STATUS_BUSY;
3286 						break;
3287 					}
3288 					(void) taskq_dispatch(
3289 					    vhci->vhci_update_pathstates_taskq,
3290 					    vhci_update_pathstates,
3291 					    (void *)vlun, KM_SLEEP);
3292 				} else {
3293 					/*
3294 					 * externally initiated failover
3295 					 * has occurred or is in progress
3296 					 */
3297 					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3298 					if (!held) {
3299 						/*
3300 						 * looks like some other thread
3301 						 * has already detected this
3302 						 * condition
3303 						 */
3304 						tpkt->pkt_state &=
3305 						    ~STATE_ARQ_DONE;
3306 						*(tpkt->pkt_scbp) =
3307 						    STATUS_BUSY;
3308 						break;
3309 					} else {
3310 						rval = vhci_handle_ext_fo
3311 						    (pkt, rval);
3312 						if (rval == BUSY_RETURN) {
3313 							tpkt->pkt_state &=
3314 							    ~STATE_ARQ_DONE;
3315 							*(tpkt->pkt_scbp) =
3316 							    STATUS_BUSY;
3317 							break;
3318 						}
3319 						bcopy(pkt->pkt_scbp,
3320 						    tpkt->pkt_scbp,
3321 						    vpkt->vpkt_tgt_init_scblen);
3322 						break;
3323 					}
3324 				}
3325 			}
3326 			break;
3327 
3328 		/*
3329 		 * If this is a good SCSI-II RELEASE cmd completion then restore
3330 		 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3331 		 * If this is a good SCSI-II RESERVE cmd completion then set
3332 		 * VLUN_RESERVE_ACTIVE_FLG.
3333 		 */
3334 		case STATUS_GOOD:
3335 			if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3336 			    (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3337 				(void) mdi_set_lb_policy(vlun->svl_dip,
3338 				    vlun->svl_lb_policy_save);
3339 				vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3340 				VHCI_DEBUG(1, (CE_WARN, NULL,
3341 				    "!vhci_intr: vlun 0x%p release path 0x%p",
3342 				    (void *)vlun, (void *)vpkt->vpkt_path));
3343 			}
3344 
3345 			if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3346 			    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3347 				vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3348 				vlun->svl_resrv_pip = vpkt->vpkt_path;
3349 				VHCI_DEBUG(1, (CE_WARN, NULL,
3350 				    "!vhci_intr: vlun 0x%p reserved path 0x%p",
3351 				    (void *)vlun, (void *)vpkt->vpkt_path));
3352 			}
3353 			break;
3354 
3355 		case STATUS_RESERVATION_CONFLICT:
3356 			VHCI_DEBUG(1, (CE_WARN, NULL,
3357 			    "!vhci_intr: vlun 0x%p "
3358 			    "reserve conflict on path 0x%p",
3359 			    (void *)vlun, (void *)vpkt->vpkt_path));
3360 			/* FALLTHROUGH */
3361 		default:
3362 			break;
3363 		}
3364 
3365 		/*
3366 		 * Update I/O completion statistics for the path
3367 		 */
3368 		mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3369 
3370 		/*
3371 		 * Command completed successfully, release the dma binding and
3372 		 * destroy the transport side of the packet.
3373 		 */
3374 		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3375 		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3376 		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3377 			if (SCBP_C(pkt) == STATUS_GOOD) {
3378 				ASSERT(vlun->svl_taskq);
3379 				svp->svp_last_pkt_reason = pkt->pkt_reason;
3380 				(void) taskq_dispatch(vlun->svl_taskq,
3381 				    vhci_run_cmd, pkt, KM_SLEEP);
3382 				return;
3383 			}
3384 		}
3385 		if ((SCBP_C(pkt) == STATUS_GOOD) &&
3386 		    (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
3387 			/*
3388 			 * If the action (value in byte 1 of the cdb) is zero,
3389 			 * we're reading keys, and that's the only condition
3390 			 * where we need to be concerned with filtering keys
3391 			 * and potential retries.  Otherwise, we simply signal
3392 			 * the semaphore and move on.
3393 			 */
3394 			if (pkt->pkt_cdbp[1] == 0) {
3395 				/*
3396 				 * If this is the completion of an internal
3397 				 * retry then we need to make sure that the
3398 				 * pkt and tpkt pointers are readjusted so
3399 				 * the calls to scsi_destroy_pkt and pkt_comp
3400 				 * below work correctly.
3401 				 */
3402 				if (vpkt->vpkt_org_vpkt != NULL) {
3403 					pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3404 					tpkt = vpkt->vpkt_org_vpkt->
3405 					    vpkt_tgt_pkt;
3406 
3407 					/*
3408 					 * If this command was issued through
3409 					 * the taskq then we need to clear
3410 					 * this flag for proper processing in
3411 					 * the case of a retry from the target
3412 					 * driver.
3413 					 */
3414 					vpkt->vpkt_state &=
3415 					    ~VHCI_PKT_THRU_TASKQ;
3416 				}
3417 
3418 				/*
3419 				 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3420 				 * vpkt will contain the address of the
3421 				 * original vpkt
3422 				 */
3423 				if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3424 					/*
3425 					 * The command has been resent to get
3426 					 * all the keys from the device.  Don't
3427 					 * complete the command with ssd until
3428 					 * the retry completes.
3429 					 */
3430 					return;
3431 				}
3432 			} else {
3433 				sema_v(&vlun->svl_pgr_sema);
3434 			}
3435 		}
3436 
3437 		break;
3438 
3439 	case CMD_TIMEOUT:
3440 		if ((pkt->pkt_statistics &
3441 		    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) == 0) {
3442 
3443 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3444 			    "!scsi vhci timeout invoked\n"));
3445 
3446 			(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3447 			    FALSE, VHCI_DEPTH_ALL);
3448 		}
3449 		MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3450 		tpkt->pkt_statistics |= STAT_ABORTED;
3451 		err_str = timeout_err;
3452 		break;
3453 
3454 	case CMD_TRAN_ERR:
3455 		/*
3456 		 * This status is returned if the transport has sent the cmd
3457 		 * down the link to the target and then some error occurs.
3458 		 * In case of SCSI-II RESERVE cmd, we don't know if the
3459 		 * reservation has been accepted by the target or not, so we
3460 		 * need to clear the reservation.
3461 		 */
3462 		if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3463 		    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3464 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3465 			    " cmd_tran_err for scsi-2 reserve cmd\n"));
3466 			if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3467 			    TRUE, VHCI_DEPTH_TARGET)) {
3468 				VHCI_DEBUG(1, (CE_WARN, NULL,
3469 				    "!vhci_intr cmd_tran_err reset failed!"));
3470 			}
3471 		}
3472 		break;
3473 
3474 	case CMD_DEV_GONE:
3475 		/*
3476 		 * If this is the last path then report CMD_DEV_GONE to the
3477 		 * target driver, otherwise report BUSY to trigger retry.
3478 		 */
3479 		if (vlun->svl_dip &&
3480 		    (mdi_client_get_path_count(vlun->svl_dip) <= 1)) {
3481 			struct scsi_vhci	*vhci;
3482 			vhci = ADDR2VHCI(&tpkt->pkt_address);
3483 			VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3484 			    "cmd_dev_gone on last path\n"));
3485 			(void) vhci_invalidate_mpapi_lu(vhci, vlun);
3486 			break;
3487 		}
3488 
3489 		/* Report CMD_CMPLT-with-BUSY to cause retry. */
3490 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3491 		    "cmd_dev_gone\n"));
3492 		tpkt->pkt_reason = CMD_CMPLT;
3493 		tpkt->pkt_state = STATE_GOT_BUS |
3494 		    STATE_GOT_TARGET | STATE_SENT_CMD |
3495 		    STATE_GOT_STATUS;
3496 		*(tpkt->pkt_scbp) = STATUS_BUSY;
3497 		break;
3498 
3499 	default:
3500 		break;
3501 	}
3502 
3503 	/*
3504 	 * SCSI-II RESERVE cmd has been serviced by the lower layers; clear
3505 	 * the flag so the lun is not QUIESCED any longer.
3506 	 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3507 	 * is retried, a taskq shall again be dispatched to service it.  Else
3508 	 * it may lead to a system hang if the retry is within interrupt
3509 	 * context.
3510 	 */
3511 	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3512 	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3513 		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3514 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3515 	}
3516 
3517 	/*
3518 	 * vpkt_org_vpkt should always be NULL here if the retry command
3519 	 * has been successfully processed.  If vpkt_org_vpkt != NULL at
3520 	 * this point, it is an error so restore the original vpkt and
3521 	 * return an error to the target driver so it can retry the
3522 	 * command as appropriate.
3523 	 */
3524 	if (vpkt->vpkt_org_vpkt != NULL) {
3525 		struct vhci_pkt *new_vpkt = vpkt;
3526 		vpkt = vpkt->vpkt_org_vpkt;
3527 
3528 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3529 		    new_vpkt->vpkt_tgt_pkt);
3530 
3531 		/*
3532 		 * Mark this command completion as having an error so that
3533 		 * ssd will retry the command.
3534 		 */
3535 		vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3536 		vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3537 
3538 		pkt = vpkt->vpkt_hba_pkt;
3539 		tpkt = vpkt->vpkt_tgt_pkt;
3540 	}
3541 
3542 	if ((err_str != NULL) && (pkt->pkt_reason !=
3543 	    svp->svp_last_pkt_reason)) {
3544 		cdip = vlun->svl_dip;
3545 		vdip = ddi_get_parent(cdip);
3546 		cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3547 		vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s",
3548 		    ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
3549 		    ddi_get_instance(cdip), err_str,
3550 		    mdi_pi_spathname(vpkt->vpkt_path));
3551 		kmem_free(cpath, MAXPATHLEN);
3552 	}
3553 	svp->svp_last_pkt_reason = pkt->pkt_reason;
3554 	VHCI_DECR_PATH_CMDCOUNT(svp);
3555 
3556 	/*
3557 	 * For PARTIAL_DMA, vhci should not free the path.
3558 	 * Target driver will call into vhci_scsi_dmafree or
3559 	 * destroy pkt to release this path.
3560 	 */
3561 	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
3562 		scsi_destroy_pkt(pkt);
3563 		vpkt->vpkt_hba_pkt = NULL;
3564 		if (vpkt->vpkt_path) {
3565 			mdi_rele_path(vpkt->vpkt_path);
3566 			vpkt->vpkt_path = NULL;
3567 		}
3568 	}
3569 
3570 	scsi_hba_pkt_comp(tpkt);
3571 }
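
/*
 * For reference: the PRIN handling above uses a "shadow packet" retry
 * pattern - while an internally allocated packet refetches the full
 * key list, the original target-side packet is parked in
 * vpkt_org_vpkt; once the retry completes (or fails, in which case it
 * is marked CMD_ABORTED), the original pkt/tpkt pair is restored so
 * the target driver only ever sees its own packet completed.
 */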
3572 
3573 /*
3574  * Two possibilities: (1) failover has completed,
3575  * or (2) failover is in progress.  Update our path
3576  * states for the former case; for the latter,
3577  * initiate a scsi_watch request to determine when
3578  * failover completes - vlun is HELD until failover
3579  * completes.  BUSY is returned to the upper layer
3580  * in both cases.
3581  */
3582 static int
3583 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
3584 {
3585 	struct vhci_pkt		*vpkt = (struct vhci_pkt *)pkt->pkt_private;
3586 	struct scsi_pkt		*tpkt;
3587 	scsi_vhci_priv_t	*svp;
3588 	scsi_vhci_lun_t		*vlun;
3589 	struct scsi_vhci	*vhci;
3590 	scsi_vhci_swarg_t	*swarg;
3591 	char			*path;
3592 
3593 	ASSERT(vpkt != NULL);
3594 	tpkt = vpkt->vpkt_tgt_pkt;
3595 	ASSERT(tpkt != NULL);
3596 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3597 	ASSERT(svp != NULL);
3598 	vlun = svp->svp_svl;
3599 	ASSERT(vlun != NULL);
3600 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3601 
3602 	vhci = ADDR2VHCI(&tpkt->pkt_address);
3603 
3604 	if (fostat == SCSI_SENSE_INACTIVE) {
3605 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
3606 		    "detected for %s; updating path states...\n",
3607 		    vlun->svl_lun_wwn));
3608 		/*
3609 		 * set the vlun flag to indicate to the task that the target
3610 		 * port group needs updating
3611 		 */
3612 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3613 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3614 		    vhci_update_pathstates, (void *)vlun, KM_SLEEP);
3615 	} else {
3616 		path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3617 		vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
3618 		    "!%s (%s%d): Waiting for externally initiated failover "
3619 		    "to complete", ddi_pathname(vlun->svl_dip, path),
3620 		    ddi_driver_name(vlun->svl_dip),
3621 		    ddi_get_instance(vlun->svl_dip));
3622 		kmem_free(path, MAXPATHLEN);
3623 		swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
3624 		if (swarg == NULL) {
3625 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
3626 			    "request packet allocation for %s failed....\n",
3627 			    vlun->svl_lun_wwn));
3628 			VHCI_RELEASE_LUN(vlun);
3629 			return (PKT_RETURN);
3630 		}
3631 		swarg->svs_svp = svp;
3632 		swarg->svs_tos = gethrtime();
3633 		swarg->svs_pi = vpkt->vpkt_path;
3634 		swarg->svs_release_lun = 0;
3635 		swarg->svs_done = 0;
3636 		/*
3637 		 * place a hold on the path...we don't want it to
3638 		 * vanish while scsi_watch is in progress
3639 		 */
3640 		mdi_hold_path(vpkt->vpkt_path);
3641 		svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3642 		    VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3643 		    (caddr_t)swarg);
3644 	}
3645 	return (BUSY_RETURN);
3646 }
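
/*
 * For reference: scsi_watch_request_submit() above arranges for the
 * device to be probed periodically at VHCI_FOWATCH_INTERVAL (a unit
 * ready style probe with sense collection), handing each result to
 * vhci_efo_watch_cb below, which decides whether the externally
 * initiated failover has settled or timed out.
 */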
3647 
3648 /*
3649  * vhci_efo_watch_cb:
3650  *	Callback from scsi_watch request to check the failover status.
3651  *	Completion is either due to successful failover or timeout.
3652  *	Upon successful completion, vhci_update_pathstates is called.
3653  *	For timeout condition, vhci_efo_done is called.
3654  *	Always returns 0 to scsi_watch to keep retrying until vhci_efo_done
3655  *	terminates this request properly in a separate thread.
3656  */
3657 
3658 static int
3659 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3660 {
3661 	struct scsi_status		*statusp = resultp->statusp;
3662 	uint8_t				*sensep = (uint8_t *)resultp->sensep;
3663 	struct scsi_pkt			*pkt = resultp->pkt;
3664 	scsi_vhci_swarg_t		*swarg;
3665 	scsi_vhci_priv_t		*svp;
3666 	scsi_vhci_lun_t			*vlun;
3667 	struct scsi_vhci		*vhci;
3668 	dev_info_t			*vdip;
3669 	int				rval, updt_paths;
3670 
3671 	swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3672 	svp = swarg->svs_svp;
3673 	if (swarg->svs_done) {
3674 		/*
3675 		 * Already completed failover or timed out.
3676 		 * Waiting for vhci_efo_done to terminate this scsi_watch.
3677 		 */
3678 		return (0);
3679 	}
3680 
3681 	ASSERT(svp != NULL);
3682 	vlun = svp->svp_svl;
3683 	ASSERT(vlun != NULL);
3684 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3685 	vlun->svl_efo_update_path = 0;
3686 	vdip = ddi_get_parent(vlun->svl_dip);
3687 	vhci = ddi_get_soft_state(vhci_softstate,
3688 	    ddi_get_instance(vdip));
3689 
3690 	updt_paths = 0;
3691 
3692 	if (pkt->pkt_reason != CMD_CMPLT) {
3693 		if ((gethrtime() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3694 			swarg->svs_release_lun = 1;
3695 			goto done;
3696 		}
3697 		return (0);
3698 	}
3699 	if (*((unsigned char *)statusp) == STATUS_CHECK) {
3700 		rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
3701 		    vlun->svl_fops_ctpriv);
3702 		switch (rval) {
3703 			/*
3704 			 * Only update path states in case path is definitely
3705 			 * inactive, or no failover occurred.  For all other
3706 			 * check conditions continue pinging.  An unexpected
3707 			 * check condition shouldn't cause pinging to complete
3708 			 * prematurely.
3709 			 */
3710 			case SCSI_SENSE_INACTIVE:
3711 			case SCSI_SENSE_NOFAILOVER:
3712 				updt_paths = 1;
3713 				break;
3714 			default:
3715 				if ((gethrtime() - swarg->svs_tos)
3716 				    >= VHCI_EXTFO_TIMEOUT) {
3717 					swarg->svs_release_lun = 1;
3718 					goto done;
3719 				}
3720 				return (0);
3721 		}
3722 	} else if (*((unsigned char *)statusp) ==
3723 	    STATUS_RESERVATION_CONFLICT) {
3724 		updt_paths = 1;
3725 	} else if ((*((unsigned char *)statusp)) &
3726 	    (STATUS_BUSY | STATUS_QFULL)) {
3727 		return (0);
3728 	}
3729 	if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3730 	    (updt_paths == 1)) {
3731 		/*
3732 		 * we got here because we had detected an
3733 		 * externally initiated failover; things
3734 		 * have settled down now, so let's
3735 		 * start up a task to update the
3736 		 * path states and target port group
3737 		 */
3738 		vlun->svl_efo_update_path = 1;
3739 		swarg->svs_done = 1;
3740 		vlun->svl_swarg = swarg;
3741 		vlun->svl_flags |= VLUN_UPDATE_TPG;
3742 		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3743 		    vhci_update_pathstates, (void *)vlun,
3744 		    KM_SLEEP);
3745 		return (0);
3746 	}
3747 	if ((gethrtime() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3748 		swarg->svs_release_lun = 1;
3749 		goto done;
3750 	}
3751 	return (0);
3752 done:
3753 	swarg->svs_done = 1;
3754 	(void) taskq_dispatch(vhci->vhci_taskq,
3755 	    vhci_efo_done, (void *)swarg, KM_SLEEP);
3756 	return (0);
3757 }
3758 
3759 /*
3760  * vhci_efo_done:
3761  *	Cleanly terminates scsi_watch and frees up resources.
3762  *	Called as taskq function in vhci_efo_watch_cb for EFO timeout condition
3763  *	or by vhci_update_pathstates invoked during externally initiated
3764  *	failover completion.
3765  */
3766 static void
3767 vhci_efo_done(void *arg)
3768 {
3769 	scsi_vhci_lun_t			*vlun;
3770 	scsi_vhci_swarg_t		*swarg = (scsi_vhci_swarg_t *)arg;
3771 	scsi_vhci_priv_t		*svp = swarg->svs_svp;
3772 	ASSERT(svp);
3773 
3774 	vlun = svp->svp_svl;
3775 	ASSERT(vlun);
3776 
3777 	/* Wait for clean termination of scsi_watch */
3778 	(void) scsi_watch_request_terminate(svp->svp_sw_token,
3779 	    SCSI_WATCH_TERMINATE_ALL_WAIT);
3780 	svp->svp_sw_token = NULL;
3781 
3782 	/* release path and free up resources to indicate failover completion */
3783 	mdi_rele_path(swarg->svs_pi);
3784 	if (swarg->svs_release_lun) {
3785 		VHCI_RELEASE_LUN(vlun);
3786 	}
3787 	kmem_free((void *)swarg, sizeof (*swarg));
3788 }
3789 
3790 /*
3791  * Update the path states
3792  * vlun should be HELD when this is invoked.
3793  * Calls vhci_efo_done to cleanup resources allocated for EFO.
3794  */
3795 void
3796 vhci_update_pathstates(void *arg)
3797 {
3798 	mdi_pathinfo_t			*pip, *npip;
3799 	dev_info_t			*dip;
3800 	struct scsi_failover_ops	*fo;
3801 	struct scsi_vhci_priv		*svp;
3802 	struct scsi_device		*psd;
3803 	struct scsi_path_opinfo		opinfo;
3804 	char				*pclass, *tptr;
3805 	struct scsi_vhci_lun		*vlun = (struct scsi_vhci_lun *)arg;
3806 	int				sps; /* mdi_select_path() status */
3807 	char				*cpath;
3808 	struct scsi_vhci		*vhci;
3809 	struct scsi_pkt			*pkt;
3810 	struct buf			*bp;
3811 	struct scsi_vhci_priv		*svp_conflict = NULL;
3812 	size_t				blksize;
3813 
3814 	ASSERT(VHCI_LUN_IS_HELD(vlun));
3815 	dip  = vlun->svl_dip;
3816 	pip = npip = NULL;
3817 
3818 	vhci = ddi_get_soft_state(vhci_softstate,
3819 	    ddi_get_instance(ddi_get_parent(dip)));
3820 
3821 	sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3822 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
3823 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3824 		goto done;
3825 	}
3826 
3827 	blksize = vhci_get_blocksize(dip);
3828 
3829 	fo = vlun->svl_fops;
3830 	do {
3831 		pip = npip;
3832 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3833 		psd = svp->svp_psd;
3834 		if (fo->sfo_path_get_opinfo(psd, &opinfo,
3835 		    vlun->svl_fops_ctpriv) != 0) {
3836 			sps = mdi_select_path(dip, NULL,
3837 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3838 			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3839 			mdi_rele_path(pip);
3840 			continue;
3841 		}
3842 
3843 		if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3844 		    MDI_SUCCESS) {
3845 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3846 			    "!vhci_update_pathstates: prop lookup failed for "
3847 			    "path 0x%p\n", (void *)pip));
3848 			sps = mdi_select_path(dip, NULL,
3849 			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3850 			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3851 			mdi_rele_path(pip);
3852 			continue;
3853 		}
3854 
3855 		/*
3856 		 * Need to update the "path-class" property
3857 		 * value in the device tree if different
3858 		 * from the existing value.
3859 		 */
3860 		if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3861 			(void) mdi_prop_update_string(pip, "path-class",
3862 			    opinfo.opinfo_path_attr);
3863 		}
3864 
3865 		/*
3866 		 * Only change the state if needed, i.e. don't call
3867 		 * mdi_pi_set_state to ONLINE a path if it's already
3868 		 * ONLINE. Same for STANDBY paths.
3869 		 */
3870 
3871 		if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
3872 		    opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
3873 			if (!(MDI_PI_IS_ONLINE(pip))) {
3874 				VHCI_DEBUG(1, (CE_NOTE, NULL,
3875 				    "!vhci_update_pathstates: marking path"
3876 				    " 0x%p as ONLINE\n", (void *)pip));
3877 				cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3878 				vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3879 				    "(%s%d): path %s "
3880 				    "is now ONLINE because of "
3881 				    "an externally initiated failover",
3882 				    ddi_pathname(dip, cpath),
3883 				    ddi_driver_name(dip),
3884 				    ddi_get_instance(dip),
3885 				    mdi_pi_spathname(pip));
3886 				kmem_free(cpath, MAXPATHLEN);
3887 				mdi_pi_set_state(pip,
3888 				    MDI_PATHINFO_STATE_ONLINE);
3889 				mdi_pi_set_preferred(pip,
3890 				    opinfo.opinfo_preferred);
3891 				tptr = kmem_alloc(strlen
3892 				    (opinfo.opinfo_path_attr) + 1, KM_SLEEP);
3893 				(void) strlcpy(tptr, opinfo.opinfo_path_attr,
3894 				    (strlen(opinfo.opinfo_path_attr) + 1));
3895 				mutex_enter(&vlun->svl_mutex);
3896 				if (vlun->svl_active_pclass != NULL) {
3897 					kmem_free(vlun->svl_active_pclass,
3898 					    strlen(vlun->svl_active_pclass) +
3899 					    1);
3900 				}
3901 				vlun->svl_active_pclass = tptr;
3902 				if (vlun->svl_waiting_for_activepath) {
3903 					vlun->svl_waiting_for_activepath = 0;
3904 				}
3905 				mutex_exit(&vlun->svl_mutex);
3906 			} else if (MDI_PI_IS_ONLINE(pip)) {
3907 				if (strcmp(pclass, opinfo.opinfo_path_attr)
3908 				    != 0) {
3909 					mdi_pi_set_preferred(pip,
3910 					    opinfo.opinfo_preferred);
3911 					mutex_enter(&vlun->svl_mutex);
3912 					if (vlun->svl_active_pclass == NULL ||
3913 					    strcmp(opinfo.opinfo_path_attr,
3914 					    vlun->svl_active_pclass) != 0) {
3915 						mutex_exit(&vlun->svl_mutex);
3916 						tptr = kmem_alloc(strlen
3917 						    (opinfo.opinfo_path_attr) +
3918 						    1, KM_SLEEP);
3919 						(void) strlcpy(tptr,
3920 						    opinfo.opinfo_path_attr,
3921 						    (strlen
3922 						    (opinfo.opinfo_path_attr)
3923 						    + 1));
3924 						mutex_enter(&vlun->svl_mutex);
3925 					} else {
3926 						/*
3927 						 * No need to update
3928 						 * svl_active_pclass
3929 						 */
3930 						tptr = NULL;
3931 						mutex_exit(&vlun->svl_mutex);
3932 					}
3933 					if (tptr) {
3934 						if (vlun->svl_active_pclass
3935 						    != NULL) {
3936 							kmem_free(vlun->
3937 							    svl_active_pclass,
3938 							    strlen(vlun->
3939 							    svl_active_pclass)
3940 							    + 1);
3941 						}
3942 						vlun->svl_active_pclass = tptr;
3943 						mutex_exit(&vlun->svl_mutex);
3944 					}
3945 				}
3946 			}
3947 
3948 			/* Check for Reservation Conflict */
3949 			bp = scsi_alloc_consistent_buf(
3950 			    &svp->svp_psd->sd_address, (struct buf *)NULL,
3951 			    blksize, B_READ, NULL, NULL);
3952 			if (!bp) {
3953 				VHCI_DEBUG(1, (CE_NOTE, NULL,
3954 				    "!vhci_update_pathstates: No resources "
3955 				    "(buf)\n"));
3956 				mdi_rele_path(pip);
3957 				goto done;
3958 			}
3959 			pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
3960 			    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
3961 			    PKT_CONSISTENT, NULL, NULL);
3962 			if (pkt) {
3963 				(void) scsi_setup_cdb((union scsi_cdb *)
3964 				    (uintptr_t)pkt->pkt_cdbp, SCMD_READ_G1, 1,
3965 				    1, 0);
3966 				pkt->pkt_time = 3 * 30;
3967 				pkt->pkt_flags = FLAG_NOINTR;
3968 				pkt->pkt_path_instance =
3969 				    mdi_pi_get_path_instance(pip);
3970 
3971 				if ((scsi_transport(pkt) == TRAN_ACCEPT) &&
3972 				    (pkt->pkt_reason == CMD_CMPLT) &&
3973 				    (SCBP_C(pkt) ==
3974 				    STATUS_RESERVATION_CONFLICT)) {
3975 					VHCI_DEBUG(1, (CE_NOTE, NULL,
3976 					    "!vhci_update_pathstates: reserv. "
3977 					    "conflict to be resolved on 0x%p\n",
3978 					    (void *)pip));
3979 					svp_conflict = svp;
3980 				}
3981 				scsi_destroy_pkt(pkt);
3982 			}
3983 			scsi_free_consistent_buf(bp);
3984 		} else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
3985 		    !(MDI_PI_IS_STANDBY(pip))) {
3986 			VHCI_DEBUG(1, (CE_NOTE, NULL,
3987 			    "!vhci_update_pathstates: marking path"
3988 			    " 0x%p as STANDBY\n", (void *)pip));
3989 			cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3990 			vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3991 			    "(%s%d): path %s "
3992 			    "is now STANDBY because of "
3993 			    "an externally initiated failover",
3994 			    ddi_pathname(dip, cpath),
3995 			    ddi_driver_name(dip),
3996 			    ddi_get_instance(dip),
3997 			    mdi_pi_spathname(pip));
3998 			kmem_free(cpath, MAXPATHLEN);
3999 			mdi_pi_set_state(pip,
4000 			    MDI_PATHINFO_STATE_STANDBY);
4001 			mdi_pi_set_preferred(pip,
4002 			    opinfo.opinfo_preferred);
4003 			mutex_enter(&vlun->svl_mutex);
4004 			if (vlun->svl_active_pclass != NULL) {
4005 				if (strcmp(vlun->svl_active_pclass,
4006 				    opinfo.opinfo_path_attr) == 0) {
4007 					kmem_free(vlun->
4008 					    svl_active_pclass,
4009 					    strlen(vlun->
4010 					    svl_active_pclass) + 1);
4011 					vlun->svl_active_pclass = NULL;
4012 				}
4013 			}
4014 			mutex_exit(&vlun->svl_mutex);
4015 		}
4016 		(void) mdi_prop_free(pclass);
4017 		sps = mdi_select_path(dip, NULL,
4018 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
4019 		    MDI_SELECT_NO_PREFERRED), pip, &npip);
4020 		mdi_rele_path(pip);
4021 
4022 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
4023 
4024 	/*
4025 	 * Check to see if this vlun has an active SCSI-II RESERVE.  If so
4026 	 * clear the reservation by sending a reset, so the host doesn't
4027 	 * receive a reservation conflict.  The reset has to be sent via a
4028 	 * working path.  Let's use a path referred to by svp_conflict as it
4029 	 * should be working.
4030 	 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun.  Also notify ssd
4031 	 * of the reset, explicitly.
4032 	 */
4033 	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4034 		if (svp_conflict && (vlun->svl_xlf_capable == 0)) {
4035 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathstates:"
4036 			    " sending recovery reset on 0x%p, path_state: %x",
4037 			    svp_conflict->svp_psd->sd_private,
4038 			    mdi_pi_get_state((mdi_pathinfo_t *)
4039 			    svp_conflict->svp_psd->sd_private)));
4040 
4041 			(void) vhci_recovery_reset(vlun,
4042 			    &svp_conflict->svp_psd->sd_address, FALSE,
4043 			    VHCI_DEPTH_TARGET);
4044 		}
4045 		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
4046 		mutex_enter(&vhci->vhci_mutex);
4047 		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
4048 		    &vhci->vhci_reset_notify_listf);
4049 		mutex_exit(&vhci->vhci_mutex);
4050 	}
4051 	if (vlun->svl_flags & VLUN_UPDATE_TPG) {
4052 		/*
4053 		 * Update the AccessState of related MP-API TPGs
4054 		 */
4055 		(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
4056 		vlun->svl_flags &= ~VLUN_UPDATE_TPG;
4057 	}
4058 done:
4059 	if (vlun->svl_efo_update_path) {
4060 		vlun->svl_efo_update_path = 0;
4061 		vhci_efo_done(vlun->svl_swarg);
4062 		vlun->svl_swarg = 0;
4063 	}
4064 	VHCI_RELEASE_LUN(vlun);
4065 }
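
/*
 * For reference, the reservation-conflict probe above issues a
 * one-block READ(10) at LBA 1.  The 10-byte CDB that scsi_setup_cdb()
 * builds for SCMD_READ_G1 with addr 1 and count 1 (SBC layout: opcode,
 * flags, 4-byte LBA, group, 2-byte transfer length, control) is:
 *
 *	28 00 00 00 00 01 00 00 01 00
 */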
4066 
4067 /* ARGSUSED */
4068 static int
4069 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4070 {
4071 	scsi_hba_tran_t		*hba = NULL;
4072 	struct scsi_device	*psd = NULL;
4073 	scsi_vhci_lun_t		*vlun = NULL;
4074 	dev_info_t		*pdip = NULL;
4075 	dev_info_t		*tgt_dip;
4076 	struct scsi_vhci	*vhci;
4077 	char			*guid;
4078 	scsi_vhci_priv_t	*svp = NULL;
4079 	int			rval = MDI_FAILURE;
4080 	int			vlun_alloced = 0;
4081 
4082 	ASSERT(vdip != NULL);
4083 	ASSERT(pip != NULL);
4084 
4085 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4086 	ASSERT(vhci != NULL);
4087 
4088 	pdip = mdi_pi_get_phci(pip);
4089 	ASSERT(pdip != NULL);
4090 
4091 	hba = ddi_get_driver_private(pdip);
4092 	ASSERT(hba != NULL);
4093 
4094 	tgt_dip = mdi_pi_get_client(pip);
4095 	ASSERT(tgt_dip != NULL);
4096 
4097 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
4098 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
4099 		VHCI_DEBUG(1, (CE_WARN, NULL,
4100 		    "vhci_pathinfo_init: lun guid property failed"));
4101 		goto failure;
4102 	}
4103 
4104 	vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
4105 	ddi_prop_free(guid);
4106 
4107 	vlun->svl_dip = tgt_dip;
4108 
4109 	svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
4110 	svp->svp_svl = vlun;
4111 
4112 	/*
4113 	 * Initialize svl_lb_policy_save only for newly allocated vlun. Writing
4114 	 * to svl_lb_policy_save later could accidentally overwrite saved lb
4115 	 * policy.
4116 	 */
4117 	if (vlun_alloced) {
4118 		vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
4119 	}
4120 
4121 	mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
4122 	cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);
4123 
4124 	psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
4125 	mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
4126 
4127 	if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4128 		/*
4129 		 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to
4130 		 * scsi_device in the scsi_address structure.  This allows
4131 		 * an HBA driver to find its scsi_device(9S) and
4132 		 * per-scsi_device(9S) HBA private data given a
4133 		 * scsi_address(9S) by using scsi_address_device(9F) and
4134 		 * scsi_device_hba_private_get(9F).
4135 		 */
4136 		psd->sd_address.a.a_sd = psd;
4137 	} else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4138 		/*
4139 		 * Clone transport structure if requested, so
4140 		 * Clone the transport structure if requested.  Self-
4141 		 * enumerating HBAs always need to use cloning.
4142 		scsi_hba_tran_t	*clone =
4143 		    kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
4144 		bcopy(hba, clone, sizeof (scsi_hba_tran_t));
4145 		hba = clone;
4146 		hba->tran_sd = psd;
4147 	} else {
4148 		/*
4149 		 * SPI pHCI unit-address. If we ever need to support this
4150 		 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo
4151 		 * node unit-address properties.  For now we fail...
4152 		 */
4153 		goto failure;
4154 	}
4155 
4156 	psd->sd_dev = tgt_dip;
4157 	psd->sd_address.a_hba_tran = hba;
4158 
4159 	/*
4160 	 * Mark scsi_device as being associated with a pathinfo node. For
4161 	 * a scsi_device structure associated with a devinfo node,
4162 	 * scsi_ctlops_initchild sets this field to NULL.
4163 	 */
4164 	psd->sd_pathinfo = pip;
4165 
4166 	/*
4167 	 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with
4168 	 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all
4169 	 * mpxio-capable pHCI drivers use SCSA enumeration services (or at
4170 	 * least have been changed to use sd_pathinfo instead).
4171 	 */
4172 	psd->sd_private = (caddr_t)pip;
4173 
4174 	/* See scsi_hba.c for info on sd_tran_safe kludge */
4175 	psd->sd_tran_safe = hba;
4176 
4177 	svp->svp_psd = psd;
4178 	mdi_pi_set_vhci_private(pip, (caddr_t)svp);
4179 
4180 	/*
4181 	 * call hba's target init entry point if it exists
4182 	 */
4183 	if (hba->tran_tgt_init != NULL) {
4184 		psd->sd_tran_tgt_free_done = 0;
4185 		if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
4186 		    hba, psd)) != DDI_SUCCESS) {
4187 			VHCI_DEBUG(1, (CE_WARN, pdip,
4188 			    "!vhci_pathinfo_init: tran_tgt_init failed for "
4189 			    "path=0x%p rval=%x", (void *)pip, rval));
4190 			goto failure;
4191 		}
4192 	}
4193 
4194 	svp->svp_new_path = 1;
4195 
4196 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
4197 	    (void *)pip));
4198 	return (MDI_SUCCESS);
4199 
4200 failure:
4201 	if (psd) {
4202 		mutex_destroy(&psd->sd_mutex);
4203 		kmem_free(psd, sizeof (*psd));
4204 	}
4205 	if (svp) {
4206 		mdi_pi_set_vhci_private(pip, NULL);
4207 		mutex_destroy(&svp->svp_mutex);
4208 		cv_destroy(&svp->svp_cv);
4209 		kmem_free(svp, sizeof (*svp));
4210 	}
4211 	if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE))
4212 		kmem_free(hba, sizeof (scsi_hba_tran_t));
4213 
4214 	if (vlun_alloced)
4215 		vhci_lun_free(vlun, NULL);
4216 
4217 	return (rval);
4218 }
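
/*
 * Illustrative sketch (not part of the driver): given the a.a_sd
 * plumbing set up in vhci_pathinfo_init() above, a SCSI_HBA_ADDR_COMPLEX
 * pHCI can recover its per-device soft state from a scsi_address(9S).
 * The my_tgt_private_t type and function name are hypothetical.
 */
#if 0
static my_tgt_private_t *
my_hba_addr2priv(struct scsi_address *ap)
{
	/* scsi_address_device(9F) is NULL for non-ADDR_COMPLEX addresses */
	struct scsi_device *sd = scsi_address_device(ap);

	if (sd == NULL)
		return (NULL);
	return (scsi_device_hba_private_get(sd));
}
#endif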
4219 
4220 /* ARGSUSED */
4221 static int
4222 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4223 {
4224 	scsi_hba_tran_t		*hba = NULL;
4225 	struct scsi_device	*psd = NULL;
4226 	dev_info_t		*pdip = NULL;
4227 	dev_info_t		*cdip = NULL;
4228 	scsi_vhci_priv_t	*svp = NULL;
4229 
4230 	ASSERT(vdip != NULL);
4231 	ASSERT(pip != NULL);
4232 
4233 	pdip = mdi_pi_get_phci(pip);
4234 	ASSERT(pdip != NULL);
4235 
4236 	cdip = mdi_pi_get_client(pip);
4237 	ASSERT(cdip != NULL);
4238 
4239 	hba = ddi_get_driver_private(pdip);
4240 	ASSERT(hba != NULL);
4241 
4242 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT);
4243 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4244 	if (svp == NULL) {
4245 		/* path already freed. Nothing to do. */
4246 		return (MDI_SUCCESS);
4247 	}
4248 
4249 	psd = svp->svp_psd;
4250 	ASSERT(psd != NULL);
4251 
4252 	if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4253 		/* Verify plumbing */
4254 		ASSERT(psd->sd_address.a_hba_tran == hba);
4255 		ASSERT(psd->sd_address.a.a_sd == psd);
4256 	} else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4257 		/* Switch to cloned scsi_hba_tran(9S) structure */
4258 		hba = psd->sd_address.a_hba_tran;
4259 		ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
4260 		ASSERT(hba->tran_sd == psd);
4261 	}
4262 
4263 	if ((hba->tran_tgt_free != NULL) && !psd->sd_tran_tgt_free_done) {
4264 		(*hba->tran_tgt_free) (pdip, cdip, hba, psd);
4265 		psd->sd_tran_tgt_free_done = 1;
4266 	}
4267 	mutex_destroy(&psd->sd_mutex);
4268 	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4269 		kmem_free(hba, sizeof (*hba));
4270 	}
4271 
4272 	mdi_pi_set_vhci_private(pip, NULL);
4273 
4274 	/*
4275 	 * Free the pathinfo related scsi_device inquiry data. Note that this
4276 	 * matches what happens for scsi_hba.c devinfo case at uninitchild time.
4277 	 */
4278 	if (psd->sd_inq)
4279 		kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry));
4280 	kmem_free((caddr_t)psd, sizeof (*psd));
4281 
4282 	mutex_destroy(&svp->svp_mutex);
4283 	cv_destroy(&svp->svp_cv);
4284 	kmem_free((caddr_t)svp, sizeof (*svp));
4285 
4286 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
4287 	    (void *)pip));
4288 	return (MDI_SUCCESS);
4289 }
4290 
4291 /* ARGSUSED */
4292 static int
4293 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
4294     mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
4295 {
4296 	int			rval = MDI_SUCCESS;
4297 	scsi_vhci_priv_t	*svp;
4298 	scsi_vhci_lun_t		*vlun;
4299 	int			held;
4300 	int			op = (flags & 0xf00) >> 8;
4301 	struct scsi_vhci	*vhci;
4302 
4303 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4304 
4305 	if (flags & MDI_EXT_STATE_CHANGE) {
4306 		/*
4307 		 * We do not want to issue any commands down the path in case
4308 		 * the sync flag is set. Lower layers might not be ready to
4309 		 * accept any I/O commands.
4310 		 */
4311 		if (op == DRIVER_DISABLE)
4312 			return (MDI_SUCCESS);
4313 
4314 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4315 		if (svp == NULL) {
4316 			return (MDI_FAILURE);
4317 		}
4318 		vlun = svp->svp_svl;
4319 
4320 		if (flags & MDI_BEFORE_STATE_CHANGE) {
4321 			/*
4322 			 * Hold the LUN.
4323 			 */
4324 			VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
4325 			if (flags & MDI_DISABLE_OP)  {
4326 				/*
4327 				 * Issue scsi reset if it happens to be
4328 				 * reserved path.
4329 				 */
4330 				if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4331 					/*
4332 					 * If a reservation is pending on
4333 					 * this path, don't mark the
4334 					 * path busy.
4335 					 */
4336 					if (op == DRIVER_DISABLE_TRANSIENT) {
4337 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4338 						    "!vhci_pathinfo"
4339 						    "_state_change (pip:%p): "
4340 						    " reservation: fail busy\n",
4341 						    (void *)pip));
4342 						return (MDI_FAILURE);
4343 					}
4344 					if (pip == vlun->svl_resrv_pip) {
4345 						if (vhci_recovery_reset(
4346 						    svp->svp_svl,
4347 						    &svp->svp_psd->sd_address,
4348 						    TRUE,
4349 						    VHCI_DEPTH_TARGET) == 0) {
4350 							VHCI_DEBUG(1,
4351 							    (CE_NOTE, NULL,
4352 							    "!vhci_pathinfo"
4353 							    "_state_change "
4354 							    " (pip:%p): "
4355 							    "reset failed, "
4356 							    "give up!\n",
4357 							    (void *)pip));
4358 						}
4359 						vlun->svl_flags &=
4360 						    ~VLUN_RESERVE_ACTIVE_FLG;
4361 					}
4362 				}
4363 			} else if (flags & MDI_ENABLE_OP)  {
4364 				if (((vhci->vhci_conf_flags &
4365 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4366 				    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4367 				    MDI_PI_IS_USER_DISABLE(pip) &&
4368 				    MDI_PI_IS_STANDBY(pip)) {
4369 					struct scsi_failover_ops	*fo;
4370 					char *best_pclass, *pclass = NULL;
4371 					int  best_class, rv;
4372 					/*
4373 					 * Failback if enabling a standby path
4374 					 * and it is the primary class or
4375 					 * preferred class
4376 					 */
4377 					best_class = mdi_pi_get_preferred(pip);
4378 					if (best_class == 0) {
4379 						/*
4380 						 * if not preferred - compare
4381 						 * path-class with class
4382 						 */
4383 						fo = vlun->svl_fops;
4384 						(void) fo->sfo_pathclass_next(
4385 						    NULL, &best_pclass,
4386 						    vlun->svl_fops_ctpriv);
4387 						pclass = NULL;
4388 						rv = mdi_prop_lookup_string(pip,
4389 						    "path-class", &pclass);
4390 						if (rv != MDI_SUCCESS ||
4391 						    pclass == NULL) {
4392 							vhci_log(CE_NOTE, vdip,
4393 							    "!path-class "
4394 							    " lookup "
4395 							    "failed. rv: %d"
4396 							    "class: %p", rv,
4397 							    (void *)pclass);
4398 						} else if (strncmp(pclass,
4399 						    best_pclass,
4400 						    strlen(best_pclass)) == 0) {
4401 							best_class = 1;
4402 						}
4403 						if (rv == MDI_SUCCESS &&
4404 						    pclass != NULL) {
4405 							rv = mdi_prop_free(
4406 							    pclass);
4407 							if (rv !=
4408 							    DDI_PROP_SUCCESS) {
4409 								vhci_log(
4410 								    CE_NOTE,
4411 								    vdip,
4412 								    "!path-"
4413 								    "class"
4414 								    " free"
4415 								    " failed"
4416 								    " rv: %d"
4417 								    " class: "
4418 								    "%p",
4419 								    rv,
4420 								    (void *)
4421 								    pclass);
4422 							}
4423 						}
4424 					}
4425 					if (best_class == 1) {
4426 						VHCI_DEBUG(1, (CE_NOTE, NULL,
4427 						    "preferred path: %p "
4428 						    "USER_DISABLE->USER_ENABLE "
4429 						    "transition for lun %s\n",
4430 						    (void *)pip,
4431 						    vlun->svl_lun_wwn));
4432 						(void) taskq_dispatch(
4433 						    vhci->vhci_taskq,
4434 						    vhci_initiate_auto_failback,
4435 						    (void *) vlun, KM_SLEEP);
4436 					}
4437 				}
4438 				/*
4439 				 * if PGR is active, revalidate key and
4440 				 * register on this path also, if key is
4441 				 * still valid
4442 				 */
4443 				sema_p(&vlun->svl_pgr_sema);
4444 				if (vlun->svl_pgr_active)
4445 					(void)
4446 					    vhci_pgr_validate_and_register(svp);
4447 				sema_v(&vlun->svl_pgr_sema);
4448 				/*
4449 				 * Inform target driver about any
4450 				 * reservations to be reinstated if target
4451 				 * has dropped reservation during the busy
4452 				 * period.
4453 				 */
4454 				mutex_enter(&vhci->vhci_mutex);
4455 				scsi_hba_reset_notify_callback(
4456 				    &vhci->vhci_mutex,
4457 				    &vhci->vhci_reset_notify_listf);
4458 				mutex_exit(&vhci->vhci_mutex);
4459 			}
4460 		}
4461 		if (flags & MDI_AFTER_STATE_CHANGE) {
4462 			if (flags & MDI_ENABLE_OP)  {
4463 				mutex_enter(&vhci_global_mutex);
4464 				cv_broadcast(&vhci_cv);
4465 				mutex_exit(&vhci_global_mutex);
4466 			}
4467 			if (vlun->svl_setcap_done) {
4468 				(void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
4469 				    "sector-size", vlun->svl_sector_size,
4470 				    1, pip);
4471 			}
4472 
4473 			/*
4474 			 * Release the LUN
4475 			 */
4476 			VHCI_RELEASE_LUN(vlun);
4477 
4478 			/*
4479 			 * Path transition is complete.
4480 			 * Run the callback to tell the target driver to
4481 			 * retry, preventing I/O starvation.
4482 			 */
4483 			if (scsi_callback_id != 0) {
4484 				ddi_run_callback(&scsi_callback_id);
4485 			}
4486 		}
4487 	} else {
4488 		switch (state) {
4489 		case MDI_PATHINFO_STATE_ONLINE:
4490 			rval = vhci_pathinfo_online(vdip, pip, flags);
4491 			break;
4492 
4493 		case MDI_PATHINFO_STATE_OFFLINE:
4494 			rval = vhci_pathinfo_offline(vdip, pip, flags);
4495 			break;
4496 
4497 		default:
4498 			break;
4499 		}
4500 		/*
4501 		 * Path transition is complete.
4502 		 * Run the callback to tell the target driver to
4503 		 * retry, preventing I/O starvation.
4504 		 */
4505 		if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
4506 			ddi_run_callback(&scsi_callback_id);
4507 		}
4508 		return (rval);
4509 	}
4510 
4511 	return (MDI_SUCCESS);
4512 }
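
/*
 * For reference: in the MDI_EXT_STATE_CHANGE case above, the operation
 * code (e.g. DRIVER_DISABLE, DRIVER_DISABLE_TRANSIENT) travels in bits
 * 8-11 of 'flags' - hence the (flags & 0xf00) >> 8 extraction - while
 * MDI_BEFORE_STATE_CHANGE/MDI_AFTER_STATE_CHANGE and
 * MDI_ENABLE_OP/MDI_DISABLE_OP are separate flag bits selecting the
 * phase and direction of the transition.
 */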
4513 
4514 /*
4515  * Parse the mpxio load balancing options. The datanameptr
4516  * will point to a string containing the load-balance-options value.
4517  * The load-balance-options value will be a property that
4518  * defines the load-balance algorithm and any arguments to that
4519  * algorithm.
4520  * For example:
4521  * device-type-mpxio-options-list=
4522  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4523  * "device-type=SUN     SE6920", "round-robin-options";
4524  * logical-block-options="load-balance=logical-block", "region-size=15";
4525  * round-robin-options="load-balance=round-robin";
4526  *
4527  * If the load-balance is not defined the load balance algorithm will
4528  * default to the global setting. There will be default values assigned
4529  * to the arguments (region-size=18) and if an argument is one
4530  * that is not known, it will be ignored.
4531  */
4532 static void
4533 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
4534     caddr_t datanameptr)
4535 {
4536 	char			*dataptr, *next_entry;
4537 	caddr_t			config_list	= NULL;
4538 	int			config_list_len = 0, list_len = 0;
4539 	int			region_size = -1;
4540 	client_lb_t		load_balance = LOAD_BALANCE_NONE;
4541 
4542 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
4543 	    (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
4544 		return;
4545 	}
4546 
4547 	list_len = config_list_len;
4548 	next_entry = config_list;
4549 	while (config_list_len > 0) {
4550 		dataptr = next_entry;
4551 
4552 		if (strncmp(mdi_load_balance, dataptr,
4553 		    strlen(mdi_load_balance)) == 0) {
4554 			/* get the load-balance scheme */
4555 			dataptr += strlen(mdi_load_balance) + 1;
4556 			if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
4557 				(void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
4558 				load_balance = LOAD_BALANCE_RR;
4559 			} else if (strcmp(dataptr,
4560 			    LOAD_BALANCE_PROP_LBA) == 0) {
4561 				(void) mdi_set_lb_policy(cdip,
4562 				    LOAD_BALANCE_LBA);
4563 				load_balance = LOAD_BALANCE_LBA;
4564 			} else if (strcmp(dataptr,
4565 			    LOAD_BALANCE_PROP_NONE) == 0) {
4566 				(void) mdi_set_lb_policy(cdip,
4567 				    LOAD_BALANCE_NONE);
4568 				load_balance = LOAD_BALANCE_NONE;
4569 			}
4570 		} else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
4571 		    strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
4572 			int	i = 0;
4573 			char	*ptr;
4574 			char	*tmp;
4575 
4576 			tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
4577 			/* check for numeric value */
4578 			for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
4579 				if (!isdigit(*ptr)) {
4580 					cmn_err(CE_WARN,
4581 					    "Illegal region size: %s."
4582 					    " Setting to default value: %d",
4583 					    tmp,
4584 					    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4585 					region_size =
4586 					    LOAD_BALANCE_DEFAULT_REGION_SIZE;
4587 					break;
4588 				}
4589 			}
4590 			if (i >= strlen(tmp)) {
4591 				region_size = stoi(&tmp);
4592 			}
4593 			(void) mdi_set_lb_region_size(cdip, region_size);
4594 		}
4595 		config_list_len -= (strlen(next_entry) + 1);
4596 		next_entry += strlen(next_entry) + 1;
4597 	}
4598 #ifdef DEBUG
4599 	if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
4600 		VHCI_DEBUG(1, (CE_NOTE, dip,
4601 		    "!vhci_parse_mpxio_lb_options: region-size: %d"
4602 		    " only valid for load-balance=logical-block\n",
4603 		    region_size));
4604 	}
4605 #endif
4606 	if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
4607 		VHCI_DEBUG(1, (CE_NOTE, dip,
4608 		    "!vhci_parse_mpxio_lb_options: No region-size"
4609 		    " defined for load-balance=logical-block."
4610 		    " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
4611 		(void) mdi_set_lb_region_size(cdip,
4612 		    LOAD_BALANCE_DEFAULT_REGION_SIZE);
4613 	}
4614 	if (list_len > 0) {
4615 		kmem_free(config_list, list_len);
4616 	}
4617 }
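
/*
 * Illustrative sketch (not part of the driver): ddi_getlongprop()
 * returns the option list as consecutive NUL-terminated "name=value"
 * strings, which is why the loop above advances by strlen() + 1.  A
 * minimal userland analogue of that walk, with hypothetical names:
 */
#if 0
#include <stdio.h>
#include <string.h>

static void
walk_option_list(const char *list, int list_len)
{
	const char *p = list;

	while (list_len > 0) {
		/* each entry looks like "load-balance=round-robin" */
		(void) printf("entry: %s\n", p);
		list_len -= (int)strlen(p) + 1;	/* +1 for the NUL */
		p += strlen(p) + 1;
	}
}
#endif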
4618 
4619 /*
4620  * Parse the device-type-mpxio-options-list looking for the key of
4621  * "load-balance-options". If found, parse the load balancing options.
4622  * Check the comment of the vhci_get_device_type_mpxio_options()
4623  * for the device-type-mpxio-options-list.
4624  */
4625 static void
4626 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4627     caddr_t datanameptr, int list_len)
4628 {
4629 	char		*dataptr;
4630 	int		len;
4631 
4632 	/*
4633 	 * get the data list
4634 	 */
4635 	dataptr = datanameptr;
4636 	len = 0;
4637 	while (len < list_len &&
4638 	    strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
4639 	    != 0) {
4640 		if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
4641 		    strlen(LOAD_BALANCE_OPTIONS)) == 0) {
4642 			len += strlen(LOAD_BALANCE_OPTIONS) + 1;
4643 			dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
4644 			vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
4645 		}
4646 		len += strlen(dataptr) + 1;
4647 		dataptr += strlen(dataptr) + 1;
4648 	}
4649 }
4650 
4651 /*
4652  * Compare the inquiry string returned from the device with the device-type.
4653  * Check for the existence of the device-type-mpxio-options-list and,
4654  * if found, parse the list checking for a match with the device-type
4655  * value and the inquiry string returned from the device. If a match
4656  * is found, parse the mpxio options list. The format of the
4657  * device-type-mpxio-options-list is:
4658  * device-type-mpxio-options-list=
4659  * "device-type=SUN    SENA", "load-balance-options=logical-block-options"
4660  * "device-type=SUN     SE6920", "round-robin-options";
4661  * logical-block-options="load-balance=logical-block", "region-size=15";
4662  * round-robin-options="load-balance=round-robin";
4663  */
4664 void
4665 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4666     struct scsi_device *devp)
4667 {
4668 
4669 	caddr_t			config_list	= NULL;
4670 	caddr_t			vidptr, datanameptr;
4671 	int			vidlen, dupletlen = 0;
4672 	int			config_list_len = 0, len;
4673 	struct scsi_inquiry	*inq = devp->sd_inq;
4674 
4675 	/*
4676 	 * Look up the device-type-mpxio-options-list and walk through
4677 	 * the list, comparing the vendor id from the earlier inquiry
4678 	 * command with the vids in the list; if there is a match, look
4679 	 * up the mpxio-options value.
4680 	 */
4681 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
4682 	    MPXIO_OPTIONS_LIST,
4683 	    (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {
4684 
4685 		/*
4686 		 * Compare vids in each duplet - if it matches,
4687 		 * parse the mpxio options list.
4688 		 */
4689 		for (len = config_list_len, vidptr = config_list; len > 0;
4690 		    len -= dupletlen) {
4691 
4692 			dupletlen = 0;
4693 
4694 			if (strlen(vidptr) != 0 &&
4695 			    strncmp(vidptr, DEVICE_TYPE_STR,
4696 			    strlen(DEVICE_TYPE_STR)) == 0) {
4697 				/* point to next duplet */
4698 				datanameptr = vidptr + strlen(vidptr) + 1;
4699 				/* add len of this duplet */
4700 				dupletlen += strlen(vidptr) + 1;
4701 				/* get to device type */
4702 				vidptr += strlen(DEVICE_TYPE_STR) + 1;
4703 				vidlen = strlen(vidptr);
4704 				if ((vidlen != 0) &&
4705 				    bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
4706 					vhci_parse_mpxio_options(dip, cdip,
4707 					    datanameptr, len - dupletlen);
4708 					break;
4709 				}
4710 				/* get to next duplet */
4711 				vidptr += strlen(vidptr) + 1;
4712 			}
4713 			/* get to the next device-type */
4714 			while (len - dupletlen > 0 &&
4715 			    strlen(vidptr) != 0 &&
4716 			    strncmp(vidptr, DEVICE_TYPE_STR,
4717 			    strlen(DEVICE_TYPE_STR)) != 0) {
4718 				dupletlen += strlen(vidptr) + 1;
4719 				vidptr += strlen(vidptr) + 1;
4720 			}
4721 		}
4722 		if (config_list_len > 0) {
4723 			kmem_free(config_list, config_list_len);
4724 		}
4725 	}
4726 }
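
/*
 * For reference, a hypothetical scsi_vhci.conf fragment in the format
 * parsed above (the vendor id padded so the bcmp() against the
 * device's inquiry vendor/product data matches byte-for-byte):
 *
 *	device-type-mpxio-options-list =
 *	    "device-type=ACME    ROADRUNNER", "load-balance-options=lbo";
 *	lbo = "load-balance=logical-block", "region-size=16";
 *
 * Each "device-type=..." string and the option-list name following it
 * form one duplet in the packed property buffer walked above.
 */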
4727 
4728 static int
4729 vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip,
4730     struct scsi_failover_ops *fo, scsi_vhci_lun_t *vlun,
4731     struct scsi_vhci *vhci)
4732 {
4733 	struct scsi_path_opinfo		opinfo;
4734 	char				*pclass, *best_pclass;
4735 	char				*resrv_pclass = NULL;
4736 	int				force_rereserve = 0;
4737 	int				update_pathinfo_done = 0;
4738 
4739 	if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) {
4740 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
4741 		    "Failed to get operation info for path:%p\n", (void *)pip));
4742 		return (MDI_FAILURE);
4743 	}
4744 	/* set the xlf capable flag in the vlun for future use */
4745 	vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
4746 	(void) mdi_prop_update_string(pip, "path-class",
4747 	    opinfo.opinfo_path_attr);
4748 
4749 	pclass = opinfo.opinfo_path_attr;
4750 	if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
4751 		mutex_enter(&vlun->svl_mutex);
4752 		if (vlun->svl_active_pclass != NULL) {
4753 			if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
4754 				mutex_exit(&vlun->svl_mutex);
4755 				/*
4756 				 * Externally initiated failover has happened;
4757 				 * force the path state to be STANDBY/ONLINE,
4758 				 * next IO will trigger failover and thus
4759 				 * sync-up the pathstates.  Reason we don't
4760 				 * sync-up immediately by invoking
4761 				 * vhci_update_pathstates() is because it
4762 				 * needs a VHCI_HOLD_LUN() and we don't
4763 				 * want to block here.
4764 				 *
4765 				 * Further, if the device is an ALUA device,
4766 				 * then failure to exactly match 'pclass' and
4767 				 * 'svl_active_pclass'(as is the case here)
4768 				 * indicates that the currently active path
4769 				 * is a 'non-optimized' path - which means
4770 				 * that 'svl_active_pclass' needs to be
4771 				 * replaced with the opinfo.opinfo_path_attr
4772 				 * value.
4773 				 */
4774 
4775 				if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
4776 					char	*tptr;
4777 
4778 					/*
4779 					 * The device is ALUA compliant. The
4780 					 * state needs to be changed to ONLINE
4781 					 * rather than STANDBY, which is
4782 					 * typically done for an asymmetric
4783 					 * device that is not ALUA compliant.
4784 					 */
4785 					mdi_pi_set_state(pip,
4786 					    MDI_PATHINFO_STATE_ONLINE);
4787 					tptr = kmem_alloc(strlen
4788 					    (opinfo.opinfo_path_attr) + 1,
4789 					    KM_SLEEP);
4790 					(void) strlcpy(tptr,
4791 					    opinfo.opinfo_path_attr,
4792 					    (strlen(opinfo.opinfo_path_attr)
4793 					    + 1));
4794 					mutex_enter(&vlun->svl_mutex);
4795 					kmem_free(vlun->svl_active_pclass,
4796 					    strlen(vlun->svl_active_pclass) +
4797 					    1);
4798 					vlun->svl_active_pclass = tptr;
4799 					mutex_exit(&vlun->svl_mutex);
4800 				} else {
4801 					/*
4802 					 * Non ALUA device case.
4803 					 */
4804 					mdi_pi_set_state(pip,
4805 					    MDI_PATHINFO_STATE_STANDBY);
4806 				}
4807 				vlun->svl_fo_support = opinfo.opinfo_mode;
4808 				mdi_pi_set_preferred(pip,
4809 				    opinfo.opinfo_preferred);
4810 				update_pathinfo_done = 1;
4811 			}
4812 
4813 			/*
4814 			 * Find out the class of the currently reserved path,
4815 			 * if there is any.
4816 			 */
4817 			if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) &&
4818 			    mdi_prop_lookup_string(vlun->svl_resrv_pip,
4819 			    "path-class", &resrv_pclass) != MDI_SUCCESS) {
4820 				VHCI_DEBUG(1, (CE_NOTE, NULL,
4821 				    "!vhci_update_pathinfo: prop lookup "
4822 				    "failed for path 0x%p\n",
4823 				    (void *)vlun->svl_resrv_pip));
4824 				/*
4825 				 * Something is wrong with the reserved path.
4826 				 * We can't do much with that right here. Just
4827 				 * force re-reservation to another path.
4828 				 */
4829 				force_rereserve = 1;
4830 			}
4831 
4832 			(void) fo->sfo_pathclass_next(NULL, &best_pclass,
4833 			    vlun->svl_fops_ctpriv);
4834 			if ((force_rereserve == 1) || ((resrv_pclass != NULL) &&
4835 			    (strcmp(pclass, best_pclass) == 0) &&
4836 			    (strcmp(resrv_pclass, best_pclass) != 0))) {
4837 				/*
4838 				 * Inform target driver that a reservation
4839 				 * should be reinstated because the reserved
4840 				 * path is not the most preferred one.
4841 				 */
4842 				mutex_enter(&vhci->vhci_mutex);
4843 				scsi_hba_reset_notify_callback(
4844 				    &vhci->vhci_mutex,
4845 				    &vhci->vhci_reset_notify_listf);
4846 				mutex_exit(&vhci->vhci_mutex);
4847 			}
4848 
4849 			if (update_pathinfo_done == 1) {
4850 				return (MDI_SUCCESS);
4851 			}
4852 		} else {
4853 			char	*tptr;
4854 
4855 			/*
4856 			 * Let's release the mutex before we try to
4857 			 * allocate, since the allocation has the
4858 			 * potential to sleep.
4859 			 */
4860 			mutex_exit(&vlun->svl_mutex);
4861 			tptr = kmem_alloc(strlen(pclass) + 1, KM_SLEEP);
4862 			(void) strlcpy(tptr, pclass, (strlen(pclass) + 1));
4863 			mutex_enter(&vlun->svl_mutex);
4864 			vlun->svl_active_pclass = tptr;
4865 		}
4866 		mutex_exit(&vlun->svl_mutex);
4867 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4868 		vlun->svl_waiting_for_activepath = 0;
4869 	} else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
4870 		mutex_enter(&vlun->svl_mutex);
4871 		if (vlun->svl_active_pclass == NULL) {
4872 			char	*tptr;
4873 
4874 			mutex_exit(&vlun->svl_mutex);
4875 			tptr = kmem_alloc(strlen(pclass) + 1, KM_SLEEP);
4876 			(void) strlcpy(tptr, pclass, (strlen(pclass) + 1));
4877 			mutex_enter(&vlun->svl_mutex);
4878 			vlun->svl_active_pclass = tptr;
4879 		}
4880 		mutex_exit(&vlun->svl_mutex);
4881 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4882 		vlun->svl_waiting_for_activepath = 0;
4883 	} else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
4884 		mutex_enter(&vlun->svl_mutex);
4885 		if (vlun->svl_active_pclass != NULL) {
4886 			if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
4887 				mutex_exit(&vlun->svl_mutex);
4888 				/*
4889 				 * externally initiated failover has happened;
4890 				 * force state to ONLINE (see comment above)
4891 				 */
4892 				mdi_pi_set_state(pip,
4893 				    MDI_PATHINFO_STATE_ONLINE);
4894 				vlun->svl_fo_support = opinfo.opinfo_mode;
4895 				mdi_pi_set_preferred(pip,
4896 				    opinfo.opinfo_preferred);
4897 				return (MDI_SUCCESS);
4898 			}
4899 		}
4900 		mutex_exit(&vlun->svl_mutex);
4901 		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);
4902 
4903 		/*
4904 		 * Initiate auto-failback, if enabled, for path if path-state
4905 		 * is transitioning from OFFLINE->STANDBY and pathclass is the
4906 		 * preferred pathclass for this storage.
4907 		 * NOTE: In case where opinfo_path_state is SCSI_PATH_ACTIVE
4908 		 * (above), where the pi state is set to STANDBY, we don't
4909 		 * initiate auto-failback as the next IO shall take care of.
4910 		 * initiate auto-failback as the next IO shall take care of
4911 		 * this.  See comment above.
4912 		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
4913 		    vlun->svl_fops_ctpriv);
4914 		if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4915 		    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4916 		    (strcmp(pclass, best_pclass) == 0) &&
4917 		    ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE) ||
4918 		    (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
4919 			VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
4920 			    " OFFLINE->STANDBY transition for lun %s\n",
4921 			    best_pclass, (void *)pip, vlun->svl_lun_wwn));
4922 			(void) taskq_dispatch(vhci->vhci_taskq,
4923 			    vhci_initiate_auto_failback, (void *) vlun,
4924 			    KM_SLEEP);
4925 		}
4926 	}
4927 	vlun->svl_fo_support = opinfo.opinfo_mode;
4928 	mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);
4929 
4930 	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
4931 	    " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
4932 	    opinfo.opinfo_rev, opinfo.opinfo_path_state,
4933 	    opinfo.opinfo_preferred, opinfo.opinfo_mode));
4934 
4935 	return (MDI_SUCCESS);
4936 }
4937 
4938 /*
4939  * Form the kstat name and call mdi_pi_kstat_create()
4940  */
4941 void
4942 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
4943 {
4944 	dev_info_t	*tgt_dip;
4945 	dev_info_t	*pdip;
4946 	char		*guid;
4947 	char		*target_port, *target_port_dup;
4948 	char		ks_name[KSTAT_STRLEN];
4949 	uint_t		pid;
4950 	int		by_id;
4951 	mod_hash_val_t	hv;
4952 
4953 
4954 	/* return if we have already allocated kstats */
4955 	if (mdi_pi_kstat_exists(pip))
4956 		return;
4957 
4958 	/*
4959 	 * We need instance numbers to create a kstat name; return if we don't
4960 	 * have instance numbers assigned yet.
4961 	 */
4962 	tgt_dip = mdi_pi_get_client(pip);
4963 	pdip = mdi_pi_get_phci(pip);
4964 	if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
4965 		return;
4966 
4967 	/*
4968 	 * A path oriented kstat has a ks_name of the form:
4969 	 *
4970 	 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
4971 	 *
4972 	 * We maintain a bidirectional 'target-port' to <pid> map,
4973 	 * called targetmap. All pathinfo nodes with the same
4974 	 * 'target-port' map to the same <pid>. The iostat(8) code,
4975 	 * when parsing a path oriented kstat name, uses the <pid> as
4976 	 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
4977 	 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
4978 	 * this ioctl needs to translate a <pid> to a 'target-port'
4979 	 * even after all pathinfo nodes associated with the
4980 	 * 'target-port' have been destroyed. This is needed to support
4981 	 * consistent first-iteration activity-since-boot iostat(8)
4982 	 * output. Because of this requirement, the mapping can't be
4983 	 * based on pathinfo information in a devinfo snapshot.
4984 	 */
4985 
4986 	/* determine 'target-port' */
4987 	if (mdi_prop_lookup_string(pip,
4988 	    SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) {
4989 		target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
4990 		(void) mdi_prop_free(target_port);
4991 		by_id = 1;
4992 	} else {
4993 		/*
4994 		 * If the pHCI did not set up 'target-port' on this
4995 		 * pathinfo node, assume that our client is the only
4996 		 * one with paths to the device by using the guid
4997 		 * value as the 'target-port'. Since no other client
4998 		 * will have the same guid, no other client will use
4999 		 * the same <pid>.  NOTE: a client with an instance
5000 		 * number always has a guid.
5001 		 */
5002 		(void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
5003 		    PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
5004 		target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
5005 		ddi_prop_free(guid);
5006 
5007 		/*
5008 		 * For this type of mapping we don't want the
5009 		 * <id> -> 'target-port' mapping to be made.  This
5010 		 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
5011 		 * to fail, and the iostat(8) long '-n' output will
5012 		 * still use the <pid>.  We do this because we just
5013 		 * made up the 'target-port' using the guid, and we
5014 		 * don't want to expose that fact in iostat output.
5015 		 */
5016 		by_id = 0;
5017 	}
5018 
5019 	/* find/establish <pid> given 'target-port' */
5020 	mutex_enter(&vhci_targetmap_mutex);
5021 	if (mod_hash_find(vhci_targetmap_byport,
5022 	    (mod_hash_key_t)target_port_dup, &hv) == 0) {
5023 		pid = (int)(intptr_t)hv;	/* mapping exists */
5024 	} else {
5025 		pid = vhci_targetmap_pid++;	/* new mapping */
5026 
5027 		(void) mod_hash_insert(vhci_targetmap_byport,
5028 		    (mod_hash_key_t)target_port_dup,
5029 		    (mod_hash_val_t)(intptr_t)pid);
5030 		if (by_id) {
5031 			(void) mod_hash_insert(vhci_targetmap_bypid,
5032 			    (mod_hash_key_t)(uintptr_t)pid,
5033 			    (mod_hash_val_t)(uintptr_t)target_port_dup);
5034 		}
5035 		target_port_dup = NULL;		/* owned by hash */
5036 	}
5037 	mutex_exit(&vhci_targetmap_mutex);
5038 
5039 	/* form kstat name */
5040 	(void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
5041 	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
5042 	    pid, ddi_driver_name(pdip), ddi_get_instance(pdip));
5043 
5044 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
5045 	    "kstat %s: pid %x <-> port %s\n", (void *)pip,
5046 	    ks_name, pid, target_port_dup));
5047 	if (target_port_dup)
5048 		kmem_free(target_port_dup, strlen(target_port_dup) + 1);
5049 
5050 	/* call mdi to create kstats with the name we built */
5051 	(void) mdi_pi_kstat_create(pip, ks_name);
5052 }
5053 
5054 /* ARGSUSED */
5055 static int
5056 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5057 {
5058 	scsi_hba_tran_t			*hba = NULL;
5059 	struct scsi_device		*psd = NULL;
5060 	scsi_vhci_lun_t			*vlun = NULL;
5061 	dev_info_t			*pdip = NULL;
5062 	dev_info_t			*cdip;
5063 	dev_info_t			*tgt_dip;
5064 	struct scsi_vhci		*vhci;
5065 	char				*guid;
5066 	struct scsi_failover_ops	*sfo;
5067 	scsi_vhci_priv_t		*svp = NULL;
5068 	struct scsi_address		*ap;
5069 	struct scsi_pkt			*pkt;
5070 	int				rval = MDI_FAILURE;
5071 	mpapi_item_list_t		*list_ptr;
5072 	mpapi_lu_data_t			*ld;
5073 
5074 	ASSERT(vdip != NULL);
5075 	ASSERT(pip != NULL);
5076 
5077 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
5078 	ASSERT(vhci != NULL);
5079 
5080 	pdip = mdi_pi_get_phci(pip);
5081 	hba = ddi_get_driver_private(pdip);
5082 	ASSERT(hba != NULL);
5083 
5084 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5085 	ASSERT(svp != NULL);
5086 
5087 	cdip = mdi_pi_get_client(pip);
5088 	ASSERT(cdip != NULL);
5089 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
5090 	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
5091 		VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
5092 		    "property failed"));
5093 		goto failure;
5094 	}
5095 
5096 	vlun = vhci_lun_lookup(cdip);
5097 	ASSERT(vlun != NULL);
5098 
5099 	ddi_prop_free(guid);
5100 
5101 	vlun->svl_dip = mdi_pi_get_client(pip);
5102 	ASSERT(vlun->svl_dip != NULL);
5103 
5104 	psd = svp->svp_psd;
5105 	ASSERT(psd != NULL);
5106 
5107 	ap = &psd->sd_address;
5108 
5109 	/*
5110 	 * Get inquiry data into pathinfo related scsi_device structure.
5111 	 * Free sd_inq when pathinfo related scsi_device structure is destroyed
5112 	 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own
5113 	 * copy of scsi_device and scsi_inquiry data on a per-path basis.
5114 	 */
5115 	if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
5116 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
5117 		    "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval));
5118 		rval = MDI_FAILURE;
5119 		goto failure;
5120 	}
5121 
5122 	/*
5123 	 * See if we have a failover module to support the device.
5124 	 *
5125 	 * We re-probe to determine the failover ops for each path. This
5126 	 * is done in case there are any path-specific side-effects associated
5127 	 * with the sfo_device_probe implementation.
5128 	 *
5129 	 * Give the first successful sfo_device_probe the opportunity to
5130 	 * establish 'ctpriv', vlun/client private data. The ctpriv will
5131 	 * then be passed into the failover module on all other sfo_device_*()
5132 	 * operations (and must be freed by sfo_device_unprobe implementation).
5133 	 *
5134 	 * NOTE: While sfo_device_probe is done once per path,
5135 	 * sfo_device_unprobe only occurs once - when the vlun is destroyed.
5136 	 *
5137 	 * NOTE: We don't currently support per-path fops private data
5138 	 * mechanism.
5139 	 */
5140 	sfo = vhci_dev_fo(vdip, psd,
5141 	    &vlun->svl_fops_ctpriv, &vlun->svl_fops_name);
5142 
5143 	/* check path configuration result with current vlun state */
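	/*
	 * A contradiction means one of: a different failover module than a
	 * prior path produced, a module found after the lun was marked
	 * unsupported, or no module found where one was already established.
	 */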
5144 	if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) ||
5145 	    (sfo && vlun->svl_not_supported) ||
5146 	    ((sfo == NULL) && vlun->svl_fops)) {
5147 		/* Getting different results for different paths. */
5148 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5149 		    "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n",
5150 		    (void *)pip));
5151 		cmn_err(CE_WARN, "scsi_vhci: failover contradiction: "
5152 		    "'%s'.vs.'%s': path %s\n",
5153 		    vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL",
5154 		    sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip));
5155 		vlun->svl_not_supported = 1;
5156 		rval = MDI_NOT_SUPPORTED;
5157 		goto done;
5158 	} else if (sfo == NULL) {
5159 		/* No failover module - device not supported under vHCI.  */
5160 		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5161 		    "!vhci_pathinfo_online: dev (path 0x%p) not "
5162 		    "supported\n", (void *)pip));
5163 
5164 		/* XXX does this contradict vhci_is_dev_supported ? */
5165 		vlun->svl_not_supported = 1;
5166 		rval = MDI_NOT_SUPPORTED;
5167 		goto done;
5168 	}
5169 
5170 	/* failover supported for device - save failover_ops in vlun */
5171 	vlun->svl_fops = sfo;
5172 	ASSERT(vlun->svl_fops_name != NULL);
5173 
5174 	/*
5175 	 * Obtain the device-type based mpxio options as specified in
5176 	 * scsi_vhci.conf file.
5177 	 *
5178 	 * NOTE: currently, the end result is a call to
5179 	 * mdi_set_lb_region_size().
5180 	 */
5181 	tgt_dip = psd->sd_dev;
5182 	ASSERT(tgt_dip != NULL);
5183 	vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);
5184 
5185 	/*
5186 	 * if PGR is active, revalidate key and register on this path also,
5187 	 * if key is still valid
5188 	 */
5189 	sema_p(&vlun->svl_pgr_sema);
5190 	if (vlun->svl_pgr_active) {
5191 		rval = vhci_pgr_validate_and_register(svp);
5192 		if (rval != 1) {
5193 			rval = MDI_FAILURE;
5194 			sema_v(&vlun->svl_pgr_sema);
5195 			goto failure;
5196 		}
5197 	}
5198 	sema_v(&vlun->svl_pgr_sema);
5199 
5200 	if (svp->svp_new_path) {
5201 		/*
5202 		 * Last chance to perform any cleanup operations on this
5203 		 * new path before making this path completely online.
5204 		 */
5205 		svp->svp_new_path = 0;
5206 
5207 		/*
5208 		 * If scsi_vhci knows the lun is already RESERVE'd,
5209 		 * then skip the issue of RELEASE on new path.
5210 		 */
5211 		if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
5212 			/*
5213 			 * Issue SCSI-2 RELEASE only for the first time on
5214 			 * a new path just in case the host rebooted and
5215 			 * a reservation is still pending on this path.
5216 			 * IBM Shark storage does not clear RESERVE upon
5217 			 * host reboot.
5218 			 */
5219 			pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
5220 			    sizeof (struct scsi_arq_status), 0, 0,
5221 			    SLEEP_FUNC, NULL);
5222 			if (pkt == NULL) {
5223 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5224 				    "!vhci_pathinfo_online: "
5225 				    "Release init_pkt failed :%p\n",
5226 				    (void *)pip));
5227 				rval = MDI_FAILURE;
5228 				goto failure;
5229 			}
5230 			pkt->pkt_cdbp[0] = SCMD_RELEASE;
5231 			pkt->pkt_time = 60;
5232 
5233 			VHCI_DEBUG(1, (CE_NOTE, NULL,
5234 			    "!vhci_path_online: path:%p "
5235 			    "Issued SCSI-2 RELEASE\n", (void *)pip));
5236 
5237 			/* Ignore the return value */
5238 			(void) vhci_do_scsi_cmd(pkt);
5239 			scsi_destroy_pkt(pkt);
5240 		}
5241 	}
5242 
5243 	rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
5244 	if (rval == MDI_FAILURE) {
5245 		goto failure;
5246 	}
5247 
5248 	/* Initialize MP-API data */
5249 	vhci_update_mpapi_data(vhci, vlun, pip);
5250 
5251 	/*
5252 	 * MP-API also needs the Inquiry data to be maintained in the
5253 	 * mp_vendor_prop_t structure, so find the lun and update its
5254 	 * structure with this data.
5255 	 */
5256 	list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
5257 	    MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
5258 	ld = (mpapi_lu_data_t *)list_ptr->item->idata;
5259 	if (ld != NULL) {
5260 		bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
5261 		bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
5262 		bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
5263 	} else {
5264 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
5265 		    "mpapi_lu_data_t is NULL"));
5266 	}
5267 
5268 	/* create kstats for path */
5269 	vhci_kstat_create_pathinfo(pip);
5270 
5271 done:
5272 	mutex_enter(&vhci_global_mutex);
5273 	cv_broadcast(&vhci_cv);
5274 	mutex_exit(&vhci_global_mutex);
5275 
5276 	if (vlun->svl_setcap_done) {
5277 		(void) vhci_pHCI_cap(ap, "sector-size",
5278 		    vlun->svl_sector_size, 1, pip);
5279 	}
5280 
5281 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
5282 	    (void *)pip));
5283 
5284 failure:
5285 	return (rval);
5286 }
5287 
5288 /*
5289  * path offline handler.  Release all bindings that will not be
5290  * released by the normal packet transport/completion code path.
5291  * Since we don't (presently) keep any bindings alive outside of
5292  * the in-transport packets (which will be released on completion)
5293  * there is not much to do here.
5294  */
5295 /* ARGSUSED */
5296 static int
5297 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5298 {
5299 	scsi_hba_tran_t		*hba = NULL;
5300 	struct scsi_device	*psd = NULL;
5301 	dev_info_t		*pdip = NULL;
5302 	dev_info_t		*cdip = NULL;
5303 	scsi_vhci_priv_t	*svp = NULL;
5304 
5305 	ASSERT(vdip != NULL);
5306 	ASSERT(pip != NULL);
5307 
5308 	pdip = mdi_pi_get_phci(pip);
5309 	ASSERT(pdip != NULL);
5310 	if (pdip == NULL) {
5311 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5312 		    "phci dip", (void *)pip));
5313 		return (MDI_FAILURE);
5314 	}
5315 
5316 	cdip = mdi_pi_get_client(pip);
5317 	ASSERT(cdip != NULL);
5318 	if (cdip == NULL) {
5319 		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5320 		    "client dip", (void *)pip));
5321 		return (MDI_FAILURE);
5322 	}
5323 
5324 	hba = ddi_get_driver_private(pdip);
5325 	ASSERT(hba != NULL);
5326 
5327 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5328 	if (svp == NULL) {
5329 		/*
5330 		 * An mdi_pathinfo node in the INIT state can have its vHCI
5331 		 * private information set to NULL.
5332 		 */
5333 		VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5334 		    "svp is NULL for pip 0x%p\n", (void *)pip));
5335 		return (MDI_SUCCESS);
5336 	}
5337 
5338 	psd = svp->svp_psd;
5339 	ASSERT(psd != NULL);
5340 
5341 	mutex_enter(&svp->svp_mutex);
5342 
5343 	VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5344 	    "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
5345 	while (svp->svp_cmds != 0) {
5346 		if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
5347 		    drv_usectohz(vhci_path_quiesce_timeout * 1000000),
5348 		    TR_CLOCK_TICK) == -1) {
5349 			/*
5350 			 * The timeout time reached without the condition
5351 			 * being signaled.
5352 			 */
5353 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5354 			    "Timeout reached on path 0x%p without the cond\n",
5355 			    (void *)pip));
5356 			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5357 			    "%d cmds still pending on path: 0x%p\n",
5358 			    svp->svp_cmds, (void *)pip));
5359 			break;
5360 		}
5361 	}
5362 	mutex_exit(&svp->svp_mutex);
5363 
5364 	/*
5365 	 * Check to see if this vlun has an active SCSI-II RESERVE and this
5366 	 * is the pip for the path that holds the reservation.
5367 	 * If so, clear the reservation by sending a reset, so the host will
5368 	 * not get a reservation conflict.  Clear the VLUN_RESERVE_ACTIVE_FLG
5369 	 * flag for this lun.  A reset notify is also sent to the target
5370 	 * driver just in case the POR check condition is cleared by some
5371 	 * other layer in the stack.
5372 	 */
5373 	if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
5374 		if (pip == svp->svp_svl->svl_resrv_pip) {
5375 			if (vhci_recovery_reset(svp->svp_svl,
5376 			    &svp->svp_psd->sd_address, TRUE,
5377 			    VHCI_DEPTH_TARGET) == 0) {
5378 				VHCI_DEBUG(1, (CE_NOTE, NULL,
5379 				    "!vhci_pathinfo_offline (pip:%p):"
5380 				    "reset failed, retrying\n", (void *)pip));
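				/* pause one second before retrying the reset once */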
5381 				delay(1 * drv_usectohz(1000000));
5382 				if (vhci_recovery_reset(svp->svp_svl,
5383 				    &svp->svp_psd->sd_address, TRUE,
5384 				    VHCI_DEPTH_TARGET) == 0) {
5385 					VHCI_DEBUG(1, (CE_NOTE, NULL,
5386 					    "!vhci_pathinfo_offline "
5387 					    "(pip:%p): reset failed, "
5388 					    "giving up!\n", (void *)pip));
5389 				}
5390 			}
5391 			svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
5392 		}
5393 	}
5394 
5395 	mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
5396 	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
5397 
5398 	VHCI_DEBUG(1, (CE_NOTE, NULL,
5399 	    "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
5400 	return (MDI_SUCCESS);
5401 }
5402 
5403 
5404 /*
5405  * routine for SCSI VHCI IOCTL implementation.
5406  */
5407 /* ARGSUSED */
5408 static int
5409 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
5410 {
5411 	struct scsi_vhci		*vhci;
5412 	dev_info_t			*vdip;
5413 	mdi_pathinfo_t			*pip;
5414 	int				instance, held;
5415 	int				retval = 0;
5416 	caddr_t				phci_path = NULL, client_path = NULL;
5417 	caddr_t				paddr = NULL;
5418 	sv_iocdata_t			ioc;
5419 	sv_iocdata_t			*pioc = &ioc;
5420 	sv_switch_to_cntlr_iocdata_t	iocsc;
5421 	sv_switch_to_cntlr_iocdata_t	*piocsc = &iocsc;
5422 	caddr_t				s;
5423 	scsi_vhci_lun_t			*vlun;
5424 	struct scsi_failover_ops	*fo;
5425 	char				*pclass;
5426 
5427 	/* Check for validity of vhci structure */
5428 	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
5429 	if (vhci == NULL) {
5430 		return (ENXIO);
5431 	}
5432 
5433 	mutex_enter(&vhci->vhci_mutex);
5434 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5435 		mutex_exit(&vhci->vhci_mutex);
5436 		return (ENXIO);
5437 	}
5438 	mutex_exit(&vhci->vhci_mutex);
5439 
5440 	/* Get the vhci dip */
5441 	vdip = vhci->vhci_dip;
5442 	ASSERT(vdip != NULL);
5443 	instance = ddi_get_instance(vdip);
5444 
5445 	/* Allocate memory for getting parameters from userland */
5446 	phci_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5447 	client_path	= kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5448 	paddr		= kmem_zalloc(MAXNAMELEN, KM_SLEEP);
5449 
5450 	/*
5451 	 * Set a local variable indicating the ioctl name. Used for
5452 	 * printing debug strings.
5453 	 */
5454 	switch (cmd) {
5455 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5456 		s = "GET_CLIENT_MULTIPATH_INFO";
5457 		break;
5458 
5459 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5460 		s = "GET_PHCI_MULTIPATH_INFO";
5461 		break;
5462 
5463 	case SCSI_VHCI_GET_CLIENT_NAME:
5464 		s = "GET_CLIENT_NAME";
5465 		break;
5466 
5467 	case SCSI_VHCI_PATH_ONLINE:
5468 		s = "PATH_ONLINE";
5469 		break;
5470 
5471 	case SCSI_VHCI_PATH_OFFLINE:
5472 		s = "PATH_OFFLINE";
5473 		break;
5474 
5475 	case SCSI_VHCI_PATH_STANDBY:
5476 		s = "PATH_STANDBY";
5477 		break;
5478 
5479 	case SCSI_VHCI_PATH_TEST:
5480 		s = "PATH_TEST";
5481 		break;
5482 
5483 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5484 		s = "SWITCH_TO_CNTLR";
5485 		break;
5486 	case SCSI_VHCI_PATH_DISABLE:
5487 		s = "PATH_DISABLE";
5488 		break;
5489 	case SCSI_VHCI_PATH_ENABLE:
5490 		s = "PATH_ENABLE";
5491 		break;
5492 
5493 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5494 		s = "GET_TARGET_LONGNAME";
5495 		break;
5496 
5497 #ifdef	DEBUG
5498 	case SCSI_VHCI_CONFIGURE_PHCI:
5499 		s = "CONFIGURE_PHCI";
5500 		break;
5501 
5502 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5503 		s = "UNCONFIGURE_PHCI";
5504 		break;
5505 #endif
5506 
5507 	default:
5508 		s = "Unknown";
5509 		vhci_log(CE_NOTE, vdip,
5510 		    "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
5511 		retval = ENOTSUP;
5512 		break;
5513 	}
5514 	if (retval != 0) {
5515 		goto end;
5516 	}
5517 
5518 	VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));
5519 
5520 	/*
5521 	 * Get IOCTL parameters from userland
5522 	 */
5523 	switch (cmd) {
5524 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5525 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5526 	case SCSI_VHCI_GET_CLIENT_NAME:
5527 	case SCSI_VHCI_PATH_ONLINE:
5528 	case SCSI_VHCI_PATH_OFFLINE:
5529 	case SCSI_VHCI_PATH_STANDBY:
5530 	case SCSI_VHCI_PATH_TEST:
5531 	case SCSI_VHCI_PATH_DISABLE:
5532 	case SCSI_VHCI_PATH_ENABLE:
5533 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5534 #ifdef	DEBUG
5535 	case SCSI_VHCI_CONFIGURE_PHCI:
5536 	case SCSI_VHCI_UNCONFIGURE_PHCI:
5537 #endif
5538 		retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
5539 		break;
5540 
5541 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5542 		retval = vhci_get_iocswitchdata((const void *)data, piocsc,
5543 		    mode, s);
5544 		break;
5545 	}
5546 	if (retval != 0) {
5547 		goto end;
5548 	}
5549 
5550 
5551 	/*
5552 	 * Process the IOCTL
5553 	 */
5554 	switch (cmd) {
5555 	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5556 	{
5557 		uint_t		num_paths;	/* Num paths to client dev */
5558 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5559 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5560 		dev_info_t	*cdip;		/* Client device dip */
5561 
5562 		if (pioc->ret_elem == NULL) {
5563 			retval = EINVAL;
5564 			break;
5565 		}
5566 
5567 		/* Get client device path from user land */
5568 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5569 			retval = EFAULT;
5570 			break;
5571 		}
5572 
5573 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5574 		    "client <%s>", s, client_path));
5575 
5576 		/* Get number of paths to this client device */
5577 		if ((cdip = mdi_client_path2devinfo(vdip, client_path))
5578 		    == NULL) {
5579 			retval = ENXIO;
5580 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5581 			    "client dip doesn't exist. invalid path <%s>",
5582 			    s, client_path));
5583 			break;
5584 		}
5585 		num_paths = mdi_client_get_path_count(cdip);
5586 
5587 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5588 		    sizeof (num_paths), mode)) {
5589 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5590 			    "num_paths copyout failed", s));
5591 			retval = EFAULT;
5592 			break;
5593 		}
5594 
5595 		/* If the user just wanted num_paths, then return */
5596 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5597 		    num_paths == 0) {
5598 			break;
5599 		}
5600 
5601 		/* Clamp num_paths to the most that can be sent to userland */
5602 		if (num_paths > pioc->buf_elem) {
5603 			num_paths = pioc->buf_elem;
5604 		}
5605 
5606 		/* Allocate memory and get userland pointers */
5607 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5608 		    pioc, mode, s) != 0) {
5609 			retval = EFAULT;
5610 			break;
5611 		}
5612 		ASSERT(upibuf != NULL);
5613 		ASSERT(kpibuf != NULL);
5614 
5615 		/*
5616 		 * Get the path information and send it to userland.
5617 		 */
5618 		if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
5619 		    != MDI_SUCCESS) {
5620 			retval = ENXIO;
5621 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5622 			break;
5623 		}
5624 
5625 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5626 		    pioc, mode, s)) {
5627 			retval = EFAULT;
5628 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5629 			break;
5630 		}
5631 
5632 		/* Free the memory allocated for path information */
5633 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5634 		break;
5635 	}
5636 
5637 	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5638 	{
5639 		uint_t		num_paths;	/* Num paths to client dev */
5640 		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
5641 		sv_path_info_t	*kpibuf = NULL; /* Kernel data for ioctls */
5642 		dev_info_t	*pdip;		/* PHCI device dip */
5643 
5644 		if (pioc->ret_elem == NULL) {
5645 			retval = EINVAL;
5646 			break;
5647 		}
5648 
5649 		/* Get PHCI device path from user land */
5650 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5651 			retval = EFAULT;
5652 			break;
5653 		}
5654 
5655 		VHCI_DEBUG(6, (CE_WARN, vdip,
5656 		    "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
5657 
5658 		/* Get number of devices associated with this PHCI device */
5659 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5660 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5661 			    "phci dip doesn't exist. invalid path <%s>",
5662 			    s, phci_path));
5663 			retval = ENXIO;
5664 			break;
5665 		}
5666 
5667 		num_paths = mdi_phci_get_path_count(pdip);
5668 
5669 		if (ddi_copyout(&num_paths, pioc->ret_elem,
5670 		    sizeof (num_paths), mode)) {
5671 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5672 			    "num_paths copyout failed", s));
5673 			retval = EFAULT;
5674 			break;
5675 		}
5676 
5677 		/* If the user just wanted num_paths, then return */
5678 		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5679 		    num_paths == 0) {
5680 			break;
5681 		}
5682 
5683 		/* Clamp num_paths to the most that can be sent to userland */
5684 		if (num_paths > pioc->buf_elem) {
5685 			num_paths = pioc->buf_elem;
5686 		}
5687 
5688 		/* Allocate memory and get userland pointers */
5689 		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5690 		    pioc, mode, s) != 0) {
5691 			retval = EFAULT;
5692 			break;
5693 		}
5694 		ASSERT(upibuf != NULL);
5695 		ASSERT(kpibuf != NULL);
5696 
5697 		/*
5698 		 * Get the path information and send it to userland.
5699 		 */
5700 		if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
5701 		    != MDI_SUCCESS) {
5702 			retval = ENXIO;
5703 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5704 			break;
5705 		}
5706 
5707 		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5708 		    pioc, mode, s)) {
5709 			retval = EFAULT;
5710 			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5711 			break;
5712 		}
5713 
5714 		/* Free the memory allocated for path information */
5715 		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5716 		break;
5717 	}
5718 
5719 	case SCSI_VHCI_GET_CLIENT_NAME:
5720 	{
5721 		dev_info_t		*cdip, *pdip;
5722 
5723 		/* Get PHCI path and device address from user land */
5724 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5725 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5726 			retval = EFAULT;
5727 			break;
5728 		}
5729 
5730 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5731 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5732 
5733 		/* Get the PHCI dip */
5734 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5735 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5736 			    "phci dip doesn't exist. invalid path <%s>",
5737 			    s, phci_path));
5738 			retval = ENXIO;
5739 			break;
5740 		}
5741 
5742 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5743 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5744 			    "pathinfo doesn't exist. invalid device addr", s));
5745 			retval = ENXIO;
5746 			break;
5747 		}
5748 
5749 		/* Get the client device pathname and send to userland */
5750 		cdip = mdi_pi_get_client(pip);
5751 		vhci_ioc_devi_to_path(cdip, client_path);
5752 
5753 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5754 		    "client <%s>", s, client_path));
5755 
5756 		if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
5757 			retval = EFAULT;
5758 			break;
5759 		}
5760 		break;
5761 	}
5762 
5763 	case SCSI_VHCI_PATH_ONLINE:
5764 	case SCSI_VHCI_PATH_OFFLINE:
5765 	case SCSI_VHCI_PATH_STANDBY:
5766 	case SCSI_VHCI_PATH_TEST:
5767 	{
5768 		dev_info_t		*pdip;	/* PHCI dip */
5769 
5770 		/* Get PHCI path and device address from user land */
5771 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5772 		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5773 			retval = EFAULT;
5774 			break;
5775 		}
5776 
5777 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5778 		    "phci <%s>, paddr <%s>", s, phci_path, paddr));
5779 
5780 		/* Get the PHCI dip */
5781 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5782 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5783 			    "phci dip doesn't exist. invalid path <%s>",
5784 			    s, phci_path));
5785 			retval = ENXIO;
5786 			break;
5787 		}
5788 
5789 		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5790 			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5791 			    "pathinfo doesn't exist. invalid device addr", s));
5792 			retval = ENXIO;
5793 			break;
5794 		}
5795 
5796 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5797 		    "Calling MDI function to change device state", s));
5798 
5799 		switch (cmd) {
5800 		case SCSI_VHCI_PATH_ONLINE:
5801 			retval = mdi_pi_online(pip, 0);
5802 			break;
5803 
5804 		case SCSI_VHCI_PATH_OFFLINE:
5805 			retval = mdi_pi_offline(pip, 0);
5806 			break;
5807 
5808 		case SCSI_VHCI_PATH_STANDBY:
5809 			retval = mdi_pi_standby(pip, 0);
5810 			break;
5811 
5812 		case SCSI_VHCI_PATH_TEST:
5813 			break;
5814 		}
5815 		break;
5816 	}
5817 
5818 	case SCSI_VHCI_SWITCH_TO_CNTLR:
5819 	{
5820 		dev_info_t *cdip;
5821 		struct scsi_device *devp;
5822 
5823 		/* Get the client device pathname */
5824 		if (ddi_copyin(piocsc->client, client_path,
5825 		    MAXPATHLEN, mode)) {
5826 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5827 			    "client_path copyin failed", s));
5828 			retval = EFAULT;
5829 			break;
5830 		}
5831 
5832 		/* Get the path class to which user wants to switch */
5833 		if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
5834 			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5835 			    "controller_class copyin failed", s));
5836 			retval = EFAULT;
5837 			break;
5838 		}
5839 
5840 		/* Perform validity checks */
5841 		if ((cdip = mdi_client_path2devinfo(vdip,
5842 		    client_path)) == NULL) {
5843 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5844 			    "client dip doesn't exist. invalid path <%s>",
5845 			    s, client_path));
5846 			retval = ENXIO;
5847 			break;
5848 		}
5849 
5850 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
5851 		    "to switch controller"));
5852 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
5853 		    "class <%s>", client_path, paddr));
5854 
5855 		if (strcmp(paddr, PCLASS_PRIMARY) &&
5856 		    strcmp(paddr, PCLASS_SECONDARY)) {
5857 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5858 			    "invalid path class <%s>", s, paddr));
5859 			retval = ENXIO;
5860 			break;
5861 		}
5862 
5863 		devp = ddi_get_driver_private(cdip);
5864 		if (devp == NULL) {
5865 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5866 			    "invalid scsi device <%s>", s, client_path));
5867 			retval = ENXIO;
5868 			break;
5869 		}
5870 		vlun = ADDR2VLUN(&devp->sd_address);
5871 		ASSERT(vlun);
5872 
5873 		/*
5874 		 * Check to see if the device has only one pclass, PRIMARY.
5875 		 * If so, this device doesn't support failover.  It is assumed
5876 		 * that a device with a single pclass has PRIMARY, as that's
5877 		 * the case today.  If this is not true and, in the future,
5878 		 * other symmetric devices are supported with another pclass,
5879 		 * this IOCTL will have to be overhauled anyway, as the only
5880 		 * arguments it currently accepts are PRIMARY and SECONDARY.
5881 		 */
5882 		fo = vlun->svl_fops;
5883 		if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass,
5884 		    vlun->svl_fops_ctpriv)) {
5885 			retval = ENOTSUP;
5886 			break;
5887 		}
5888 
5889 		VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
5890 		mutex_enter(&vlun->svl_mutex);
5891 		if (vlun->svl_active_pclass != NULL) {
5892 			if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
5893 				mutex_exit(&vlun->svl_mutex);
5894 				retval = EALREADY;
5895 				VHCI_RELEASE_LUN(vlun);
5896 				break;
5897 			}
5898 		}
5899 		mutex_exit(&vlun->svl_mutex);
5900 		/* Call the mdi function to cause a switchover */
5901 		retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
5902 		if (retval == MDI_SUCCESS) {
5903 			retval = 0;
5904 		} else if (retval == MDI_BUSY) {
5905 			retval = EBUSY;
5906 		} else {
5907 			retval = EIO;
5908 		}
5909 		VHCI_RELEASE_LUN(vlun);
5910 		break;
5911 	}
5912 
5913 	case SCSI_VHCI_PATH_ENABLE:
5914 	case SCSI_VHCI_PATH_DISABLE:
5915 	{
5916 		dev_info_t	*cdip, *pdip;
5917 
5918 		/*
5919 		 * Get client device path from user land
5920 		 */
5921 		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5922 			retval = EFAULT;
5923 			break;
5924 		}
5925 
5926 		/*
5927 		 * Get Phci device path from user land
5928 		 */
5929 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5930 			retval = EFAULT;
5931 			break;
5932 		}
5933 
5934 		/*
5935 		 * Get the devinfo for the Phci.
5936 		 */
5937 		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5938 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5939 			    "phci dip doesn't exist. invalid path <%s>",
5940 			    s, phci_path));
5941 			retval = ENXIO;
5942 			break;
5943 		}
5944 
5945 		/*
5946 		 * If the client path is set to /scsi_vhci, then we need to
5947 		 * perform the operation on all clients, so set cdip to NULL.
5948 		 * Otherwise, try to get the client dip.
5949 		 */
5950 		if (strcmp(client_path, "/scsi_vhci") == 0) {
5951 			cdip = NULL;
5952 		} else {
5953 			if ((cdip = mdi_client_path2devinfo(vdip,
5954 			    client_path)) == NULL) {
5955 				retval = ENXIO;
5956 				VHCI_DEBUG(1, (CE_WARN, NULL,
5957 				    "!vhci_ioctl: ioctl <%s> client dip "
5958 				    "doesn't exist. invalid path <%s>",
5959 				    s, client_path));
5960 				break;
5961 			}
5962 		}
5963 
5964 		if (cmd == SCSI_VHCI_PATH_ENABLE)
5965 			retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
5966 		else
5967 			retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);
5968 
5969 		break;
5970 	}
5971 
5972 	case SCSI_VHCI_GET_TARGET_LONGNAME:
5973 	{
5974 		uint_t		pid = pioc->buf_elem;
5975 		char		*target_port;
5976 		mod_hash_val_t	hv;
5977 
5978 		/* targetmap lookup of 'target-port' by <pid> */
5979 		if (mod_hash_find(vhci_targetmap_bypid,
5980 		    (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
5981 			/*
5982 			 * NOTE: failure to find the mapping is OK for guid
5983 			 * based 'target-port' values.
5984 			 */
5985 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5986 			    "targetport mapping doesn't exist: pid %d",
5987 			    s, pid));
5988 			retval = ENXIO;
5989 			break;
5990 		}
5991 
5992 		/* copyout 'target-port' result */
5993 		target_port = (char *)hv;
5994 		if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
5995 			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5996 			    "targetport copyout failed: len: %d",
5997 			    s, (int)strlen(target_port)));
5998 			retval = EFAULT;
5999 		}
6000 		break;
6001 	}
6002 
6003 #ifdef	DEBUG
6004 	case SCSI_VHCI_CONFIGURE_PHCI:
6005 	{
6006 		dev_info_t		*pdip;
6007 
6008 		/* Get PHCI path and device address from user land */
6009 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6010 			retval = EFAULT;
6011 			break;
6012 		}
6013 
6014 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6015 		    "phci <%s>", s, phci_path));
6016 
6017 		/* Get the PHCI dip */
6018 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6019 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6020 			    "phci dip doesn't exist. invalid path <%s>",
6021 			    s, phci_path));
6022 			retval = ENXIO;
6023 			break;
6024 		}
6025 
6026 		if (ndi_devi_config(pdip,
6027 		    NDI_DEVFS_CLEAN | NDI_DEVI_PERSIST) != NDI_SUCCESS) {
6028 			retval = EIO;
6029 		}
6030 
6031 		ddi_release_devi(pdip);
6032 		break;
6033 	}
6034 
6035 	case SCSI_VHCI_UNCONFIGURE_PHCI:
6036 	{
6037 		dev_info_t		*pdip;
6038 
6039 		/* Get PHCI path and device address from user land */
6040 		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6041 			retval = EFAULT;
6042 			break;
6043 		}
6044 
6045 		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6046 		    "phci <%s>", s, phci_path));
6047 
6048 		/* Get the PHCI dip */
6049 		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6050 			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6051 			    "phci dip doesn't exist. invalid path <%s>",
6052 			    s, phci_path));
6053 			retval = ENXIO;
6054 			break;
6055 		}
6056 
6057 		if (ndi_devi_unconfig(pdip,
6058 		    NDI_DEVI_REMOVE | NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
6059 			retval = EBUSY;
6060 		}
6061 
6062 		ddi_release_devi(pdip);
6063 		break;
6064 	}
6065 #endif
6066 	}
6067 
6068 end:
6069 	/* Free the memory allocated above */
6070 	if (phci_path != NULL) {
6071 		kmem_free(phci_path, MAXPATHLEN);
6072 	}
6073 	if (client_path != NULL) {
6074 		kmem_free(client_path, MAXPATHLEN);
6075 	}
6076 	if (paddr != NULL) {
6077 		kmem_free(paddr, MAXNAMELEN);
6078 	}
6079 	return (retval);
6080 }
6081 
6082 /*
6083  * devctl IOCTL support for client device DR
6084  */
6085 /* ARGSUSED */
6086 int
6087 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
6088     int *rvalp)
6089 {
6090 	dev_info_t *self;
6091 	dev_info_t *child;
6092 	scsi_hba_tran_t *hba;
6093 	struct devctl_iocdata *dcp;
6094 	struct scsi_vhci *vhci;
6095 	int rv = 0;
6096 	int retval = 0;
6097 	scsi_vhci_priv_t *svp;
6098 	mdi_pathinfo_t  *pip;
6099 
6100 	if ((vhci = ddi_get_soft_state(vhci_softstate,
6101 	    MINOR2INST(getminor(dev)))) == NULL)
6102 		return (ENXIO);
6103 
6104 	/*
6105 	 * check if :devctl minor device has been opened
6106 	 */
6107 	mutex_enter(&vhci->vhci_mutex);
6108 	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
6109 		mutex_exit(&vhci->vhci_mutex);
6110 		return (ENXIO);
6111 	}
6112 	mutex_exit(&vhci->vhci_mutex);
6113 
6114 	self = vhci->vhci_dip;
6115 	hba = ddi_get_driver_private(self);
6116 	if (hba == NULL)
6117 		return (ENXIO);
6118 
6119 	/*
6120 	 * We can use the generic implementation for these ioctls
6121 	 */
6122 	switch (cmd) {
6123 	case DEVCTL_DEVICE_GETSTATE:
6124 	case DEVCTL_DEVICE_ONLINE:
6125 	case DEVCTL_DEVICE_OFFLINE:
6126 	case DEVCTL_DEVICE_REMOVE:
6127 	case DEVCTL_BUS_GETSTATE:
6128 		return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
6129 	}
6130 
6131 	/*
6132 	 * read devctl ioctl data
6133 	 */
6134 	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
6135 		return (EFAULT);
6136 
6137 	switch (cmd) {
6138 
6139 	case DEVCTL_DEVICE_RESET:
6140 		/*
6141 		 * lookup and hold child device
6142 		 */
6143 		if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
6144 		    ndi_dc_getaddr(dcp))) == NULL) {
6145 			rv = ENXIO;
6146 			break;
6147 		}
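		/*
		 * Pick any online or standby path to the device and use it
		 * to issue the recovery reset.
		 */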
6148 		retval = mdi_select_path(child, NULL,
6149 		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
6150 		    NULL, &pip);
6151 		if ((retval != MDI_SUCCESS) || (pip == NULL)) {
6152 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:"
6153 			    "Unable to get a path, dip 0x%p", (void *)child));
6154 			rv = ENXIO;
6155 			break;
6156 		}
6157 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
6158 		if (vhci_recovery_reset(svp->svp_svl,
6159 		    &svp->svp_psd->sd_address, TRUE,
6160 		    VHCI_DEPTH_TARGET) == 0) {
6161 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6162 			    "!vhci_ioctl(pip:%p): "
6163 			    "reset failed\n", (void *)pip));
6164 			rv = ENXIO;
6165 		}
6166 		mdi_rele_path(pip);
6167 		break;
6168 
6169 	case DEVCTL_BUS_QUIESCE:
6170 	case DEVCTL_BUS_UNQUIESCE:
6171 	case DEVCTL_BUS_RESET:
6172 	case DEVCTL_BUS_RESETALL:
6173 #ifdef	DEBUG
6174 	case DEVCTL_BUS_CONFIGURE:
6175 	case DEVCTL_BUS_UNCONFIGURE:
6176 #endif
6177 		rv = ENOTSUP;
6178 		break;
6179 
6180 	default:
6181 		rv = ENOTTY;
6182 	} /* end of outer switch */
6183 
6184 	ndi_dc_freehdl(dcp);
6185 	return (rv);
6186 }
6187 
6188 /*
6189  * Routine to get the PHCI pathname from ioctl structures in userland
6190  */
6191 /* ARGSUSED */
6192 static int
6193 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
6194     int mode, caddr_t s)
6195 {
6196 	int retval = 0;
6197 
6198 	if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
6199 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
6200 		    "phci_path copyin failed", s));
6201 		retval = EFAULT;
6202 	}
6203 	return (retval);
6204 
6205 }
6206 
6207 
6208 /*
6209  * Routine to get the Client device pathname from ioctl structures in userland
6210  */
6211 /* ARGSUSED */
6212 static int
6213 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
6214     int mode, caddr_t s)
6215 {
6216 	int retval = 0;
6217 
6218 	if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
6219 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
6220 		    "ioctl <%s> client_path copyin failed", s));
6221 		retval = EFAULT;
6222 	}
6223 	return (retval);
6224 }
6225 
6226 
6227 /*
6228  * Routine to get physical device address from ioctl structure in userland
6229  */
6230 /* ARGSUSED */
6231 static int
6232 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
6233 {
6234 	int retval = 0;
6235 
6236 	if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
6237 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
6238 		    "ioctl <%s> device addr copyin failed", s));
6239 		retval = EFAULT;
6240 	}
6241 	return (retval);
6242 }
6243 
6244 
6245 /*
6246  * Routine to send client device pathname to userland.
6247  */
6248 /* ARGSUSED */
6249 static int
6250 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
6251     int mode, caddr_t s)
6252 {
6253 	int retval = 0;
6254 
6255 	if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
6256 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
6257 		    "ioctl <%s> client_path copyout failed", s));
6258 		retval = EFAULT;
6259 	}
6260 	return (retval);
6261 }
6262 
6263 
6264 /*
6265  * Routine to translate a dev_info pointer (dip) to a device pathname.
6266  */
6267 static void
6268 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
6269 {
6270 	(void) ddi_pathname(dip, path);
6271 }
6272 
6273 
6274 /*
6275  * vhci_get_phci_path_list:
6276  *		get information about devices associated with a
6277  *		given PHCI device.
6278  *
6279  * Return Values:
6280  *		path information elements
6281  */
6282 int
6283 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
6284     uint_t num_elems)
6285 {
6286 	uint_t			count, done;
6287 	mdi_pathinfo_t		*pip;
6288 	sv_path_info_t		*ret_pip;
6289 	int			status;
6290 	size_t			prop_size;
6291 
6292 	/*
6293 	 * Get the PHCI structure and retrieve the path information
6294 	 * from the GUID hash table.
6295 	 */
6296 
6297 	ret_pip = pibuf;
6298 	count = 0;
6299 
6300 	ndi_devi_enter(pdip);
6301 
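	/*
	 * Walk each path through this pHCI, filling in at most num_elems
	 * entries of the caller-supplied buffer.
	 */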
6302 	done = (count >= num_elems);
6303 	pip = mdi_get_next_client_path(pdip, NULL);
6304 	while (pip && !done) {
6305 		mdi_pi_lock(pip);
6306 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6307 		    ret_pip->device.ret_phci);
6308 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6309 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6310 		    &ret_pip->ret_ext_state);
6311 
6312 		status = mdi_prop_size(pip, &prop_size);
6313 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6314 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6315 		}
6316 
6317 #ifdef DEBUG
6318 		if (status != MDI_SUCCESS) {
6319 			VHCI_DEBUG(2, (CE_WARN, NULL,
6320 			    "!vhci_get_phci_path_list: "
6321 			    "phci <%s>, prop size failure 0x%x",
6322 			    ret_pip->device.ret_phci, status));
6323 		}
6324 #endif /* DEBUG */
6325 
6326 
6327 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6328 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6329 			status = mdi_prop_pack(pip,
6330 			    &ret_pip->ret_prop.buf,
6331 			    ret_pip->ret_prop.buf_size);
6332 
6333 #ifdef DEBUG
6334 			if (status != MDI_SUCCESS) {
6335 				VHCI_DEBUG(2, (CE_WARN, NULL,
6336 				    "!vhci_get_phci_path_list: "
6337 				    "phci <%s>, prop pack failure 0x%x",
6338 				    ret_pip->device.ret_phci, status));
6339 			}
6340 #endif /* DEBUG */
6341 		}
6342 
6343 		mdi_pi_unlock(pip);
6344 		pip = mdi_get_next_client_path(pdip, pip);
6345 		ret_pip++;
6346 		count++;
6347 		done = (count >= num_elems);
6348 	}
6349 
6350 	ndi_devi_exit(pdip);
6351 
6352 	return (MDI_SUCCESS);
6353 }
6354 
6355 
6356 /*
6357  * vhci_get_client_path_list:
6358  *		get information about various paths associated with a
6359  *		given client device.
6360  *
6361  * Return Values:
6362  *		path information elements
6363  */
6364 int
6365 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
6366     uint_t num_elems)
6367 {
6368 	uint_t			count, done;
6369 	mdi_pathinfo_t		*pip;
6370 	sv_path_info_t		*ret_pip;
6371 	int			status;
6372 	size_t			prop_size;
6373 
6374 	ret_pip = pibuf;
6375 	count = 0;
6376 
6377 	ndi_devi_enter(cdip);
6378 
6379 	done = (count >= num_elems);
6380 	pip = mdi_get_next_phci_path(cdip, NULL);
6381 	while (pip && !done) {
6382 		mdi_pi_lock(pip);
6383 		(void) ddi_pathname(mdi_pi_get_phci(pip),
6384 		    ret_pip->device.ret_phci);
6385 		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6386 		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6387 		    &ret_pip->ret_ext_state);
6388 
6389 		status = mdi_prop_size(pip, &prop_size);
6390 		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6391 			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6392 		}
6393 
6394 #ifdef DEBUG
6395 		if (status != MDI_SUCCESS) {
6396 			VHCI_DEBUG(2, (CE_WARN, NULL,
6397 			    "!vhci_get_client_path_list: "
6398 			    "phci <%s>, prop size failure 0x%x",
6399 			    ret_pip->device.ret_phci, status));
6400 		}
6401 #endif /* DEBUG */
6402 
6403 
6404 		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6405 		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6406 			status = mdi_prop_pack(pip,
6407 			    &ret_pip->ret_prop.buf,
6408 			    ret_pip->ret_prop.buf_size);
6409 
6410 #ifdef DEBUG
6411 			if (status != MDI_SUCCESS) {
6412 				VHCI_DEBUG(2, (CE_WARN, NULL,
6413 				    "!vhci_get_client_path_list: "
6414 				    "phci <%s>, prop pack failure 0x%x",
6415 				    ret_pip->device.ret_phci, status));
6416 			}
6417 #endif /* DEBUG */
6418 		}
6419 
6420 		mdi_pi_unlock(pip);
6421 		pip = mdi_get_next_phci_path(cdip, pip);
6422 		ret_pip++;
6423 		count++;
6424 		done = (count >= num_elems);
6425 	}
6426 
6427 	ndi_devi_exit(cdip);
6428 
6429 	return (MDI_SUCCESS);
6430 }
6431 
6432 
6433 /*
6434  * Routine to get ioctl argument structure from userland.
6435  */
6436 /* ARGSUSED */
6437 static int
6438 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
6439 {
6440 	int	retval = 0;
6441 
6442 #ifdef  _MULTI_DATAMODEL
6443 	switch (ddi_model_convert_from(mode & FMODELS)) {
6444 	case DDI_MODEL_ILP32:
6445 	{
6446 		sv_iocdata32_t	ioc32;
6447 
6448 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6449 			retval = EFAULT;
6450 			break;
6451 		}
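		/* convert the 32-bit ioctl fields to native pointers/values */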
6452 		pioc->client	= (caddr_t)(uintptr_t)ioc32.client;
6453 		pioc->phci	= (caddr_t)(uintptr_t)ioc32.phci;
6454 		pioc->addr	= (caddr_t)(uintptr_t)ioc32.addr;
6455 		pioc->buf_elem	= (uint_t)ioc32.buf_elem;
6456 		pioc->ret_buf	= (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
6457 		pioc->ret_elem	= (uint_t *)(uintptr_t)ioc32.ret_elem;
6458 		break;
6459 	}
6460 
6461 	case DDI_MODEL_NONE:
6462 		if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6463 			retval = EFAULT;
6464 			break;
6465 		}
6466 		break;
6467 	}
6468 #else   /* _MULTI_DATAMODEL */
6469 	if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6470 		retval = EFAULT;
6471 	}
6472 #endif  /* _MULTI_DATAMODEL */
6473 
6474 #ifdef DEBUG
6475 	if (retval) {
6476 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6477 		    "iocdata copyin failed", s));
6478 	}
6479 #endif
6480 
6481 	return (retval);
6482 }
6483 
6484 
6485 /*
6486  * Routine to get the ioctl argument for ioctl causing controller switchover.
6487  */
6488 /* ARGSUSED */
6489 static int
6490 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
6491     int mode, caddr_t s)
6492 {
6493 	int	retval = 0;
6494 
6495 #ifdef  _MULTI_DATAMODEL
6496 	switch (ddi_model_convert_from(mode & FMODELS)) {
6497 	case DDI_MODEL_ILP32:
6498 	{
6499 		sv_switch_to_cntlr_iocdata32_t	ioc32;
6500 
6501 		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6502 			retval = EFAULT;
6503 			break;
6504 		}
6505 		piocsc->client	= (caddr_t)(uintptr_t)ioc32.client;
6506 		piocsc->class	= (caddr_t)(uintptr_t)ioc32.class;
6507 		break;
6508 	}
6509 
6510 	case DDI_MODEL_NONE:
6511 		if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6512 			retval = EFAULT;
6513 		}
6514 		break;
6515 	}
6516 #else   /* _MULTI_DATAMODEL */
6517 	if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6518 		retval = EFAULT;
6519 	}
6520 #endif  /* _MULTI_DATAMODEL */
6521 
6522 #ifdef DEBUG
6523 	if (retval) {
6524 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6525 		    "switch_to_cntlr_iocdata copyin failed", s));
6526 	}
6527 #endif
6528 
6529 	return (retval);
6530 }
6531 
6532 
6533 /*
6534  * Routine to allocate memory for the path information structures.
6535  * It allocates two chunks of memory: one for keeping the userland
6536  * pointers/values for path information and path properties, and a
6537  * second for allocating kernel memory for the path properties. These
6538  * path properties are finally copied to userland.
6539  */
6540 /* ARGSUSED */
6541 static int
6542 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
6543     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6544 {
6545 	sv_path_info_t	*pi;
6546 	uint_t		bufsize;
6547 	int		retval = 0;
6548 	int		index;
6549 
6550 	/* Allocate memory */
6551 	*upibuf = (sv_path_info_t *)
6552 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6553 	ASSERT(*upibuf != NULL);
6554 	*kpibuf = (sv_path_info_t *)
6555 	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6556 	ASSERT(*kpibuf != NULL);
6557 
6558 	/*
6559 	 * Get the path info structure from the user space.
6560 	 * We are interested in the following fields:
6561 	 *	- user size of buffer for per path properties.
6562 	 *	- user address of buffer for path info properties.
6563 	 *	- user pointer for returning actual buffer size
6564 	 * Keep these fields in the 'upibuf' structures.
6565 	 * Allocate buffer for per path info properties in kernel
6566 	 * structure ('kpibuf').
6567 	 * Size of these buffers will be equal to the size of buffers
6568 	 * in the user space.
6569 	 */
6570 #ifdef  _MULTI_DATAMODEL
6571 	switch (ddi_model_convert_from(mode & FMODELS)) {
6572 	case DDI_MODEL_ILP32:
6573 	{
6574 		sv_path_info32_t	*src;
6575 		sv_path_info32_t	pi32;
6576 
6577 		src  = (sv_path_info32_t *)pioc->ret_buf;
6578 		pi = (sv_path_info_t *)*upibuf;
6579 		for (index = 0; index < num_paths; index++, src++, pi++) {
6580 			if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
6581 				retval = EFAULT;
6582 				break;
6583 			}
6584 
6585 			pi->ret_prop.buf_size	=
6586 			    (uint_t)pi32.ret_prop.buf_size;
6587 			pi->ret_prop.ret_buf_size =
6588 			    (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
6589 			pi->ret_prop.buf	=
6590 			    (caddr_t)(uintptr_t)pi32.ret_prop.buf;
6591 		}
6592 		break;
6593 	}
6594 
6595 	case DDI_MODEL_NONE:
6596 		if (ddi_copyin(pioc->ret_buf, *upibuf,
6597 		    sizeof (sv_path_info_t) * num_paths, mode)) {
6598 			retval = EFAULT;
6599 		}
6600 		break;
6601 	}
6602 #else   /* _MULTI_DATAMODEL */
6603 	if (ddi_copyin(pioc->ret_buf, *upibuf,
6604 	    sizeof (sv_path_info_t) * num_paths, mode)) {
6605 		retval = EFAULT;
6606 	}
6607 #endif  /* _MULTI_DATAMODEL */
6608 
6609 	if (retval != 0) {
6610 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
6611 		    "ioctl <%s> normal: path_info copyin failed", s));
6612 		kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
6613 		kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
6614 		*upibuf = NULL;
6615 		*kpibuf = NULL;
6616 		return (retval);
6617 	}
6618 
6619 	/*
6620 	 * Allocate memory for per path properties.
6621 	 */
6622 	for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
6623 		bufsize = (*upibuf)[index].ret_prop.buf_size;
6624 
6625 		if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
6626 			pi->ret_prop.buf_size = bufsize;
6627 			pi->ret_prop.buf = (caddr_t)
6628 			    kmem_zalloc(bufsize, KM_SLEEP);
6629 			ASSERT(pi->ret_prop.buf != NULL);
6630 		} else {
6631 			pi->ret_prop.buf_size = 0;
6632 			pi->ret_prop.buf = NULL;
6633 		}
6634 
6635 		if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
6636 			pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
6637 			    sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
6638 			ASSERT(pi->ret_prop.ret_buf_size != NULL);
6639 		} else {
6640 			pi->ret_prop.ret_buf_size = NULL;
6641 		}
6642 	}
6643 
6644 	return (0);
6645 }
6646 
6647 
6648 /*
6649  * Routine to free memory for the path information structures.
6650  * This is the memory that was allocated earlier by vhci_ioc_alloc_pathinfo().
6651  */
6652 /* ARGSUSED */
6653 static void
6654 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6655     uint_t num_paths)
6656 {
6657 	sv_path_info_t	*pi;
6658 	int		index;
6659 
6660 	/* Free memory for per path properties */
6661 	for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
6662 		if (pi->ret_prop.ret_buf_size != NULL) {
6663 			kmem_free(pi->ret_prop.ret_buf_size,
6664 			    sizeof (*pi->ret_prop.ret_buf_size));
6665 		}
6666 
6667 		if (pi->ret_prop.buf != NULL) {
6668 			kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
6669 		}
6670 	}
6671 
6672 	/* Free memory for path info structures */
6673 	kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
6674 	kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
6675 }
6676 
6677 
6678 /*
6679  * Routine to copy path information and path properties to userland.
6680  */
6681 /* ARGSUSED */
6682 static int
6683 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6684     uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6685 {
6686 	int			retval = 0, index;
6687 	sv_path_info_t		*upi_ptr;
6688 	sv_path_info32_t	*upi32_ptr;
6689 
6690 #ifdef  _MULTI_DATAMODEL
6691 	switch (ddi_model_convert_from(mode & FMODELS)) {
6692 	case DDI_MODEL_ILP32:
6693 		goto copy_32bit;
6694 
6695 	case DDI_MODEL_NONE:
6696 		goto copy_normal;
6697 	}
6698 #else   /* _MULTI_DATAMODEL */
6699 
6700 	goto copy_normal;
6701 
6702 #endif  /* _MULTI_DATAMODEL */
6703 
6704 copy_normal:
6705 
6706 	/*
6707 	 * Copy path information and path properties to user land.
6708 	 * Pointer fields inside the path property structure were
6709 	 * saved in the 'upibuf' structure earlier.
6710 	 */
6711 	upi_ptr = pioc->ret_buf;
6712 	for (index = 0; index < num_paths; index++) {
6713 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6714 		    upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6715 			retval = EFAULT;
6716 			break;
6717 		}
6718 
6719 		if (ddi_copyout(kpibuf[index].ret_addr,
6720 		    upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6721 			retval = EFAULT;
6722 			break;
6723 		}
6724 
6725 		if (ddi_copyout(&kpibuf[index].ret_state,
6726 		    &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state),
6727 		    mode)) {
6728 			retval = EFAULT;
6729 			break;
6730 		}
6731 
6732 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6733 		    &upi_ptr[index].ret_ext_state,
6734 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6735 			retval = EFAULT;
6736 			break;
6737 		}
6738 
6739 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6740 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6741 		    upibuf[index].ret_prop.ret_buf_size,
6742 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6743 			retval = EFAULT;
6744 			break;
6745 		}
6746 
6747 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6748 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6749 		    upibuf[index].ret_prop.buf,
6750 		    upibuf[index].ret_prop.buf_size, mode)) {
6751 			retval = EFAULT;
6752 			break;
6753 		}
6754 	}
6755 
6756 #ifdef DEBUG
6757 	if (retval) {
6758 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6759 		    "normal: path_info copyout failed", s));
6760 	}
6761 #endif
6762 
6763 	return (retval);
6764 
6765 copy_32bit:
6766 	/*
6767 	 * Copy path information and path properties to user land.
6768 	 * Pointer fields inside the path property structure were
6769 	 * saved in the 'upibuf' structure earlier.
6770 	 */
6771 	upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
6772 	for (index = 0; index < num_paths; index++) {
6773 		if (ddi_copyout(kpibuf[index].device.ret_ct,
6774 		    upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6775 			retval = EFAULT;
6776 			break;
6777 		}
6778 
6779 		if (ddi_copyout(kpibuf[index].ret_addr,
6780 		    upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6781 			retval = EFAULT;
6782 			break;
6783 		}
6784 
6785 		if (ddi_copyout(&kpibuf[index].ret_state,
6786 		    &upi32_ptr[index].ret_state,
6787 		    sizeof (kpibuf[index].ret_state), mode)) {
6788 			retval = EFAULT;
6789 			break;
6790 		}
6791 
6792 		if (ddi_copyout(&kpibuf[index].ret_ext_state,
6793 		    &upi32_ptr[index].ret_ext_state,
6794 		    sizeof (kpibuf[index].ret_ext_state), mode)) {
6795 			retval = EFAULT;
6796 			break;
6797 		}
6798 		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6799 		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6800 		    upibuf[index].ret_prop.ret_buf_size,
6801 		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6802 			retval = EFAULT;
6803 			break;
6804 		}
6805 
6806 		if ((kpibuf[index].ret_prop.buf != NULL) &&
6807 		    ddi_copyout(kpibuf[index].ret_prop.buf,
6808 		    upibuf[index].ret_prop.buf,
6809 		    upibuf[index].ret_prop.buf_size, mode)) {
6810 			retval = EFAULT;
6811 			break;
6812 		}
6813 	}
6814 
6815 #ifdef DEBUG
6816 	if (retval) {
6817 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6818 		    "normal: path_info copyout failed", s));
6819 	}
6820 #endif
6821 
6822 	return (retval);
6823 }
6824 
6825 
6826 /*
6827  * vhci_failover()
6828  * This routine expects VHCI_HOLD_LUN before being invoked.  It can be invoked
6829  * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC.  For asynchronous failovers
6830  * this routine shall VHCI_RELEASE_LUN on exiting.  For synchronous failovers
6831  * it is the caller's responsibility to release the lun.
6832  */
6833 
6834 /* ARGSUSED */
6835 static int
6836 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
6837 {
6838 	char			*guid;
6839 	scsi_vhci_lun_t		*vlun = NULL;
6840 	struct scsi_vhci	*vhci;
6841 	mdi_pathinfo_t		*pip, *npip;
6842 	char			*s_pclass, *pclass1, *pclass2, *pclass;
6843 	char			active_pclass_copy[255], *active_pclass_ptr;
6844 	char			*ptr1, *ptr2;
6845 	mdi_pathinfo_state_t	pi_state;
6846 	uint32_t		pi_ext_state;
6847 	scsi_vhci_priv_t	*svp;
6848 	struct scsi_device	*sd;
6849 	struct scsi_failover_ops	*sfo;
6850 	int			sps; /* mdi_select_path() status */
6851 	int			activation_done = 0;
6852 	int			rval, retval = MDI_FAILURE;
6853 	int			reserve_pending, check_condition, UA_condition;
6854 	struct scsi_pkt		*pkt;
6855 	struct buf		*bp;
6856 	size_t			blksize;
6857 
6858 	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
6859 	sd = ddi_get_driver_private(cdip);
6860 	vlun = ADDR2VLUN(&sd->sd_address);
6861 	ASSERT(vlun != 0);
6862 	ASSERT(VHCI_LUN_IS_HELD(vlun));
6863 	guid = vlun->svl_lun_wwn;
6864 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
6865 	vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
6866 	    "(GUID %s)", ddi_node_name(cdip), guid);
6867 
6868 	blksize = vhci_get_blocksize(cdip);
6869 
6870 	/*
6871 	 * Let's maintain a local copy of vlun->svl_active_pclass
6872 	 * for the rest of the processing. Accessing the field
6873 	 * directly in the loop below breaks the loop logic when
6874 	 * the field is updated by other threads (updating path
6875 	 * status, etc.), causing the 'paths are not currently
6876 	 * available' condition to be declared prematurely.
6877 	 */
6878 	mutex_enter(&vlun->svl_mutex);
6879 	if (vlun->svl_active_pclass != NULL) {
6880 		(void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6881 		    sizeof (active_pclass_copy));
6882 		active_pclass_ptr = &active_pclass_copy[0];
6883 		mutex_exit(&vlun->svl_mutex);
6884 		if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6885 		    active_pclass_ptr) != 0) {
6886 			retval = MDI_FAILURE;
6887 		}
6888 	} else {
6889 		/*
6890 		 * This can happen only when the available path
6891 		 * discovered to the device is a STANDBY path.
6892 		 */
6893 		mutex_exit(&vlun->svl_mutex);
6894 		active_pclass_copy[0] = '\0';
6895 		active_pclass_ptr = NULL;
6896 	}
6897 
6898 	sfo = vlun->svl_fops;
6899 	ASSERT(sfo != NULL);
6900 	pclass1 = s_pclass = active_pclass_ptr;
6901 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6902 	    (s_pclass == NULL ? "<none>" : s_pclass)));
6903 
6904 next_pathclass:
6905 
6906 	rval = sfo->sfo_pathclass_next(pclass1, &pclass2,
6907 	    vlun->svl_fops_ctpriv);
6908 	if (rval == ENOENT) {
6909 		if (s_pclass == NULL) {
6910 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6911 			    "failed, no more pathclasses\n", guid));
6912 			goto done;
6913 		} else {
6914 			(void) sfo->sfo_pathclass_next(NULL, &pclass2,
6915 			    vlun->svl_fops_ctpriv);
6916 		}
6917 	} else if (rval == EINVAL) {
6918 		vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6919 		    "device %s (GUID %s): Invalid path-class %s",
6920 		    ddi_node_name(cdip), guid,
6921 		    ((pclass1 == NULL) ? "<none>" : pclass1));
6922 		goto done;
6923 	}
6924 	if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6925 		/*
6926 		 * paths are not currently available
6927 		 */
6928 		vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6929 		    " for device %s (GUID %s)",
6930 		    ddi_node_name(cdip), guid);
6931 		goto done;
6932 	}
6933 	pip = npip = NULL;
6934 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6935 	    "%s as failover destination\n", guid, pclass2));
6936 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6937 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6938 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6939 		    "STANDBY paths found (status:%x)!\n", guid, sps));
6940 		pclass1 = pclass2;
6941 		goto next_pathclass;
6942 	}
6943 	do {
6944 		pclass = NULL;
6945 		if ((mdi_prop_lookup_string(npip, "path-class",
6946 		    &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6947 		    pclass) != 0)) {
6948 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6949 			    "!vhci_failover(5.5)(%s): skipping path "
6950 			    "%p(%s)...\n", guid, (void *)npip, pclass));
6951 			pip = npip;
6952 			sps = mdi_select_path(cdip, NULL,
6953 			    MDI_SELECT_STANDBY_PATH, pip, &npip);
6954 			mdi_rele_path(pip);
6955 			(void) mdi_prop_free(pclass);
6956 			continue;
6957 		}
6958 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6959 
6960 		/*
6961 		 * Issue a READ at a non-zero block on this STANDBY path.
6962 		 * Purple returns
6963 		 * 1. RESERVATION_CONFLICT if a reservation is pending
6964 		 * 2. a POR check condition if a reset happened
6965 		 * 3. a failover check condition if one is already in progress
6966 		 */
6967 		reserve_pending = 0;
6968 		check_condition = 0;
6969 		UA_condition = 0;
6970 
6971 		bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
6972 		    (struct buf *)NULL, blksize, B_READ, NULL, NULL);
6973 		if (!bp) {
6974 			VHCI_DEBUG(1, (CE_NOTE, NULL,
6975 			    "vhci_failover !No resources (buf)\n"));
6976 			mdi_rele_path(npip);
6977 			goto done;
6978 		}
6979 		pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
6980 		    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
6981 		    PKT_CONSISTENT, NULL, NULL);
6982 		if (pkt) {
6983 			(void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
6984 			    pkt->pkt_cdbp, SCMD_READ_G1, 1, 1, 0);
6985 			pkt->pkt_flags = FLAG_NOINTR;
6986 check_path_again:
6987 			pkt->pkt_path_instance = mdi_pi_get_path_instance(npip);
6988 			pkt->pkt_time = 3 * 30;
6989 
6990 			if (scsi_transport(pkt) == TRAN_ACCEPT) {
6991 				switch (pkt->pkt_reason) {
6992 				case CMD_CMPLT:
6993 					switch (SCBP_C(pkt)) {
6994 					case STATUS_GOOD:
6995 						/* Already failed over */
6996 						activation_done = 1;
6997 						break;
6998 					case STATUS_RESERVATION_CONFLICT:
6999 						reserve_pending = 1;
7000 						break;
7001 					case STATUS_CHECK:
7002 						check_condition = 1;
7003 						break;
7004 					}
7005 				}
7006 			}
7007 			if (check_condition &&
7008 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
7009 				uint8_t *sns, skey, asc, ascq;
7010 				sns = (uint8_t *)
7011 				    &(((struct scsi_arq_status *)(uintptr_t)
7012 				    (pkt->pkt_scbp))->sts_sensedata);
7013 				skey = scsi_sense_key(sns);
7014 				asc = scsi_sense_asc(sns);
7015 				ascq = scsi_sense_ascq(sns);
7016 				if (skey == KEY_UNIT_ATTENTION &&
7017 				    asc == 0x29) {
7018 					/* Already failed over */
7019 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7020 					    "!vhci_failover(7)(%s): "
7021 					    "path 0x%p POR UA condition\n",
7022 					    guid, (void *)npip));
7023 					if (UA_condition == 0) {
7024 						UA_condition = 1;
7025 						goto check_path_again;
7026 					}
7027 				} else {
7028 					activation_done = 0;
7029 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7030 					    "!vhci_failover(%s): path 0x%p "
7031 					    "unhandled chkcond %x %x %x\n",
7032 					    guid, (void *)npip, skey,
7033 					    asc, ascq));
7034 				}
7035 			}
7036 			scsi_destroy_pkt(pkt);
7037 		}
7038 		scsi_free_consistent_buf(bp);
7039 
7040 		if (activation_done) {
7041 			mdi_rele_path(npip);
7042 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7043 			    "path 0x%p already failedover\n", guid,
7044 			    (void *)npip));
7045 			break;
7046 		}
7047 		if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
7048 			(void) vhci_recovery_reset(vlun,
7049 			    &svp->svp_psd->sd_address,
7050 			    FALSE, VHCI_DEPTH_ALL);
7051 		}
7052 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
7053 		    "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
7054 		    (void *)svp->svp_psd));
7055 		if (sfo->sfo_path_activate(svp->svp_psd, pclass2,
7056 		    vlun->svl_fops_ctpriv) == 0) {
7057 			activation_done = 1;
7058 			mdi_rele_path(npip);
7059 			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7060 			    "path 0x%p successfully activated\n", guid,
7061 			    (void *)npip));
7062 			break;
7063 		}
7064 		pip = npip;
7065 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
7066 		    pip, &npip);
7067 		mdi_rele_path(pip);
7068 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7069 	if (activation_done == 0) {
7070 		pclass1 = pclass2;
7071 		goto next_pathclass;
7072 	}
7073 
7074 	/*
7075 	 * If we are here, we have succeeded in activating path npip of
7076 	 * pathclass pclass2; let us validate all paths of pclass2 by
7077 	 * "ping"-ing each one and mark the good ones ONLINE.
7078 	 * Also, set the state of the paths belonging to the previously
7079 	 * active pathclass to STANDBY.
7080 	 */
7081 	pip = npip = NULL;
7082 	sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7083 	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
7084 	    NULL, &npip);
7085 	if (npip == NULL || sps != MDI_SUCCESS) {
7086 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
7087 		    "device %s (GUID %s): paths may be busy\n",
7088 		    ddi_node_name(cdip), guid));
7089 		goto done;
7090 	}
7091 	do {
7092 		(void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
7093 		if (mdi_prop_lookup_string(npip, "path-class", &pclass)
7094 		    != MDI_SUCCESS) {
7095 			pip = npip;
7096 			sps = mdi_select_path(cdip, NULL,
7097 			    (MDI_SELECT_ONLINE_PATH |
7098 			    MDI_SELECT_STANDBY_PATH |
7099 			    MDI_SELECT_USER_DISABLE_PATH),
7100 			    pip, &npip);
7101 			mdi_rele_path(pip);
7102 			continue;
7103 		}
7104 		if (strcmp(pclass, pclass2) == 0) {
7105 			if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
7106 				svp = (scsi_vhci_priv_t *)
7107 				    mdi_pi_get_vhci_private(npip);
7108 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7109 				    "!vhci_failover(8)(%s): "
7110 				    "pinging path 0x%p\n",
7111 				    guid, (void *)npip));
7112 				if (sfo->sfo_path_ping(svp->svp_psd,
7113 				    vlun->svl_fops_ctpriv) == 1) {
7114 					mdi_pi_set_state(npip,
7115 					    MDI_PATHINFO_STATE_ONLINE);
7116 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7117 					    "!vhci_failover(9)(%s): "
7118 					    "path 0x%p ping successful, "
7119 					    "marked online\n", guid,
7120 					    (void *)npip));
7121 					MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
7122 				}
7123 			}
7124 		} else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
7125 		    == 0)) {
7126 			if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
7127 				mdi_pi_set_state(npip,
7128 				    MDI_PATHINFO_STATE_STANDBY);
7129 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7130 				    "!vhci_failover(10)(%s): path 0x%p marked "
7131 				    "STANDBY\n", guid, (void *)npip));
7132 				MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
7133 			}
7134 		}
7135 		(void) mdi_prop_free(pclass);
7136 		pip = npip;
7137 		sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7138 		    MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
7139 		    pip, &npip);
7140 		mdi_rele_path(pip);
7141 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7142 
7143 	/*
7144 	 * Update the AccessState of related MP-API TPGs
7145 	 */
7146 	(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
7147 
7148 	vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
7149 	    "for device %s (GUID %s): failed over from %s to %s",
7150 	    ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
7151 	    s_pclass), pclass2);
7152 	ptr1 = kmem_alloc(strlen(pclass2) + 1, KM_SLEEP);
7153 	(void) strlcpy(ptr1, pclass2, (strlen(pclass2) + 1));
7154 	mutex_enter(&vlun->svl_mutex);
7155 	ptr2 = vlun->svl_active_pclass;
7156 	vlun->svl_active_pclass = ptr1;
7157 	mutex_exit(&vlun->svl_mutex);
7158 	if (ptr2) {
7159 		kmem_free(ptr2, strlen(ptr2) + 1);
7160 	}
7161 	mutex_enter(&vhci->vhci_mutex);
7162 	scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
7163 	    &vhci->vhci_reset_notify_listf);
7164 	/* All reservations are cleared upon these resets. */
7165 	vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
7166 	mutex_exit(&vhci->vhci_mutex);
7167 	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
7168 	    "pathclass for %s is now %s\n", guid, pclass2));
7169 	retval = MDI_SUCCESS;
7170 
7171 done:
7172 	vlun->svl_failover_status = retval;
7173 	if (flags == MDI_FAILOVER_ASYNC) {
7174 		VHCI_RELEASE_LUN(vlun);
7175 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7176 		    "releasing lun, as failover was ASYNC\n"));
7177 	} else {
7178 		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7179 		    "NOT releasing lun, as failover was SYNC\n"));
7180 	}
7181 	return (retval);
7182 }
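/*
 * A minimal sketch (hypothetical, not part of this driver) of the
 * hold/release contract documented above vhci_failover(): the LUN is
 * held before requesting failover; a SYNC failover leaves the release
 * to the caller, while vhci_failover() itself releases for ASYNC.
 */
#if 0
static void
example_sync_failover(dev_info_t *vdip, dev_info_t *cdip,
    scsi_vhci_lun_t *vlun)
{
	int held;

	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
	if (mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC) != MDI_SUCCESS) {
		/* handle the failed failover */
	}
	VHCI_RELEASE_LUN(vlun);		/* SYNC: caller releases */
}
#endif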
7183 
7184 /*
7185  * vhci_client_attached is called after the successful attach of a
7186  * client devinfo node.
7187  */
7188 static void
7189 vhci_client_attached(dev_info_t *cdip)
7190 {
7191 	mdi_pathinfo_t	*pip;
7192 
7193 	/*
7194 	 * At this point the client has attached and its instance number is
7195 	 * valid, so we can set up kstats.  We need to do this here because it
7196 	 * is possible for paths to go online prior to client attach, in which
7197 	 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
7198 	 * was a noop.
7199 	 */
7200 	ndi_devi_enter(cdip);
7201 	for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
7202 	    pip = mdi_get_next_phci_path(cdip, pip))
7203 		vhci_kstat_create_pathinfo(pip);
7204 	ndi_devi_exit(cdip);
7205 }
7206 
7207 /*
7208  * quiesce all of the online paths
7209  */
7210 static int
7211 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
7212     char *guid, char *active_pclass_ptr)
7213 {
7214 	scsi_vhci_priv_t	*svp;
7215 	char			*s_pclass = NULL;
7216 	mdi_pathinfo_t		*npip, *pip;
7217 	int			sps;
7218 
7219 	/* quiesce currently active paths */
7220 	s_pclass = NULL;
7221 	pip = npip = NULL;
7222 	sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
7223 	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
7224 		return (1);
7225 	}
7226 	do {
7227 		if (mdi_prop_lookup_string(npip, "path-class",
7228 		    &s_pclass) != MDI_SUCCESS) {
7229 			mdi_rele_path(npip);
7230 			vhci_log(CE_NOTE, vdip, "!Failover operation failed "
7231 			    "for device %s (GUID %s) due to an internal "
7232 			    "error", ddi_node_name(cdip), guid);
7233 			return (1);
7234 		}
7235 		if (strcmp(s_pclass, active_pclass_ptr) == 0) {
7236 			/*
7237 			 * quiesce path. Free s_pclass since
7238 			 * we don't need it anymore
7239 			 */
7240 			VHCI_DEBUG(1, (CE_NOTE, NULL,
7241 			    "!vhci_failover(2)(%s): failing over "
7242 			    "from %s; quiescing path %p\n",
7243 			    guid, s_pclass, (void *)npip));
7244 			(void) mdi_prop_free(s_pclass);
7245 			svp = (scsi_vhci_priv_t *)
7246 			    mdi_pi_get_vhci_private(npip);
7247 			if (svp == NULL) {
7248 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7249 				    "!vhci_failover(2.5)(%s): no "
7250 				    "client priv! %p offlined?\n",
7251 				    guid, (void *)npip));
7252 				pip = npip;
7253 				sps = mdi_select_path(cdip, NULL,
7254 				    MDI_SELECT_ONLINE_PATH, pip, &npip);
7255 				mdi_rele_path(pip);
7256 				continue;
7257 			}
7258 			if (scsi_abort(&svp->svp_psd->sd_address, NULL)
7259 			    == 0) {
7260 				(void) vhci_recovery_reset(vlun,
7261 				    &svp->svp_psd->sd_address, FALSE,
7262 				    VHCI_DEPTH_TARGET);
7263 			}
7264 			mutex_enter(&svp->svp_mutex);
7265 			if (svp->svp_cmds == 0) {
7266 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7267 				    "!vhci_failover(3)(%s):"
7268 				    "quiesced path %p\n", guid, (void *)npip));
7269 			} else {
7270 				while (svp->svp_cmds != 0) {
7271 					cv_wait(&svp->svp_cv, &svp->svp_mutex);
7272 					VHCI_DEBUG(1, (CE_NOTE, NULL,
7273 					    "!vhci_failover(3.cv)(%s):"
7274 					    "quiesced path %p\n", guid,
7275 					    (void *)npip));
7276 				}
7277 			}
7278 			mutex_exit(&svp->svp_mutex);
7279 		} else {
7280 			/*
7281 			 * make sure we free up the memory
7282 			 */
7283 			(void) mdi_prop_free(s_pclass);
7284 		}
7285 		pip = npip;
7286 		sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
7287 		    pip, &npip);
7288 		mdi_rele_path(pip);
7289 	} while ((npip != NULL) && (sps == MDI_SUCCESS));
7290 	return (0);
7291 }
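/*
 * Sketch of the counterpart to the quiesce wait above (hypothetical;
 * the real signalling lives in this driver's I/O completion path):
 * whoever decrements svp_cmds must broadcast svp_cv so waiters in
 * vhci_quiesce_paths() and vhci_quiesce_lun() can make progress.
 */
#if 0
static void
example_cmd_done(scsi_vhci_priv_t *svp)
{
	mutex_enter(&svp->svp_mutex);
	if (--svp->svp_cmds == 0)
		cv_broadcast(&svp->svp_cv);
	mutex_exit(&svp->svp_mutex);
}
#endif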
7292 
7293 static struct scsi_vhci_lun *
7294 vhci_lun_lookup(dev_info_t *tgt_dip)
7295 {
7296 	return ((struct scsi_vhci_lun *)
7297 	    mdi_client_get_vhci_private(tgt_dip));
7298 }
7299 
7300 static struct scsi_vhci_lun *
7301 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
7302 {
7303 	struct scsi_vhci_lun *svl;
7304 
7305 	if (svl = vhci_lun_lookup(tgt_dip)) {
7306 		return (svl);
7307 	}
7308 
7309 	svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
7310 	svl->svl_lun_wwn = kmem_zalloc(strlen(guid) + 1, KM_SLEEP);
7311 	(void) strcpy(svl->svl_lun_wwn,  guid);
7312 	mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
7313 	cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
7314 	sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
7315 	svl->svl_waiting_for_activepath = 1;
7316 	svl->svl_sector_size = 1;
7317 	mdi_client_set_vhci_private(tgt_dip, svl);
7318 	*didalloc = 1;
7319 	VHCI_DEBUG(1, (CE_NOTE, NULL,
7320 	    "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
7321 	    guid, (void *)svl));
7322 	return (svl);
7323 }
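/*
 * Hypothetical use of the lookup-or-allocate pattern above: 'didalloc'
 * tells the caller whether it owns a fresh allocation and so must undo
 * it with vhci_lun_free() if a later step fails.  The error condition
 * here is a placeholder.
 */
#if 0
	int didalloc = 0;
	struct scsi_vhci_lun *svl;

	svl = vhci_lun_lookup_alloc(tgt_dip, guid, &didalloc);
	if (later_step_failed && didalloc)
		vhci_lun_free(svl, NULL);
#endif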
7324 
7325 static void
7326 vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd)
7327 {
7328 	char *guid;
7329 
7330 	guid = dvlp->svl_lun_wwn;
7331 	ASSERT(guid != NULL);
7332 	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));
7333 
7334 	mutex_enter(&dvlp->svl_mutex);
7335 	if (dvlp->svl_active_pclass != NULL) {
7336 		kmem_free(dvlp->svl_active_pclass,
7337 		    strlen(dvlp->svl_active_pclass) + 1);
7338 	}
7339 	dvlp->svl_active_pclass = NULL;
7340 	mutex_exit(&dvlp->svl_mutex);
7341 
7342 	if (dvlp->svl_lun_wwn != NULL) {
7343 		kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn) + 1);
7344 	}
7345 	dvlp->svl_lun_wwn = NULL;
7346 
7347 	if (dvlp->svl_fops_name) {
7348 		kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name) + 1);
7349 	}
7350 	dvlp->svl_fops_name = NULL;
7351 
7352 	if (dvlp->svl_fops_ctpriv != NULL &&
7353 	    dvlp->svl_fops != NULL) {
7354 		dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
7355 	}
7356 
7357 	if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
7358 		taskq_destroy(dvlp->svl_taskq);
7359 
7360 	mutex_destroy(&dvlp->svl_mutex);
7361 	cv_destroy(&dvlp->svl_cv);
7362 	sema_destroy(&dvlp->svl_pgr_sema);
7363 	kmem_free(dvlp, sizeof (*dvlp));
7364 	/*
7365 	 * vhci_lun_free may be called before the tgt_dip
7366 	 * initialization so check if the sd is NULL.
7367 	 */
7368 	if (sd != NULL)
7369 		scsi_device_hba_private_set(sd, NULL);
7370 }
7371 
7372 int
7373 vhci_do_scsi_cmd(struct scsi_pkt *pkt)
7374 {
7375 	int	err = 0;
7376 	int	retry_cnt = 0;
7377 	uint8_t	*sns, skey;
7378 
7379 #ifdef DEBUG
7380 	if (vhci_debug > 5) {
7381 		vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip,
7382 		    CE_WARN, "Vhci command", pkt->pkt_cdbp);
7383 	}
7384 #endif
7385 
7386 retry:
7387 	err = scsi_poll(pkt);
7388 	if (err) {
7389 		if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
7390 			if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
7391 				VHCI_DEBUG(1, (CE_NOTE, NULL,
7392 				    "!v_s_do_s_c: RELEASE conflict\n"));
7393 				return (0);
7394 			}
7395 		}
7396 		if (retry_cnt++ < 6) {
7397 			VHCI_DEBUG(1, (CE_WARN, NULL,
7398 			    "!v_s_do_s_c:retry packet 0x%p "
7399 			    "status 0x%x reason %s",
7400 			    (void *)pkt, SCBP_C(pkt),
7401 			    scsi_rname(pkt->pkt_reason)));
7402 			if ((pkt->pkt_reason == CMD_CMPLT) &&
7403 			    (SCBP_C(pkt) == STATUS_CHECK) &&
7404 			    (pkt->pkt_state & STATE_ARQ_DONE)) {
7405 				sns = (uint8_t *)
7406 				    &(((struct scsi_arq_status *)(uintptr_t)
7407 				    (pkt->pkt_scbp))->sts_sensedata);
7408 				skey = scsi_sense_key(sns);
7409 				VHCI_DEBUG(1, (CE_WARN, NULL,
7410 				    "!v_s_do_s_c:retry "
7411 				    "packet 0x%p  sense data %s", (void *)pkt,
7412 				    scsi_sname(skey)));
7413 			}
7414 			goto retry;
7415 		}
7416 		VHCI_DEBUG(1, (CE_WARN, NULL,
7417 		    "!v_s_do_s_c: failed transport 0x%p 0x%x",
7418 		    (void *)pkt, SCBP_C(pkt)));
7419 		return (0);
7420 	}
7421 
7422 	switch (pkt->pkt_reason) {
7423 		case CMD_TIMEOUT:
7424 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
7425 			    "out (pkt 0x%p)", (void *)pkt));
7426 			return (0);
7427 		case CMD_CMPLT:
7428 			switch (SCBP_C(pkt)) {
7429 				case STATUS_GOOD:
7430 					break;
7431 				case STATUS_CHECK:
7432 					if (pkt->pkt_state & STATE_ARQ_DONE) {
7433 						sns = (uint8_t *)&(((
7434 						    struct scsi_arq_status *)
7435 						    (uintptr_t)
7436 						    (pkt->pkt_scbp))->
7437 						    sts_sensedata);
7438 						skey = scsi_sense_key(sns);
7439 						if ((skey ==
7440 						    KEY_UNIT_ATTENTION) ||
7441 						    (skey ==
7442 						    KEY_NOT_READY)) {
7443 							/*
7444 							 * clear unit attn.
7445 							 */
7446 
7447 							VHCI_DEBUG(1,
7448 							    (CE_WARN, NULL,
7449 							    "!v_s_do_s_c: "
7450 							    "retry "
7451 							    "packet 0x%p sense "
7452 							    "data %s",
7453 							    (void *)pkt,
7454 							    scsi_sname
7455 							    (skey)));
7456 							goto retry;
7457 						}
7458 						VHCI_DEBUG(4, (CE_WARN, NULL,
7459 						    "!ARQ while "
7460 						    "transporting "
7461 						    "(pkt 0x%p)",
7462 						    (void *)pkt));
7463 						return (0);
7464 					}
7465 					return (0);
7466 				default:
7467 					VHCI_DEBUG(1, (CE_WARN, NULL,
7468 					    "!Bad status returned "
7469 					    "(pkt 0x%p, status %x)",
7470 					    (void *)pkt, SCBP_C(pkt)));
7471 					return (0);
7472 			}
7473 			break;
7474 		case CMD_INCOMPLETE:
7475 		case CMD_RESET:
7476 		case CMD_ABORTED:
7477 		case CMD_TRAN_ERR:
7478 			if (retry_cnt++ < 1) {
7479 				VHCI_DEBUG(1, (CE_WARN, NULL,
7480 				    "!v_s_do_s_c: retry packet 0x%p %s",
7481 				    (void *)pkt, scsi_rname(pkt->pkt_reason)));
7482 				goto retry;
7483 			}
7484 			/* FALLTHROUGH */
7485 		default:
7486 			VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
7487 			    "complete successfully (pkt 0x%p,"
7488 			    "reason %x)", (void *)pkt, pkt->pkt_reason));
7489 			return (0);
7490 	}
7491 	return (1);
7492 }
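/*
 * Hypothetical caller of vhci_do_scsi_cmd(): it expects a fully built,
 * polled packet with room for auto request sense (as allocated in
 * vhci_failover() above), and returns 1 on success, 0 on failure.
 */
#if 0
	pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, CDB_GROUP1,
	    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, NULL, NULL);
	pkt->pkt_flags = FLAG_NOINTR;
	if (vhci_do_scsi_cmd(pkt) != 1) {
		/* the command failed even after internal retries */
	}
	scsi_destroy_pkt(pkt);
#endif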
7493 
7494 static int
7495 vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
7496 {
7497 	mdi_pathinfo_t		*pip, *spip;
7498 	dev_info_t		*cdip;
7499 	struct scsi_vhci_priv	*svp;
7500 	mdi_pathinfo_state_t	pstate;
7501 	uint32_t		p_ext_state;
7502 
7503 	cdip = vlun->svl_dip;
7504 	pip = spip = NULL;
7505 	ndi_devi_enter(cdip);
7506 	pip = mdi_get_next_phci_path(cdip, NULL);
7507 	while (pip != NULL) {
7508 		(void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
7509 		if (pstate != MDI_PATHINFO_STATE_ONLINE) {
7510 			spip = pip;
7511 			pip = mdi_get_next_phci_path(cdip, spip);
7512 			continue;
7513 		}
7514 		mdi_hold_path(pip);
7515 		ndi_devi_exit(cdip);
7516 		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7517 		mutex_enter(&svp->svp_mutex);
7518 		while (svp->svp_cmds != 0) {
7519 			if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
7520 			    drv_usectohz(vhci_path_quiesce_timeout * 1000000),
7521 			    TR_CLOCK_TICK) == -1) {
7522 				mutex_exit(&svp->svp_mutex);
7523 				mdi_rele_path(pip);
7524 				VHCI_DEBUG(1, (CE_WARN, NULL,
7525 				    "Quiesce of lun is not successful "
7526 				    "vlun: 0x%p.", (void *)vlun));
7527 				return (0);
7528 			}
7529 		}
7530 		mutex_exit(&svp->svp_mutex);
7531 		ndi_devi_enter(cdip);
7532 		spip = pip;
7533 		pip = mdi_get_next_phci_path(cdip, spip);
7534 		mdi_rele_path(spip);
7535 	}
7536 	ndi_devi_exit(cdip);
7537 	return (1);
7538 }
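/*
 * Note on the timeout in vhci_quiesce_lun() above:
 * vhci_path_quiesce_timeout is in seconds (60 by default), so
 * drv_usectohz(timeout * 1000000) converts it to the tick count that
 * cv_reltimedwait(9F) expects; -1 from cv_reltimedwait() means the
 * wait timed out without svp_cmds draining.
 */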
7539 
7540 static int
7541 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
7542 {
7543 	scsi_vhci_lun_t		*vlun;
7544 	vhci_prout_t		*prout;
7545 	int			rval, success;
7546 	mdi_pathinfo_t		*pip, *npip;
7547 	scsi_vhci_priv_t	*osvp;
7548 	dev_info_t		*cdip;
7549 	uchar_t			cdb_1;
7550 	uchar_t			temp_res_key[MHIOC_RESV_KEY_SIZE];
7551 
7552 
7553 	/*
7554 	 * see if there are any other paths available; if none,
7555 	 * then there is nothing to do.
7556 	 */
7557 	cdip = svp->svp_svl->svl_dip;
7558 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7559 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7560 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7561 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7562 		    "%s%d: vhci_pgr_validate_and_register: first path\n",
7563 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7564 		return (1);
7565 	}
7566 
7567 	vlun = svp->svp_svl;
7568 	prout = &vlun->svl_prout;
7569 	ASSERT(vlun->svl_pgr_active != 0);
7570 
7571 	/*
7572 	 * When the path was busy/offlined, some other host might have
7573 	 * cleared this key. Validate key on some other path first.
7574 	 * If it fails, return failure.
7575 	 */
7576 
7577 	npip = pip;
7578 	pip = NULL;
7579 	success = 0;
7580 
7581 	/* Save the res key */
7582 	bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE);
7583 
7584 	/*
7585 	 * Sometimes the CDB from the application can be a Register_And_Ignore.
7586 	 * Instead of validating, such a CDB would force a registration.
7587 	 * Convert it to a normal CDB for validation.
7588 	 * After that, be sure to restore the CDB.
7589 	 */
7590 	cdb_1 = vlun->svl_cdb[1];
7591 	vlun->svl_cdb[1] &= 0xe0;
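	/*
	 * (In a PERSISTENT RESERVE OUT CDB, byte 1 bits 4:0 hold the
	 * service action; masking with 0xe0 clears it, turning a
	 * Register_And_Ignore into a plain REGISTER for validation.)
	 */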
7592 
7593 	do {
7594 		osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
7595 		if (osvp == NULL) {
7596 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7597 			    "vhci_pgr_validate_and_register: no "
7598 			    "client priv! 0x%p offlined?\n",
7599 			    (void *)npip));
7600 			goto next_path_1;
7601 		}
7602 
7603 		if (osvp == svp) {
7604 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7605 			    "vhci_pgr_validate_and_register: same svp 0x%p"
7606 			    " npip 0x%p vlun 0x%p\n",
7607 			    (void *)svp, (void *)npip, (void *)vlun));
7608 			goto next_path_1;
7609 		}
7610 
7611 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7612 		    "vhci_pgr_validate_and_register: First validate on"
7613 		    " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
7614 		    " cdb1 %x\n", (void *)osvp, (void *)vlun,
7615 		    (void *)curthread, vlun->svl_cdb[1]));
7616 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");
7617 
7618 		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7619 
7620 		VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
7621 		    (void *)vlun));
7622 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7623 
7624 		rval = vhci_do_prout(osvp);
7625 		if (rval == 1) {
7626 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7627 			    "%s%d: vhci_pgr_validate_and_register: key"
7628 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7629 			    ddi_get_instance(cdip), (void *)curthread));
7630 			pip = npip;
7631 			success = 1;
7632 			break;
7633 		} else {
7634 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7635 			    "vhci_pgr_validate_and_register: First validation"
7636 			    " on osvp 0x%p failed %x\n", (void *)osvp, rval));
7637 			vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
7638 		}
7639 
7640 		/*
7641 		 * Try other paths
7642 		 */
7643 next_path_1:
7644 		pip = npip;
7645 		rval = mdi_select_path(cdip, NULL,
7646 		    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
7647 		    pip, &npip);
7648 		mdi_rele_path(pip);
7649 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7650 
7651 
7652 	/* Be sure to restore original cdb */
7653 	vlun->svl_cdb[1] = cdb_1;
7654 
7655 	/* Restore the res_key */
7656 	bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7657 
7658 	/*
7659 	 * If the key could not be registered on any path for the first time,
7660 	 * return success, as the online should still continue.
7661 	 */
7662 	if (success == 0) {
7663 		return (1);
7664 	}
7665 
7666 	ASSERT(pip != NULL);
7667 
7668 	/*
7669 	 * Force register on new path
7670 	 */
7671 	cdb_1 = vlun->svl_cdb[1];		/* store the cdb */
7672 
7673 	vlun->svl_cdb[1] &= 0xe0;
7674 	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
7675 
7676 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");
7677 
7678 	bcopy(prout->active_service_key, prout->service_key,
7679 	    MHIOC_RESV_KEY_SIZE);
7680 	bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7681 
7682 	vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: ");
7683 
7684 	rval = vhci_do_prout(svp);
7685 	vlun->svl_cdb[1] = cdb_1;		/* restore the cdb */
7686 	if (rval != 1) {
7687 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7688 		    "vhci_pgr_validate_and_register: register on new"
7689 		    " path 0x%p svp 0x%p failed %x\n",
7690 		    (void *)pip, (void *)svp, rval));
7691 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
7692 		mdi_rele_path(pip);
7693 		return (0);
7694 	}
7695 
7696 	if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
7697 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7698 		    "vhci_pgr_validate_and_register: zero service key\n"));
7699 		mdi_rele_path(pip);
7700 		return (rval);
7701 	}
7702 
7703 	/*
7704 	 * While the key was force registered, some other host might have
7705 	 * cleared the key. Re-validate key on another pre-existing path
7706 	 * before declaring success.
7707 	 */
7708 	npip = pip;
7709 	pip = NULL;
7710 
7711 	/*
7712 	 * Sometimes the CDB from the application can be Register and Ignore.
7713 	 * Instead of validating, it would force a registration.
7714 	 * Convert it to a normal CDB for validation.
7715 	 * After that, be sure to restore the CDB.
7716 	 */
7717 	cdb_1 = vlun->svl_cdb[1];
7718 	vlun->svl_cdb[1] &= 0xe0;
7719 	success = 0;
7720 
7721 	do {
7722 		osvp = (scsi_vhci_priv_t *)
7723 		    mdi_pi_get_vhci_private(npip);
7724 		if (osvp == NULL) {
7725 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7726 			    "vhci_pgr_validate_and_register: no "
7727 			    "client priv! 0x%p offlined?\n",
7728 			    (void *)npip));
7729 			goto next_path_2;
7730 		}
7731 
7732 		if (osvp == svp) {
7733 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7734 			    "vhci_pgr_validate_and_register: same osvp 0x%p"
7735 			    " npip 0x%p vlun 0x%p\n",
7736 			    (void *)svp, (void *)npip, (void *)vlun));
7737 			goto next_path_2;
7738 		}
7739 
7740 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7741 		    "vhci_pgr_validate_and_register: Re-validation on"
7742 		    " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
7743 		    (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
7744 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7745 
7746 		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7747 
7748 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7749 
7750 		rval = vhci_do_prout(osvp);
7751 		if (rval == 1) {
7752 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7753 			    "%s%d: vhci_pgr_validate_and_register: key"
7754 			    " validated thread 0x%p\n", ddi_driver_name(cdip),
7755 			    ddi_get_instance(cdip), (void *)curthread));
7756 			pip = npip;
7757 			success = 1;
7758 			break;
7759 		} else {
7760 			VHCI_DEBUG(4, (CE_NOTE, NULL,
7761 			    "vhci_pgr_validate_and_register: Re-validation on"
7762 			    " osvp 0x%p failed %x\n", (void *)osvp, rval));
7763 			vhci_print_prout_keys(vlun,
7764 			    "v_pgr_val_reg: reval failed: ");
7765 		}
7766 
7767 		/*
7768 		 * Try other paths
7769 		 */
7770 next_path_2:
7771 		pip = npip;
7772 		rval = mdi_select_path(cdip, NULL,
7773 		    MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
7774 		    pip, &npip);
7775 		mdi_rele_path(pip);
7776 	} while ((rval == MDI_SUCCESS) && (npip != NULL));
7777 
7778 	/* Be sure to restore original cdb */
7779 	vlun->svl_cdb[1] = cdb_1;
7780 
7781 	if (success == 1) {
7782 		/* Successfully validated registration */
7783 		mdi_rele_path(pip);
7784 		return (1);
7785 	}
7786 
7787 	VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));
7788 
7789 	/*
7790 	 * The key is invalid; back out by registering a key value of 0.
7791 	 */
7792 	VHCI_DEBUG(4, (CE_NOTE, NULL,
7793 	    "vhci_pgr_validate_and_register: backout on"
7794 	    " svp 0x%p being done\n", (void *)svp));
7795 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7796 
7797 	bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7798 	bzero(prout->service_key, MHIOC_RESV_KEY_SIZE);
7799 
7800 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7801 
7802 	/*
7803 	 * Get a new path
7804 	 */
7805 	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7806 	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
7807 	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7808 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7809 		    "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
7810 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7811 		return (0);
7812 	}
7813 
7814 	if ((rval = vhci_do_prout(svp)) != 1) {
7815 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7816 		    "vhci_pgr_validate_and_register: backout on"
7817 		    " svp 0x%p failed\n", (void *)svp));
7818 		vhci_print_prout_keys(vlun, "backout failed");
7819 
7820 		VHCI_DEBUG(4, (CE_WARN, NULL,
7821 		    "%s%d: vhci_pgr_validate_and_register: key"
7822 		    " validation and backout failed", ddi_driver_name(cdip),
7823 		    ddi_get_instance(cdip)));
7824 		if (rval == VHCI_PGR_ILLEGALOP) {
7825 			VHCI_DEBUG(4, (CE_WARN, NULL,
7826 			    "%s%d: vhci_pgr_validate_and_register: key"
7827 			    " already cleared", ddi_driver_name(cdip),
7828 			    ddi_get_instance(cdip)));
7829 			rval = 1;
7830 		} else
7831 			rval = 0;
7832 	} else {
7833 		VHCI_DEBUG(4, (CE_NOTE, NULL,
7834 		    "%s%d: vhci_pgr_validate_and_register: key"
7835 		    " validation failed, key backed out\n",
7836 		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
7837 		vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
7838 	}
7839 	mdi_rele_path(pip);
7840 
7841 	return (rval);
7842 }
7843 
7844 /*
7845  * taskq routine to dispatch a scsi cmd to vhci_scsi_start.  This ensures
7846  * that vhci_scsi_start is not called in interrupt context.
7847  * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we
7848  * need to complete the command if something goes wrong.
7849  */
7850 static void
7851 vhci_dispatch_scsi_start(void *arg)
7852 {
7853 	struct vhci_pkt *vpkt	= (struct vhci_pkt *)arg;
7854 	struct scsi_pkt *tpkt	= vpkt->vpkt_tgt_pkt;
7855 	int rval		= TRAN_BUSY;
7856 
7857 	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
7858 	    " scsi-2 reserve for 0x%p\n",
7859 	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7860 
7861 	/*
7862 	 * To prevent the taskq from being called recursively we set
7863 	 * the VHCI_PKT_THRU_TASKQ bit in the vhci_pkt_states.
7864 	 */
7865 	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;
7866 
7867 	/*
7868 	 * Wait for the transport to get ready to send packets;
7869 	 * if it times out, it will return something other than
7870 	 * TRAN_BUSY. vhci_reserve_delay may need to be tuned
7871 	 * for other transports and is therefore a global.
7872 	 * Using delay() is safe since this routine is called from
7873 	 * taskq dispatch and not during interrupt context.
7874 	 */
7875 	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
7876 	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
7877 		delay(drv_usectohz(vhci_reserve_delay));
7878 	}
7879 
7880 	switch (rval) {
7881 	case TRAN_ACCEPT:
7882 		return;
7883 
7884 	default:
7885 		/*
7886 		 * This pkt shall be retried, and to ensure another taskq
7887 		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
7888 		 * flag.
7889 		 */
7890 		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
7891 
7892 		/* Ensure that the pkt is retried without a reset */
7893 		tpkt->pkt_reason = CMD_ABORTED;
7894 		tpkt->pkt_statistics |= STAT_ABORTED;
7895 		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
7896 		    "TRAN_rval %d returned for dip 0x%p", rval,
7897 		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7898 		break;
7899 	}
7900 
7901 	/*
7902 	 * vpkt_org_vpkt should always be NULL here if the retry command
7903 	 * has been successfully dispatched.  If vpkt_org_vpkt != NULL at
7904 	 * this point, it is an error so restore the original vpkt and
7905 	 * return an error to the target driver so it can retry the
7906 	 * command as appropriate.
7907 	 */
7908 	if (vpkt->vpkt_org_vpkt != NULL) {
7909 		struct vhci_pkt		*new_vpkt = vpkt;
7910 		scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
7911 		    mdi_pi_get_vhci_private(vpkt->vpkt_path);
7912 
7913 		vpkt = vpkt->vpkt_org_vpkt;
7914 
7915 		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
7916 		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;
7917 
7918 		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
7919 		    new_vpkt->vpkt_tgt_pkt);
7920 
7921 		tpkt = vpkt->vpkt_tgt_pkt;
7922 	}
7923 
7924 	scsi_hba_pkt_comp(tpkt);
7925 }
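/*
 * Hypothetical dispatch site for the routine above: queueing the vpkt
 * to a taskq is what keeps vhci_scsi_start() out of interrupt context.
 * The taskq field name is assumed here for illustration.
 */
#if 0
	if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
	    (void *)vpkt, KM_NOSLEEP) == TASKQID_INVALID) {
		/* fail the command so the target driver can retry it */
	}
#endif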
7926 
7927 static void
7928 vhci_initiate_auto_failback(void *arg)
7929 {
7930 	struct scsi_vhci_lun	*vlun = (struct scsi_vhci_lun *)arg;
7931 	dev_info_t		*vdip, *cdip;
7932 	int			held;
7933 
7934 	cdip = vlun->svl_dip;
7935 	vdip = ddi_get_parent(cdip);
7936 
7937 	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
7938 
7939 	/*
7940 	 * Perform a final check to see if the active path class is indeed
7941 	 * not the preferred path class.  In the time since the auto failback
7942 	 * was dispatched, an external failover could have been detected.
7943 	 * (Some other host could have detected this condition and triggered
7944 	 * the auto failback earlier.)
7945 	 * In such a case, going ahead with the failover would negate the
7946 	 * whole purpose of auto failback.
7947 	 */
7948 	mutex_enter(&vlun->svl_mutex);
7949 	if (vlun->svl_active_pclass != NULL) {
7950 		char				*best_pclass;
7951 		struct scsi_failover_ops	*fo;
7952 
7953 		fo = vlun->svl_fops;
7954 
7955 		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
7956 		    vlun->svl_fops_ctpriv);
7957 		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
7958 			mutex_exit(&vlun->svl_mutex);
7959 			VHCI_RELEASE_LUN(vlun);
7960 			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
7961 			    "auto failback for %s as %s pathclass already "
7962 			    "active.\n", vlun->svl_lun_wwn, best_pclass));
7963 			return;
7964 		}
7965 	}
7966 	mutex_exit(&vlun->svl_mutex);
7967 	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
7968 	    == MDI_SUCCESS) {
7969 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7970 		    "succeeded for device %s (GUID %s)",
7971 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7972 	} else {
7973 		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7974 		    "failed for device %s (GUID %s)",
7975 		    ddi_node_name(cdip), vlun->svl_lun_wwn);
7976 	}
7977 	VHCI_RELEASE_LUN(vlun);
7978 }
7979 
7980 #ifdef DEBUG
7981 static void
7982 vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
7983 {
7984 	vhci_clean_print(NULL, 5, "Current PGR Keys",
7985 	    (uchar_t *)prin, numkeys * 8);
7986 }
7987 #endif
7988 
7989 static void
7990 vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
7991 {
7992 	int			i;
7993 	vhci_prout_t		*prout;
7994 	char			buf1[4 * MHIOC_RESV_KEY_SIZE + 1];
7995 	char			buf2[4 * MHIOC_RESV_KEY_SIZE + 1];
7996 	char			buf3[4 * MHIOC_RESV_KEY_SIZE + 1];
7997 	char			buf4[4 * MHIOC_RESV_KEY_SIZE + 1];
7998 
7999 	prout = &vlun->svl_prout;
8000 
8001 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8002 		(void) sprintf(&buf1[4 * i], "[%02x]", prout->res_key[i]);
8003 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8004 		(void) sprintf(&buf2[(4 * i)], "[%02x]", prout->service_key[i]);
8005 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8006 		(void) sprintf(&buf3[4 * i], "[%02x]",
8007 		    prout->active_res_key[i]);
8008 	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8009 		(void) sprintf(&buf4[4 * i], "[%02x]",
8010 		    prout->active_service_key[i]);
8011 
8012 	/* Print everything in one go; otherwise the output gets jumbled. */
8013 	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
8014 	    "res_key           : %s\n"
8015 	    "service_key       : %s\n"
8016 	    "active_res_key    : %s\n"
8017 	    "active_service_key: %s\n",
8018 	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
8019 }
8020 
8021 /*
8022  * Called from vhci_scsi_start to update the pHCI pkt with target packet.
8023  */
8024 static void
8025 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
8026 {
8027 
8028 	ASSERT(vpkt->vpkt_hba_pkt);
8029 
8030 	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
8031 	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;
8032 
8033 	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
8034 	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
8035 		/*
8036 		 * A polled command is requested or the HBA is in
8037 		 * a suspended state
8038 		 */
8039 		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
8040 		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
8041 	} else {
8042 		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
8043 	}
8044 	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
8045 	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
8046 	    vpkt->vpkt_tgt_init_cdblen);
8047 	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;
8048 
8049 	/* Re-initialize the following pHCI packet state information */
8050 	vpkt->vpkt_hba_pkt->pkt_state = 0;
8051 	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
8052 	vpkt->vpkt_hba_pkt->pkt_reason = 0;
8053 }
8054 
8055 static int
8056 vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
8057     void *arg, void *result)
8058 {
8059 	int ret = DDI_SUCCESS;
8060 
8061 	/*
8062 	 * Generic processing in MPxIO framework
8063 	 */
8064 	ret = mdi_bus_power(parent, impl_arg, op, arg, result);
8065 
8066 	switch (ret) {
8067 	case MDI_SUCCESS:
8068 		ret = DDI_SUCCESS;
8069 		break;
8070 	case MDI_FAILURE:
8071 		ret = DDI_FAILURE;
8072 		break;
8073 	default:
8074 		break;
8075 	}
8076 
8077 	return (ret);
8078 }
8079 
8080 static int
8081 vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
8082     mdi_pathinfo_t *pip)
8083 {
8084 	dev_info_t		*cdip;
8085 	mdi_pathinfo_t		*npip = NULL;
8086 	scsi_vhci_priv_t	*svp = NULL;
8087 	struct scsi_address	*pap = NULL;
8088 	scsi_hba_tran_t		*hba = NULL;
8089 	int			sps;
8090 	int			mps_flag;
8091 	int			rval = 0;
8092 
8093 	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
8094 	if (pip) {
8095 		/*
8096 		 * If the call is from vhci_pathinfo_state_change,
8097 		 * then this path was busy and is becoming ready to accept IO.
8098 		 */
8099 		ASSERT(ap != NULL);
8100 		hba = ap->a_hba_tran;
8101 		ASSERT(hba != NULL);
8102 		rval = scsi_ifsetcap(ap, cap, val, whom);
8103 
8104 		VHCI_DEBUG(2, (CE_NOTE, NULL,
8105 		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
8106 		    (void *)pip, (void *)ap, rval));
8107 
8108 		return (rval);
8109 	}
8110 
8111 	/*
8112 	 * Set capability on all the pHCIs.
8113 	 * If any path is busy, then the capability would be set by
8114 	 * vhci_pathinfo_state_change.
8115 	 */
8116 
8117 	cdip = ADDR2DIP(ap);
8118 	ASSERT(cdip != NULL);
8119 	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
8120 	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
8121 		VHCI_DEBUG(2, (CE_WARN, NULL,
8122 		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
8123 		    (void *)cdip));
8124 		return (0);
8125 	}
8126 
8127 again:
8128 	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
8129 	if (svp == NULL) {
8130 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8131 		    "priv is NULL, pip 0x%p", (void *)pip));
8132 		mdi_rele_path(pip);
8133 		return (rval);
8134 	}
8135 
8136 	if (svp->svp_psd == NULL) {
8137 		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8138 		    "psd is NULL, pip 0x%p, svp 0x%p",
8139 		    (void *)pip, (void *)svp));
8140 		mdi_rele_path(pip);
8141 		return (rval);
8142 	}
8143 
8144 	pap = &svp->svp_psd->sd_address;
8145 	ASSERT(pap != NULL);
8146 	hba = pap->a_hba_tran;
8147 	ASSERT(hba != NULL);
8148 
8149 	if (hba->tran_setcap != NULL) {
8150 		rval = scsi_ifsetcap(pap, cap, val, whom);
8151 
8152 		VHCI_DEBUG(2, (CE_NOTE, NULL,
8153 		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
8154 		    (void *)pip, (void *)ap, rval));
8155 
8156 		/*
8157 		 * Select next path and issue the setcap, repeat
8158 		 * until all paths are exhausted
8159 		 */
8160 		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
8161 		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
8162 			mdi_rele_path(pip);
8163 			return (1);
8164 		}
8165 		mdi_rele_path(pip);
8166 		pip = npip;
8167 		goto again;
8168 	}
8169 	mdi_rele_path(pip);
8170 	return (rval);
8171 }
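/*
 * The path-walk idiom used above (and throughout this file), reduced
 * to a skeleton: mdi_select_path() returns a held path, the previous
 * path is passed back in as the cursor for the next lookup, and every
 * held path must be dropped with mdi_rele_path().
 */
#if 0
	pip = NULL;
	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
	while ((sps == MDI_SUCCESS) && (pip != NULL)) {
		mdi_pathinfo_t *next = NULL;

		/* ... operate on pip ... */
		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &next);
		mdi_rele_path(pip);
		pip = next;
	}
#endif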
8172 
8173 static int
8174 vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8175     void *arg, dev_info_t **child)
8176 {
8177 	char *guid;
8178 
8179 	if (vhci_bus_config_debug)
8180 		flags |= NDI_DEVI_DEBUG;
8181 
8182 	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
8183 		guid = vhci_devnm_to_guid((char *)arg);
8184 	else
8185 		guid = NULL;
8186 
8187 	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
8188 	    == MDI_SUCCESS)
8189 		return (NDI_SUCCESS);
8190 	else
8191 		return (NDI_FAILURE);
8192 }
8193 
8194 static int
8195 vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8196     void *arg)
8197 {
8198 	if (vhci_bus_config_debug)
8199 		flags |= NDI_DEVI_DEBUG;
8200 
8201 	return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
8202 }
8203 
8204 /*
8205  * Take the original vhci_pkt, create a duplicate of the pkt for resending
8206  * as though it originated in ssd.
8207  */
8208 static struct scsi_pkt *
8209 vhci_create_retry_pkt(struct vhci_pkt *vpkt)
8210 {
8211 	struct vhci_pkt *new_vpkt = NULL;
8212 	struct scsi_pkt	*pkt = NULL;
8213 
8214 	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
8215 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8216 
8217 	/*
8218 	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
8219 	 */
8220 	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
8221 	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
8222 	    vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
8223 	if (pkt != NULL) {
8224 		new_vpkt = TGTPKT2VHCIPKT(pkt);
8225 
8226 		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
8227 		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
8228 		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
8229 		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;
8230 
8231 		pkt->pkt_resid = 0;
8232 		pkt->pkt_statistics = 0;
8233 		pkt->pkt_reason = 0;
8234 
8235 		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
8236 		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);
8237 
8238 		/*
8239 		 * Save a pointer to the original vhci_pkt
8240 		 */
8241 		new_vpkt->vpkt_org_vpkt = vpkt;
8242 	}
8243 
8244 	return (pkt);
8245 }
8246 
8247 /*
8248  * Copy the successful completion information from the hba packet into
8249  * the original target pkt from the upper layer.  Returns the original
8250  * vpkt and destroys the new vpkt from the internal retry.
8251  */
8252 static struct vhci_pkt *
8253 vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
8254 {
8255 	struct vhci_pkt		*ret_vpkt = NULL;
8256 	struct scsi_pkt		*tpkt = NULL;
8257 	struct scsi_pkt		*hba_pkt = NULL;
8258 	scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
8259 	    mdi_pi_get_vhci_private(vpkt->vpkt_path);
8260 
8261 	ASSERT(vpkt->vpkt_org_vpkt != NULL);
8262 	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
8263 	    "completed successfully!\n"));
8264 
8265 	ret_vpkt = vpkt->vpkt_org_vpkt;
8266 	tpkt = ret_vpkt->vpkt_tgt_pkt;
8267 	hba_pkt = vpkt->vpkt_hba_pkt;
8268 
8269 	/*
8270 	 * Copy the good status into the target driver's packet
8271 	 */
8272 	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
8273 	tpkt->pkt_resid = hba_pkt->pkt_resid;
8274 	tpkt->pkt_state = hba_pkt->pkt_state;
8275 	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
8276 	tpkt->pkt_reason = hba_pkt->pkt_reason;
8277 
8278 	/*
8279 	 * Destroy the internally created vpkt for the retry
8280 	 */
8281 	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
8282 	    vpkt->vpkt_tgt_pkt);
8283 
8284 	return (ret_vpkt);
8285 }
8286 
8287 /* restart the request sense request */
8288 static void
8289 vhci_uscsi_restart_sense(void *arg)
8290 {
8291 	struct buf	*rqbp;
8292 	struct buf	*bp;
8293 	struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
8294 	mp_uscsi_cmd_t	*mp_uscmdp;
8295 
8296 	VHCI_DEBUG(4, (CE_WARN, NULL,
8297 	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));
8298 
8299 	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
8300 		/* if it fails, we need to wake up the original command */
8301 		mp_uscmdp = rqpkt->pkt_private;
8302 		bp = mp_uscmdp->cmdbp;
8303 		rqbp = mp_uscmdp->rqbp;
8304 		ASSERT(mp_uscmdp && bp && rqbp);
8305 		scsi_free_consistent_buf(rqbp);
8306 		scsi_destroy_pkt(rqpkt);
8307 		bp->b_resid = bp->b_bcount;
8308 		bioerror(bp, EIO);
8309 		biodone(bp);
8310 	}
8311 }
8312 
8313 /*
8314  * auto-rqsense is not enabled so we have to retrieve the request sense
8315  * manually.
8316  */
8317 static int
8318 vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
8319 {
8320 	struct buf		*rqbp, *cmdbp;
8321 	struct scsi_pkt		*rqpkt;
8322 	int			rval = 0;
8323 
8324 	cmdbp = mp_uscmdp->cmdbp;
8325 	ASSERT(cmdbp != NULL);
8326 
8327 	VHCI_DEBUG(4, (CE_WARN, NULL,
8328 	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
8329 	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
8330 	/* set up the packet information and cdb */
8331 	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
8332 	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
8333 		return (-1);
8334 	}
8335 
8336 	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
8337 	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
8338 		scsi_free_consistent_buf(rqbp);
8339 		return (-1);
8340 	}
8341 
8342 	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
8343 	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
8344 
8345 	mp_uscmdp->rqbp = rqbp;
8346 	rqbp->b_private = mp_uscmdp;
8347 	rqpkt->pkt_flags |= FLAG_SENSING;
8348 	rqpkt->pkt_time = 60;
8349 	rqpkt->pkt_comp = vhci_uscsi_iodone;
8350 	rqpkt->pkt_private = mp_uscmdp;
8351 
8352 	/*
8353 	 * NOTE: This code path is related to MPAPI uscsi(4I), so path
8354 	 * selection is not based on path_instance.
8355 	 */
8356 	if (scsi_pkt_allocated_correctly(rqpkt))
8357 		rqpkt->pkt_path_instance = 0;
8358 
8359 	switch (scsi_transport(rqpkt)) {
8360 	case TRAN_ACCEPT:
8361 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8362 		    "transport accepted."));
8363 		break;
8364 	case TRAN_BUSY:
8365 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8366 		    "transport busy, setting timeout."));
8367 		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
8368 		    (drv_usectohz(5 * 1000000)));
8369 		break;
8370 	default:
8371 		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8372 		    "transport failed"));
8373 		scsi_free_consistent_buf(rqbp);
8374 		scsi_destroy_pkt(rqpkt);
8375 		rval = -1;
8376 	}
8377 
8378 	return (rval);
8379 }
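/*
 * Hypothetical cancellation of the retry timeout scheduled above;
 * vhci_restart_timeid is kept precisely so the pending
 * vhci_uscsi_restart_sense() callback can be cancelled (e.g. at
 * detach time) with untimeout(9F).
 */
#if 0
	if (vhci_restart_timeid != 0) {
		(void) untimeout(vhci_restart_timeid);
		vhci_restart_timeid = 0;
	}
#endif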
8380 
8381 /*
8382  * done routine for the MPAPI uscsi command - this behaves as though
8383  * FLAG_DIAGNOSE is set, meaning there are no retries except for a manual
8384  * request sense.
8385  */
8386 void
8387 vhci_uscsi_iodone(struct scsi_pkt *pkt)
8388 {
8389 	struct buf			*bp;
8390 	mp_uscsi_cmd_t			*mp_uscmdp;
8391 	struct uscsi_cmd		*uscmdp;
8392 	struct scsi_arq_status		*arqstat;
8393 	int				err;
8394 
8395 	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
8396 	uscmdp = mp_uscmdp->uscmdp;
8397 	bp = mp_uscmdp->cmdbp;
8398 	ASSERT(bp != NULL);
8399 	VHCI_DEBUG(4, (CE_WARN, NULL,
8400 	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
8401 	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
8402 	/* Save the status and the residual into the uscsi_cmd struct */
8403 	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
8404 	uscmdp->uscsi_resid = bp->b_resid;
8405 
8406 	/* return early on a completely successful command */
8407 	if (pkt->pkt_reason == CMD_CMPLT &&
8408 	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
8409 	    pkt->pkt_resid == 0) {
8410 		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8411 		scsi_destroy_pkt(pkt);
8412 		biodone(bp);
8413 		return;
8414 	}
8415 	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
8416 	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
8417 	    pkt->pkt_reason, pkt->pkt_resid,
8418 	    pkt->pkt_state, bp->b_bcount, bp->b_resid));
8419 
8420 	err = EIO;
8421 
8422 	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
8423 	if (pkt->pkt_reason != CMD_CMPLT) {
8424 		/*
8425 		 * The command did not complete.
8426 		 */
8427 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8428 		    "vhci_uscsi_iodone: command did not complete."
8429 		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
8430 		if (pkt->pkt_flags & FLAG_SENSING) {
8431 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8432 		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
8433 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
8434 			err = ETIMEDOUT;
8435 		}
8436 	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
8437 		/*
8438 		 * The auto-rqsense happened, and the packet has a filled-in
8439 		 * scsi_arq_status structure, pointed to by pkt_scbp.
8440 		 */
8441 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8442 		    "vhci_uscsi_iodone: received auto-requested sense"));
8443 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8444 			/* get the amount of data to copy into rqbuf */
8445 			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
8446 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8447 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8448 			uscmdp->uscsi_rqstatus =
8449 			    *((char *)&arqstat->sts_rqpkt_status);
8450 			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
8451 			    rqlen != 0) {
8452 				bcopy(&(arqstat->sts_sensedata),
8453 				    uscmdp->uscsi_rqbuf, rqlen);
8454 			}
8455 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8456 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8457 			    "vhci_uscsi_iodone: ARQ "
8458 			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
8459 			    "xfer: %d rqpkt_resid: %d\n",
8460 			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
8461 			    uscmdp->uscsi_rqlen, rqlen,
8462 			    arqstat->sts_rqpkt_resid));
8463 		}
8464 	} else if (pkt->pkt_flags & FLAG_SENSING) {
8465 		struct buf *rqbp;
8466 		struct scsi_status *rqstatus;
8467 
8468 		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
8469 		/* a manual request sense was done - get the information */
8470 		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8471 			int rqlen = SENSE_LENGTH - pkt->pkt_resid;
8472 
8473 			rqbp = mp_uscmdp->rqbp;
8474 			/* get the amount of data to copy into rqbuf */
8475 			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8476 			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8477 			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
8478 			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
8479 				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
8480 				    rqlen);
8481 			}
8482 			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8483 			scsi_free_consistent_buf(rqbp);
8484 		}
8485 	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING "
8486 		    "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
8487 		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
8488 	} else {
8489 		struct scsi_status *status =
8490 		    (struct scsi_status *)pkt->pkt_scbp;
8491 		/*
8492 		 * Command completed and we're not getting sense. Check for
8493 		 * errors and decide what to do next.
8494 		 */
8495 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8496 		    "vhci_uscsi_iodone: command appears complete: reason: %x",
8497 		    pkt->pkt_reason));
8498 		if (status->sts_chk) {
8499 			/* need to manually get the request sense */
8500 			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
8501 				scsi_destroy_pkt(pkt);
8502 				return;
8503 			}
8504 		} else {
8505 			VHCI_DEBUG(4, (CE_NOTE, NULL,
8506 			    "vhci_uscsi_iodone: appears complete"));
8507 			err = 0;
8508 			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8509 			if (pkt->pkt_resid) {
8510 				bp->b_resid += pkt->pkt_resid;
8511 			}
8512 		}
8513 	}
8514 
8515 	if (err) {
8516 		if (bp->b_resid == 0)
8517 			bp->b_resid = bp->b_bcount;
8518 		bioerror(bp, err);
8519 		bp->b_flags |= B_ERROR;
8520 	}
8521 
8522 	scsi_destroy_pkt(pkt);
8523 	biodone(bp);
8524 
8525 	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
8526 }
8527 
8528 /*
8529  * Start routine for the MPAPI uscsi command: build and transport a scsi_pkt.
8530  */
8531 int
8532 vhci_uscsi_iostart(struct buf *bp)
8533 {
8534 	struct scsi_pkt		*pkt;
8535 	struct uscsi_cmd	*uscmdp;
8536 	mp_uscsi_cmd_t		*mp_uscmdp;
8537 	int			stat_size, rval;
8538 	int			retry = 0;
8539 
8540 	ASSERT(bp->b_private != NULL);
8541 
8542 	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
8543 	uscmdp = mp_uscmdp->uscmdp;
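	/*
	 * Size the status block: with USCSI_RQENABLE we need room for a
	 * full scsi_arq_status (SENSE_LENGTH) so auto-request-sense data
	 * can be returned; otherwise a single scsi_status byte suffices.
	 */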
8544 	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8545 		stat_size = SENSE_LENGTH;
8546 	} else {
8547 		stat_size = 1;
8548 	}
8549 
8550 	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
8551 	    stat_size, 0, 0, SLEEP_FUNC, NULL);
8552 	if (pkt == NULL) {
8553 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8554 		    "vhci_uscsi_iostart: rval: EINVAL"));
8555 		bp->b_resid = bp->b_bcount;
8556 		uscmdp->uscsi_resid = bp->b_bcount;
8557 		bioerror(bp, EINVAL);
8558 		biodone(bp);
8559 		return (EINVAL);
8560 	}
8561 
8562 	pkt->pkt_time = uscmdp->uscsi_timeout;
8563 	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
8564 	pkt->pkt_comp = vhci_uscsi_iodone;
8565 	pkt->pkt_private = mp_uscmdp;
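	/* Map the uscsi(4I) request flags onto the corresponding pkt_flags. */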
8566 	if (uscmdp->uscsi_flags & USCSI_SILENT)
8567 		pkt->pkt_flags |= FLAG_SILENT;
8568 	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
8569 		pkt->pkt_flags |= FLAG_ISOLATE;
8570 	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
8571 		pkt->pkt_flags |= FLAG_DIAGNOSE;
8572 	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
8573 		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
8574 	}
8575 	VHCI_DEBUG(4, (CE_WARN, NULL,
8576 	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
8577 	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
8578 	    " stat_size: %d",
8579 	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
8580 	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
8581 	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));
8582 
8583 	/*
8584 	 * NOTE: This code path is related to MPAPI uscsi(4I), so path
8585 	 * selection is not based on path_instance.
8586 	 */
8587 	if (scsi_pkt_allocated_correctly(pkt))
8588 		pkt->pkt_path_instance = 0;
8589 
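	/*
	 * Transport the command, backing off and retrying while the HBA
	 * reports TRAN_BUSY; with the default tunables this is up to
	 * vhci_uscsi_retry_count (180) retries of vhci_uscsi_delay
	 * (100ms) each, roughly 18 seconds in all.
	 */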
8590 	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
8591 	    retry < vhci_uscsi_retry_count) {
8592 		delay(drv_usectohz(vhci_uscsi_delay));
8593 		retry++;
8594 	}
8595 	if (retry >= vhci_uscsi_retry_count) {
8596 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8597 		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
8598 	}
8599 	switch (rval) {
8600 	case TRAN_ACCEPT:
8601 		rval = 0;
8602 		break;
8603 
8604 	default:
8605 		VHCI_DEBUG(4, (CE_NOTE, NULL,
8606 		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
8607 		    rval, bp->b_bcount, bp->b_resid));
8608 		bp->b_resid = bp->b_bcount;
8609 		uscmdp->uscsi_resid = bp->b_bcount;
8610 		bioerror(bp, EIO);
8611 		scsi_destroy_pkt(pkt);
8612 		biodone(bp);
8613 		rval = EIO;
8614 		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8615 		break;
8616 	}
8617 	VHCI_DEBUG(4, (CE_NOTE, NULL,
8618 	    "vhci_uscsi_iostart: exit: rval: %d", rval));
8619 	return (rval);
8620 }
8621 
8622 /* ARGSUSED */
8623 static struct scsi_failover_ops *
8624 vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd,
8625     void **ctprivp, char **fo_namep)
8626 {
8627 	struct scsi_failover_ops	*sfo;
8628 	char				*sfo_name;
8629 	char				*override;
8630 	struct scsi_failover		*sf;
8631 
8632 	ASSERT(psd && psd->sd_inq);
8633 	if ((psd == NULL) || (psd->sd_inq == NULL)) {
8634 		VHCI_DEBUG(1, (CE_NOTE, NULL,
8635 		    "!vhci_dev_fo: returning NULL, no scsi_device or inquiry"));
8636 		return (NULL);
8637 	}
8638 
8639 	/*
8640 	 * Determine if device is supported under scsi_vhci, and select
8641 	 * failover module.
8642 	 *
8643 	 * See if there is a scsi_vhci.conf file override for this device's
8644 	 * VID/PID. The following values can be returned:
8645 	 *
8646 	 * NULL		If NULL is returned then there is no scsi_vhci.conf
8647 	 *		override.  For NULL, we determine the failover_ops for
8648 	 *		this device by checking the sfo_device_probe entry
8649 	 *		point for each 'fops' module, in order.
8650 	 *
8651 	 *		NOTE: Correct operation may depend on module ordering
8652 	 *		of 'specific' (failover modules that are completely
8653 	 *		VID/PID table based) to 'generic' (failover modules
8654 	 *		that are based on T10 standards like TPGS).  Currently,
8655 	 *		the value of 'ddi-forceload' in scsi_vhci.conf is used
8656 	 *		to establish the module list and probe order.
8657 	 *
8658 	 * "NONE"	If value "NONE" is returned then there is a
8659 	 *		scsi_vhci.conf VID/PID override to indicate the device
8660 	 *		should not be supported under scsi_vhci (even if there
8661 	 *		is an 'fops' module supporting the device).
8662 	 *
8663 	 * "<other>"	If another value is returned then that value is the
8664 	 *		name of the 'fops' module that should be used.
8665 	 */
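	/*
	 * For illustration, a hypothetical scsi_vhci.conf override entry
	 * (VID/PID-to-value string pairs, with the VID blank-padded to the
	 * 8-character INQUIRY vendor field):
	 *
	 *	scsi-vhci-failover-override =
	 *		"ACME    SuperDisk",	"f_sym",
	 *		"OTHR    VetoedLUN",	"NONE";
	 */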
8666 	sfo = NULL;	/* "NONE" */
8667 	override = scsi_get_device_type_string(
8668 	    "scsi-vhci-failover-override", vdip, psd);
8669 	if (override == NULL) {
8670 		/* NULL: default: select based on sfo_device_probe results */
8671 		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
8672 			if ((sf->sf_sfo == NULL) ||
8673 			    sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
8674 			    ctprivp) == SFO_DEVICE_PROBE_PHCI)
8675 				continue;
8676 
8677 			/* found failover module, supported under scsi_vhci */
8678 			sfo = sf->sf_sfo;
8679 			if (fo_namep && (*fo_namep == NULL)) {
8680 				sfo_name = i_ddi_strdup(sfo->sfo_name,
8681 				    KM_SLEEP);
8682 				*fo_namep = sfo_name;
8683 			}
8684 			break;
8685 		}
8686 	} else if (strcasecmp(override, "NONE")) {
8687 		/* !"NONE": select based on driver.conf specified name */
8688 		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
8689 			if ((sf->sf_sfo == NULL) ||
8690 			    (sf->sf_sfo->sfo_name == NULL) ||
8691 			    strcmp(override, sf->sf_sfo->sfo_name))
8692 				continue;
8693 
8694 			/*
8695 			 * NOTE: If sfo_device_probe() has side-effects,
8696 			 * including setting *ctprivp, these are not going
8697 			 * to occur with override config.
8698 			 */
8699 
8700 			/* found failover module, supported under scsi_vhci */
8701 			sfo = sf->sf_sfo;
8702 			if (fo_namep && (*fo_namep == NULL)) {
8703 				sfo_name = kmem_alloc(strlen("conf ") +
8704 				    strlen(sfo->sfo_name) + 1, KM_SLEEP);
8705 				(void) sprintf(sfo_name, "conf %s",
8706 				    sfo->sfo_name);
8707 				*fo_namep = sfo_name;
8708 			}
8709 			break;
8710 		}
8711 	}
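	/* scsi_get_device_type_string() returned an allocated copy; free it. */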
8712 	if (override)
8713 		kmem_free(override, strlen(override) + 1);
8714 	return (sfo);
8715 }
8716 
8717 /*
8718  * Determine whether the device described by cinfo should be enumerated under
8719  * the vHCI or the pHCI - if there is a failover ops module, the device is
8720  * supported under the vHCI.  By agreement with SCSA, cinfo is a pointer to
8721  * a scsi_device structure associated with a decorated pHCI probe node.
8722  */
8723 /* ARGSUSED */
8724 int
8725 vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo)
8726 {
8727 	struct scsi_device	*psd = (struct scsi_device *)cinfo;
8728 
8729 	return (vhci_dev_fo(vdip, psd, NULL, NULL) ? MDI_SUCCESS : MDI_FAILURE);
8730 }
8731 
8732 
8733 #ifdef DEBUG
8734 extern struct scsi_key_strings scsi_cmds[];
8735 
8736 static char *
8737 vhci_print_scsi_cmd(char cmd)
8738 {
8739 	char tmp[64];
8740 	char *cpnt;
8741 
8742 	cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
8743 	/* tmp goes out of scope on return and caller sees garbage */
8744 	if (cpnt == tmp) {
8745 		cpnt = "Unknown Command";
8746 	}
8747 	return (cpnt);
8748 }
8749 
8750 extern uchar_t	scsi_cdb_size[];
8751 
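/*
 * Log the SCSI command name for a CDB; for levels other than CE_NOTE,
 * also hex-dump the CDB bytes via vhci_clean_print().
 */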
8752 static void
8753 vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
8754 {
8755 	int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
8756 	char buf[256];
8757 
8758 	if (level == CE_NOTE) {
8759 		vhci_log(level, dip, "path cmd %s\n",
8760 		    vhci_print_scsi_cmd(*cdb));
8761 		return;
8762 	}
8763 
8764 	(void) sprintf(buf, "%s for cmd(%s)", title, vhci_print_scsi_cmd(*cdb));
8765 	vhci_clean_print(dip, level, buf, cdb, len);
8766 }
8767 
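/*
 * Log 'title' followed by a hex dump of 'data', eight bytes per line.
 */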
8768 static void
8769 vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
8770     int len)
8771 {
8772 	int	i;
8773 	int	c;
8774 	char	*format;
8775 	char	buf[256];
8776 	uchar_t	byte;
8777 
8778 	(void) sprintf(buf, "%s:\n", title);
8779 	vhci_log(level, dev, "%s", buf);
8780 	level = CE_CONT;
8781 	for (i = 0; i < len; ) {
8782 		buf[0] = 0;
8783 		for (c = 0; c < 8 && i < len; c++, i++) {
8784 			byte = (uchar_t)data[i];
8785 			if (byte < 0x10)
8786 				format = "0x0%x ";
8787 			else
8788 				format = "0x%x ";
8789 			(void) sprintf(&buf[(int)strlen(buf)], format, byte);
8790 		}
8791 		(void) sprintf(&buf[(int)strlen(buf)], "\n");
8792 
8793 		vhci_log(level, dev, "%s\n", buf);
8794 	}
8795 }
8796 #endif
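
/*
 * Walk the MPAPI multipath-LU list and mark the data for the LU whose
 * name matches this vlun's WWN as invalid.
 */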
8797 static void
8798 vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun)
8799 {
8800 	char			*svl_wwn;
8801 	mpapi_item_list_t	*ilist;
8802 	mpapi_lu_data_t		*ld;
8803 
8804 	if (vlun == NULL) {
8805 		return;
8806 	} else {
8807 		svl_wwn = vlun->svl_lun_wwn;
8808 	}
8809 
8810 	ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;
8811 
8812 	while (ilist != NULL) {
8813 		ld = (mpapi_lu_data_t *)(ilist->item->idata);
8814 		if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn,
8815 		    strlen(svl_wwn)) == 0)) {
8816 			ld->valid = 0;
8817 			VHCI_DEBUG(6, (CE_WARN, NULL,
8818 			    "vhci_invalidate_mpapi_lu: "
8819 			    "Invalidated LU(%s)", svl_wwn));
8820 			return;
8821 		}
8822 		ilist = ilist->next;
8823 	}
8824 	VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: "
8825 	    "Could not find LU(%s) to invalidate.", svl_wwn));
8826 }
8827 
8828 /*
8829  * Return the device's block size (as given by the 'device-blksize'
8830  * property). If the property does not exist, the default DEV_BSIZE
8831  * is returned.
8832  */
8833 size_t
8834 vhci_get_blocksize(dev_info_t *dip)
8835 {
8836 	/*
8837 	 * Unfortunately, 'device-blksize' is typically implemented in
8838 	 * a device as a dynamic property managed by cmlb. As a result,
8839 	 * we cannot merely use ddi_prop_get_int() to get the value.
8840 	 * Instead, we must call the cb_prop_op on the device.
8841 	 * If that fails, we will attempt ddi_prop_get_int() in case
8842 	 * there is a device that defines it as a static property.
8843 	 * If all else fails, we return DEV_BSIZE.
8844 	 */
8845 	struct dev_ops *ops = DEVI(dip)->devi_ops;
8846 
8847 	/*
8848 	 * The DDI property interfaces don't recognize unsigned
8849 	 * values, so we have to cast it ourselves when we return the value.
8850 	 */
8851 	int blocksize = DEV_BSIZE;
8852 
8853 	/*
8854 	 * According to i_ldi_prop_op(), some nexus drivers apparently do not
8855 	 * always correctly set cb_prop_op, so we must check for
8856 	 * nodev, nulldev, and NULL.
8857 	 */
8858 	if (ops->devo_cb_ops->cb_prop_op != nodev &&
8859 	    ops->devo_cb_ops->cb_prop_op != nulldev &&
8860 	    ops->devo_cb_ops->cb_prop_op != NULL) {
8861 		int proplen = sizeof (blocksize);
8862 		int ret;
8863 
8864 		ret = cdev_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
8865 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | DDI_PROP_DYNAMIC,
8866 		    "device-blksize", (caddr_t)&blocksize, &proplen);
8867 		if (ret == DDI_PROP_SUCCESS && proplen == sizeof (blocksize) &&
8868 		    blocksize > 0)
8869 			return (blocksize);
8870 	}
8871 
8872 	blocksize = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "device-blksize",
8873 	    DEV_BSIZE);
8874 	return ((blocksize > 0) ? blocksize : DEV_BSIZE);
8875 }
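
/*
 * A minimal caller sketch (hypothetical, not part of this driver): convert
 * a transfer length in bytes into device blocks using the reported size.
 *
 *	size_t bsize = vhci_get_blocksize(dip);
 *	uint64_t nblks = (len + bsize - 1) / bsize;
 */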
8876