/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * Multiplexed I/O SCSI vHCI implementation
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/scsi/impl/services.h>
#include <sys/sunmdi.h>
#include <sys/mdi_impldefs.h>
#include <sys/scsi/adapters/scsi_vhci.h>
#include <sys/disp.h>
#include <sys/byteorder.h>

extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;

#ifdef DEBUG
int vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
#endif

/* retry for the vhci_do_prout command when a not ready is returned */
int vhci_prout_not_ready_retry = 180;

/*
 * These values are defined to support the internal retry of
 * SCSI packets for better sense code handling.
 */
#define	VHCI_CMD_CMPLT	0
#define	VHCI_CMD_RETRY	1
#define	VHCI_CMD_ERROR	-1

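/* property lookup flags: do not inherit from parent nodes or consult PROM */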
#define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
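/* 0x47 is the additional sense code for a SCSI parity error */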
#define	VHCI_SCSI_PERR		0x47
#define	VHCI_PGR_ILLEGALOP	-2
#define	VHCI_NUM_UPDATE_TASKQ	8
/* changed to 132 to accommodate HDS */

/*
 * Version Macros
 */
#define	VHCI_NAME_VERSION	"SCSI VHCI Driver"
char		vhci_version_name[] = VHCI_NAME_VERSION;

int		vhci_first_time = 0;
clock_t		vhci_to_ticks = 0;
int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
kcondvar_t	vhci_cv;
kmutex_t	vhci_global_mutex;
void		*vhci_softstate = NULL; /* for soft state */

/*
 * Flag to delay the retry of the reserve command
 */
int		vhci_reserve_delay = 100000;
static int	vhci_path_quiesce_timeout = 60;
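/* all-zero reservation key, presumably for comparisons in the PGR code */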
static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];

/* uscsi delay for a TRAN_BUSY */
static int vhci_uscsi_delay = 100000;
static int vhci_uscsi_retry_count = 180;
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static timeout_id_t vhci_restart_timeid = 0;

static int	vhci_bus_config_debug = 0;

/*
 * Bidirectional map of 'target-port' to port id <pid> for support of
 * iostat(1M) '-Xx' and '-Yx' output.
 */
static kmutex_t		vhci_targetmap_mutex;
static uint_t		vhci_targetmap_pid = 1;
static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */

/*
 * functions exported by scsi_vhci struct cb_ops
 */
static int vhci_open(dev_t *, int, int, cred_t *);
static int vhci_close(dev_t, int, int, cred_t *);
static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * functions exported by scsi_vhci struct dev_ops
 */
static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * functions exported by scsi_vhci scsi_hba_tran_t transport table
 */
static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
    struct scsi_device *);
static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_target(struct scsi_address *, int level,
    uint8_t select_path);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip);
static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
    caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
    void *, void *);
static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);
static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *);
static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *,
    void **, char **);

/*
 * functions registered with the mpxio framework via mdi_vhci_ops_t
 */
static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
    mdi_pathinfo_state_t, uint32_t, int);
static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_failover(dev_info_t *, dev_info_t *, int);
static void vhci_client_attached(dev_info_t *);
static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *);

static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
    int, caddr_t);
static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
    uint_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
    sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
    struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
    scsi_vhci_lun_t *, char *, char *);

static char *vhci_devnm_to_guid(char *);
static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
    int, int (*func)(caddr_t));
static void vhci_intr(struct scsi_pkt *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt **);
static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
static void vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd);
static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
    uint8_t, uint8_t);
void vhci_update_pathstates(void *);

#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
static void vhci_print_cdb(dev_info_t *dip, uint_t level,
    char *title, uchar_t *cdb);
static void vhci_clean_print(dev_info_t *dev, uint_t level,
    char *title, uchar_t *data, int len);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *);

/*
 * MP-API related functions
 */
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
    scsi_vhci_lun_t *, mdi_pathinfo_t *);
extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
    uint8_t, void*);
extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
    scsi_vhci_lun_t *);

#define	VHCI_DMA_MAX_XFER_CAP	INT_MAX

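/*
 * Upper bound on retries of a failed PGR validate-and-register
 * (assumption: consumed by vhci_pgr_validate_and_register()).
 */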
#define	VHCI_MAX_PGR_RETRIES	3

/*
 * Macros for the device-type mpxio options
 */
#define	LOAD_BALANCE_OPTIONS		"load-balance-options"
#define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
#define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
#define	DEVICE_TYPE_STR			"device-type"
#define	isdigit(ch)	((ch) >= '0' && (ch) <= '9')
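
/*
 * An illustrative scsi_vhci.conf fragment using the properties above
 * (the device-type string and region size are examples only):
 *
 *	device-type-mpxio-options-list =
 *		"device-type=VENDOR  PRODUCT",
 *		"load-balance-options=logical-block-options";
 *	logical-block-options = "load-balance=logical-block",
 *		"region-size=18";
 */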

static struct cb_ops vhci_cb_ops = {
	vhci_open,			/* open */
	vhci_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	vhci_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops vhci_ops = {
	DEVO_REV,
	0,
	vhci_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	vhci_attach,		/* attach and detach are mandatory */
	vhci_detach,
	nodev,			/* reset */
	&vhci_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	vhci_version_name,	/* module name */
	&vhci_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static mdi_vhci_ops_t vhci_opinfo = {
	MDI_VHCI_OPS_REV,
	vhci_pathinfo_init,		/* Pathinfo node init callback */
	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
	vhci_pathinfo_state_change,	/* Pathinfo node state change */
	vhci_failover,			/* failover callback */
	vhci_client_attached,		/* client attached callback */
	vhci_is_dev_supported		/* is device supported by mdi */
};

/*
 * The scsi_failover table defines an ordered set of 'fops' modules supported
 * by scsi_vhci. Currently, initialize this table from the 'ddi-forceload'
 * property specified in scsi_vhci.conf.
 */
static struct scsi_failover {
	ddi_modhandle_t			sf_mod;
	struct scsi_failover_ops	*sf_sfo;
} *scsi_failover_table;
static uint_t	scsi_nfailover;

int
_init(void)
{
	int	rval;

	/*
	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
	 * before registering with the transport first.
	 */
	if ((rval = ddi_soft_state_init(&vhci_softstate,
	    sizeof (struct scsi_vhci), 1)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init:soft state init failed\n"));
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: scsi hba init failed\n"));
		ddi_soft_state_fini(&vhci_softstate);
		return (rval);
	}

	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
	vhci_targetmap_byport = mod_hash_create_strhash(
	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
	vhci_targetmap_bypid = mod_hash_create_idhash(
	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);

	if ((rval = mod_install(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
		if (vhci_targetmap_bypid)
			mod_hash_destroy_idhash(vhci_targetmap_bypid);
		if (vhci_targetmap_byport)
			mod_hash_destroy_strhash(vhci_targetmap_byport);
		mutex_destroy(&vhci_targetmap_mutex);
		cv_destroy(&vhci_cv);
		mutex_destroy(&vhci_global_mutex);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&vhci_softstate);
	}
	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
		return (rval);
	}

	if (vhci_targetmap_bypid)
		mod_hash_destroy_idhash(vhci_targetmap_bypid);
	if (vhci_targetmap_byport)
		mod_hash_destroy_strhash(vhci_targetmap_byport);
	mutex_destroy(&vhci_targetmap_mutex);
	cv_destroy(&vhci_cv);
	mutex_destroy(&vhci_global_mutex);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&vhci_softstate);

	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Lookup scsi_failover by "short name" of failover module.
 */
struct scsi_failover_ops *
vhci_failover_ops_by_name(char *name)
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
			return (sf->sf_sfo);
	}
	return (NULL);
}

/*
 * Load all scsi_failover_ops 'fops' modules.
 */
static void
vhci_failover_modopen(struct scsi_vhci *vhci)
{
	char			**module;
	int			i;
	struct scsi_failover	*sf;
	char			**dt;
	int			e;

	if (scsi_failover_table)
		return;

	/* Get the list of modules from scsi_vhci.conf */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf is missing 'ddi-forceload'");
		return;
	}
	if (scsi_nfailover == 0) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf has empty 'ddi-forceload'");
		ddi_prop_free(module);
		return;
	}
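
	/*
	 * An illustrative 'ddi-forceload' entry from scsi_vhci.conf
	 * (module paths are examples only):
	 *
	 *	ddi-forceload =
	 *		"misc/scsi_vhci/scsi_vhci_f_asym_sun",
	 *		"misc/scsi_vhci/scsi_vhci_f_sym",
	 *		"misc/scsi_vhci/scsi_vhci_f_tpgs";
	 */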

	/* allocate failover table based on number of modules */
	scsi_failover_table = (struct scsi_failover *)
	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
	    KM_SLEEP);

	/* loop over modules specified in scsi_vhci.conf and open each module */
	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
		if (module[i] == NULL)
			continue;

		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
		if (sf->sf_mod == NULL) {
			/*
			 * A module returns EEXIST if other software is
			 * supporting the intended function: for example
			 * the scsi_vhci_f_sum_emc module returns EEXIST
			 * from _init if EMC powerpath software is installed.
			 */
			if (e != EEXIST)
				cmn_err(CE_WARN, "scsi_vhci: unable to open "
				    "module '%s', error %d", module[i], e);
			continue;
		}
		sf->sf_sfo = ddi_modsym(sf->sf_mod,
		    "scsi_vhci_failover_ops", &e);
		if (sf->sf_sfo == NULL) {
			cmn_err(CE_WARN, "scsi_vhci: "
			    "unable to import 'scsi_failover_ops' from '%s', "
			    "error %d", module[i], e);
			(void) ddi_modclose(sf->sf_mod);
			sf->sf_mod = NULL;
			continue;
		}

		/* register vid/pid of devices supported with mpapi */
		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
			vhci_mpapi_add_dev_prod(vhci, *dt);
		sf++;
	}

	/* verify that at least the "well-known" modules were there */
	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");
	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");

	/* call sfo_init for modules that need it */
	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
			sf->sf_sfo->sfo_init();
	}

	ddi_prop_free(module);
}

/*
 * unload all loaded scsi_failover_ops modules
 */
static void
vhci_failover_modclose()
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
			continue;
		(void) ddi_modclose(sf->sf_mod);
		sf->sf_mod = NULL;
		sf->sf_sfo = NULL;
	}

	if (scsi_failover_table && scsi_nfailover)
		kmem_free(scsi_failover_table,
		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
	scsi_failover_table = NULL;
	scsi_nfailover = 0;
}

/* ARGSUSED */
static int
vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
		mutex_exit(&vhci->vhci_mutex);
		vhci_log(CE_NOTE, vhci->vhci_dip,
		    "!vhci%d: Already open\n", getminor(*devp));
		return (EBUSY);
	}

	vhci->vhci_state |= VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);
	return (0);
}


/* ARGSUSED */
static int
vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	vhci->vhci_state &= ~VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);

	return (0);
}

/* ARGSUSED */
static int
vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	if (IS_DEVCTL(cmd)) {
		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
	} else if (cmd == MP_CMD) {
		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
	} else {
		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
	}
}

/*
 * attach the module
 */
static int
vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			rval = DDI_FAILURE;
	int			scsi_hba_attached = 0;
	int			vhci_attached = 0;
	int			mutex_initted = 0;
	int			instance;
	struct scsi_vhci	*vhci;
	scsi_hba_tran_t		*tran;
	char			cache_name_buf[64];
	char			*data;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
		    "implemented\n"));
		return (rval);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_attach: unknown ddi command\n"));
		return (rval);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "soft state alloc failed\n"));
		return (DDI_FAILURE);
	}

	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "bad soft state\n"));
		ddi_soft_state_free(vhci_softstate, instance);
		return (DDI_FAILURE);
	}

	/* Allocate packet cache */
	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
	    "vhci%d_cache", instance);

	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	ASSERT(tran != NULL);

	vhci->vhci_tran		= tran;
	vhci->vhci_dip		= dip;
	vhci->vhci_instance	= instance;

	tran->tran_hba_private	= vhci;
	tran->tran_tgt_init	= vhci_scsi_tgt_init;
	tran->tran_tgt_probe	= NULL;
	tran->tran_tgt_free	= vhci_scsi_tgt_free;

	tran->tran_start	= vhci_scsi_start;
	tran->tran_abort	= vhci_scsi_abort;
	tran->tran_reset	= vhci_scsi_reset;
	tran->tran_getcap	= vhci_scsi_getcap;
	tran->tran_setcap	= vhci_scsi_setcap;
	tran->tran_init_pkt	= vhci_scsi_init_pkt;
	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
	tran->tran_dmafree	= vhci_scsi_dmafree;
	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
	tran->tran_reset_notify = vhci_scsi_reset_notify;

	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
	tran->tran_get_name	= vhci_scsi_get_name;
	tran->tran_bus_reset	= NULL;
	tran->tran_quiesce	= NULL;
	tran->tran_unquiesce	= NULL;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_post_event = NULL;

	tran->tran_bus_power = vhci_scsi_bus_power;

	tran->tran_bus_config = vhci_scsi_bus_config;
	tran->tran_bus_unconfig = vhci_scsi_bus_unconfig;

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
	    != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "mdi_vhci_register failed\n"));
		goto attach_fail;
	}
	vhci_attached++;

	/*
	 * Attach this instance of the hba.
	 *
	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
	 * driver, it has nothing to do with DMA. However, when calling
	 * scsi_hba_attach_setup() we need to pass something valid in the
	 * dma attributes parameter. So we just use scsi_alloc_attr.
	 * SCSA itself seems to care only for dma_attr_minxfer and
	 * dma_attr_burstsizes fields of dma attributes structure.
	 * It expects those fields to be non-zero.
	 */
	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
	    SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "hba attach failed\n"));
		goto attach_fail;
	}
	scsi_hba_attached++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    " ddi_create_minor_node failed\n"));
		goto attach_fail;
	}

	/*
	 * Set pm-want-child-notification property for
	 * power management of the phci and client
	 */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d fail to create pm-want-child-notification? prop",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto attach_fail;
	}

	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
	vhci->vhci_update_pathstates_taskq =
	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
	    MINCLSYSPRI, 1, 4, 0);
	ASSERT(vhci->vhci_taskq);
	ASSERT(vhci->vhci_update_pathstates_taskq);

	/*
	 * Set appropriate configuration flags based on options set in
	 * conf file.
	 */
	vhci->vhci_conf_flags = 0;
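	/*
	 * For example, an illustrative scsi_vhci.conf entry enabling the
	 * auto-failback capability checked below:
	 *
	 *	auto-failback="enable";
	 */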
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
	    "auto-failback", &data) == DDI_SUCCESS) {
		if (strcmp(data, "enable") == 0)
			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
		ddi_prop_free(data);
	}

	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
		    "disabled through scsi_vhci.conf file.");

	/*
	 * Allocate an mpapi private structure
	 */
	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
	if (vhci_mpapi_init(vhci) != 0) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
		    "vhci_mpapi_init() failed"));
	}

	vhci_failover_modopen(vhci);		/* load failover modules */

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_attached)
		(void) mdi_vhci_unregister(dip, 0);

	if (scsi_hba_attached)
		(void) scsi_hba_detach(dip);

	if (vhci->vhci_tran)
		scsi_hba_tran_free(vhci->vhci_tran);

	if (mutex_initted) {
		mutex_destroy(&vhci->vhci_mutex);
	}

	ddi_soft_state_free(vhci_softstate, instance);
	return (DDI_FAILURE);
}


/*ARGSUSED*/
static int
vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance = ddi_get_instance(dip);
	scsi_hba_tran_t		*tran;
	struct scsi_vhci	*vhci;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	vhci = TRAN2HBAPRIVATE(tran);
	if (!vhci) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not "
		    "yet implemented\n"));
		return (DDI_FAILURE);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_detach: unknown ddi command\n"));
		return (DDI_FAILURE);
	}

	(void) mdi_vhci_unregister(dip, 0);
	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
857 "%s%d unable to remove prop pm-want_child_notification?",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	if (vhci_restart_timeid != 0) {
		(void) untimeout(vhci_restart_timeid);
	}
	vhci_restart_timeid = 0;

	mutex_destroy(&vhci->vhci_mutex);
	vhci->vhci_dip = NULL;
	vhci->vhci_tran = NULL;
	taskq_destroy(vhci->vhci_taskq);
	taskq_destroy(vhci->vhci_update_pathstates_taskq);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(vhci_softstate, instance);

	vhci_failover_modclose();		/* unload failover modules */
	return (DDI_SUCCESS);
}

/*
 * vhci_getinfo()
 * Given the device number, return the devinfo pointer or the
 * instance number.
 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
 */

/*ARGSUSED*/
static int
vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct scsi_vhci	*vhcip;
	int			instance = MINOR2INST(getminor((dev_t)arg));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		vhcip = ddi_get_soft_state(vhci_softstate, instance);
		if (vhcip != NULL)
			*result = vhcip->vhci_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	char			*guid;
	scsi_vhci_lun_t		*vlun;
	struct scsi_vhci	*vhci;
	clock_t			from_ticks;
	mdi_pathinfo_t		*pip;
	int			rval;

	ASSERT(hba_dip != NULL);
	ASSERT(tgt_dip != NULL);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		/*
		 * This must be the .conf node without GUID property.
		 * The node under fp already inserts a delay, so we
		 * just return from here. We rely on this delay to have
		 * all dips be posted to the ndi hotplug thread's newdev
		 * list. This is necessary for the deferred attach
		 * mechanism to work and opens() done soon after boot to
		 * succeed.
		 */
		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
		    "property failed"));
		return (DDI_NOT_WELL_FORMED);
	}

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		/*
		 * This must be .conf node with the GUID property. We don't
		 * merge property by ndi_merge_node() here because the
		 * devi_addr_buf of .conf node is always "" according to the
		 * implementation of vhci_scsi_get_name_bus_addr().
		 */
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
	ASSERT(vhci != NULL);

	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
	    "!tgt_init: called for %s (instance %d)\n",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));

	vlun = vhci_lun_lookup(tgt_dip);

	mutex_enter(&vhci_global_mutex);

	from_ticks = ddi_get_lbolt();
	if (vhci_to_ticks == 0) {
		vhci_to_ticks = from_ticks +
		    drv_usectohz(vhci_init_wait_timeout);
	}

#ifdef DEBUG
	if (vlun) {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
		    "from_ticks %lx to_ticks %lx",
		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
	} else {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : vlun not found "
		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
		    vhci_to_ticks));
	}
#endif

	rval = mdi_select_path(tgt_dip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_rele_path(pip);
	}

	/*
	 * Wait for the following conditions :
	 *	1. no vlun available yet
	 *	2. no path established
	 *	3. timer did not expire
	 */
	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
	    (rval != MDI_SUCCESS)) {
		if (vlun && vlun->svl_not_supported) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
			    "vlun 0x%p lun guid %s not supported!",
			    (void *)vlun, guid));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
			vhci_first_time = 1;
		}
		if (vhci_first_time == 1) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
			    "no wait for %s. from_tick %lx, to_tick %lx",
			    guid, from_ticks, vhci_to_ticks));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}

		if (cv_timedwait(&vhci_cv,
		    &vhci_global_mutex, vhci_to_ticks) == -1) {
			/* Timed out */
#ifdef DEBUG
			if (vlun == NULL) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: no vlun for %s!", guid));
			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path count is "
				    "zero for %s!", guid));
			} else {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path not "
				    "available yet for %s!", guid));
			}
#endif /* DEBUG */
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		vlun = vhci_lun_lookup(tgt_dip);
		rval = mdi_select_path(tgt_dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if (rval == MDI_SUCCESS) {
			mdi_rele_path(pip);
		}
		from_ticks = ddi_get_lbolt();
	}
	mutex_exit(&vhci_global_mutex);

	ASSERT(vlun != NULL);
	ddi_prop_free(guid);

	scsi_device_hba_private_set(sd, vlun);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct scsi_vhci_lun *dvlp;
	ASSERT(mdi_client_get_path_count(tgt_dip) <= 0);
	dvlp = (struct scsi_vhci_lun *)scsi_device_hba_private_get(sd);
	ASSERT(dvlp != NULL);

	vhci_lun_free(dvlp, sd);
}

/*
 * a PGR register command has started; copy the info we need
 */
int
vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
{
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	void			*addr;

	if (!vpkt->vpkt_tgt_init_bp)
		return (TRAN_BADPKT);

	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
	if (addr == NULL)
		return (TRAN_BUSY);

	mutex_enter(&vlun->svl_mutex);

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");

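	/*
	 * Copy the caller's parameter list into svl_prout.  The copy stops
	 * two resv-key-sized fields short of the full vhci_prout_t
	 * (presumably the driver-maintained "active" key copies, which
	 * must not be overwritten by caller data).
	 */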
	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");

	vlun->svl_time = pkt->pkt_time;
	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
	vlun->svl_first_path = vpkt->vpkt_path;
	mutex_exit(&vlun->svl_mutex);
	return (0);
}

/*
 * Function name : vhci_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
 *					  or other fatal failure
 *					  preventing packet transportation
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to phci
 *					  (or is held in the waitQ)
 * Description	 : Implements SCSA's tran_start() entry point for
 *		   packet transport
 *
 */
static int
vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int			rval = TRAN_ACCEPT;
	int			instance, held;
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	int			flags = 0;
	scsi_vhci_priv_t	*svp, *svp_resrv;
	dev_info_t		*cdip;
	client_lb_t		lbp;
	int			restore_lbp = 0;
	/* set if pkt is SCSI-II RESERVE cmd */
	int			pkt_reserve_cmd = 0;
	int			reserve_failed = 0;
	int			resrv_instance = 0;
	mdi_pathinfo_t		*pip;
	struct scsi_pkt		*rel_pkt;

	ASSERT(vhci != NULL);
	ASSERT(vpkt != NULL);
	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
	cdip = ADDR2DIP(ap);

	/*
	 * Block IOs if LUN is held or QUIESCED for IOs.
	 */
	if ((VHCI_LUN_IS_HELD(vlun)) ||
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		return (TRAN_BUSY);
	}

	/*
	 * vhci_lun needs to be quiesced before SCSI-II RESERVE command
	 * can be issued.  This may require a cv_timedwait, which is
	 * dangerous to perform in an interrupt context.  So if this
	 * is a RESERVE command a taskq is dispatched to service it.
	 * This taskq shall again call vhci_scsi_start, but we shall be
	 * sure its not in an interrupt context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
			if (taskq_dispatch(vhci->vhci_taskq,
			    vhci_dispatch_scsi_start, (void *) vpkt,
			    KM_NOSLEEP)) {
				return (TRAN_ACCEPT);
			} else {
				return (TRAN_BUSY);
			}
		}

		/*
		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
		 * get serviced for a lun.
		 */
		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
		if (!held) {
			return (TRAN_BUSY);
		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
		    VLUN_QUIESCED_FLG) {
			VHCI_RELEASE_LUN(vlun);
			return (TRAN_BUSY);
		}

		/*
		 * To ensure that no IOs occur for this LUN for the duration
		 * of this pkt set the VLUN_QUIESCED_FLG.
		 * In case this routine needs to exit on error make sure that
		 * this flag is cleared.
		 */
		vlun->svl_flags |= VLUN_QUIESCED_FLG;
		pkt_reserve_cmd = 1;

		/*
		 * if this is a SCSI-II RESERVE command, set load balancing
		 * policy to be ALTERNATE PATH to ensure that all subsequent
		 * IOs are routed on the same path.  This is because if commands
		 * are routed across multiple paths then IOs on paths other than
		 * the one on which the RESERVE was executed will get a
		 * RESERVATION CONFLICT
		 */
		lbp = mdi_get_lb_policy(cdip);
		if (lbp != LOAD_BALANCE_NONE) {
			if (vhci_quiesce_lun(vlun) != 1) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			vlun->svl_lb_policy_save = lbp;
			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
			    MDI_SUCCESS) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			restore_lbp = 1;
		}

		VHCI_DEBUG(2, (CE_NOTE, vhci->vhci_dip,
		    "!vhci_scsi_start: sending SCSI-2 RESERVE, vlun 0x%p, "
		    "svl_resrv_pip 0x%p, svl_flags: %x, lb_policy %x",
		    (void *)vlun, (void *)vlun->svl_resrv_pip, vlun->svl_flags,
		    mdi_get_lb_policy(cdip)));

		/*
		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
		 * To narrow this window where a reserve command may be sent
		 * down an inactive path the path states first need to be
		 * updated.  Before calling vhci_update_pathstates reset
		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
		 * for this lun.  This shall prevent an unnecessary reset
		 * from being sent out.  Also remember currently reserved path
		 * just for a case the new reservation will go to another path.
		 */
		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
			resrv_instance = mdi_pi_get_path_instance(
			    vlun->svl_resrv_pip);
		}
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		vhci_update_pathstates((void *)vlun);
	}

	instance = ddi_get_instance(vhci->vhci_dip);

	/*
	 * If the command is PRIN with action of zero, then the cmd
	 * is reading PR keys which requires filtering on completion.
	 * Data cache sync must be guaranteed.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
	    (vpkt->vpkt_org_vpkt == NULL)) {
		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
	}

	/*
	 * Do not defer bind for PKT_DMA_PARTIAL
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {

		/* This is a non pkt_dma_partial case */
		if ((rval = vhci_bind_transport(
		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
		    != TRAN_ACCEPT) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci%d %x: failed to bind transport: "
			    "vlun 0x%p pkt_reserved %x restore_lbp %x,"
			    "lbp %x", instance, rval, (void *)vlun,
			    pkt_reserve_cmd, restore_lbp, lbp));
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
			if (pkt_reserve_cmd)
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			return (rval);
		}
		VHCI_DEBUG(8, (CE_NOTE, NULL,
		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
	}
	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	ASSERT(vpkt->vpkt_path != NULL);

	/*
	 * This is the chance to adjust the pHCI's pkt and other information
	 * from target driver's pkt.
	 */
	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
	    (void *)vpkt));
	vhci_update_pHCI_pkt(vpkt, pkt);

	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
			    (void *)vlun, (void *)vpkt->vpkt_path,
			    (void *)vlun->svl_resrv_pip,
			    mdi_get_lb_policy(cdip)));
			reserve_failed = 1;
		}
	}

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
	if (svp == NULL || reserve_failed) {
		if (pkt_reserve_cmd) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind returned null svp vlun 0x%p",
			    (void *)vlun));
			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
		}
pkt_cleanup:
		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
			vpkt->vpkt_hba_pkt = NULL;
			if (vpkt->vpkt_path) {
				mdi_rele_path(vpkt->vpkt_path);
				vpkt->vpkt_path = NULL;
			}
		}
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			sema_v(&vlun->svl_pgr_sema);
		}
		return (TRAN_BUSY);
	}

	if ((resrv_instance != 0) && (resrv_instance !=
	    mdi_pi_get_path_instance(vpkt->vpkt_path))) {
		/*
		 * This is an attempt to reserve vpkt->vpkt_path.  But the
		 * previously reserved path referred by resrv_instance might
		 * still be reserved.  Hence we will send a release command
		 * there in order to avoid a reservation conflict.
		 */
		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, "!vhci_scsi_start: "
		    "conflicting reservation on another path, vlun 0x%p, "
		    "reserved instance %d, new instance: %d, pip: 0x%p",
		    (void *)vlun, resrv_instance,
		    mdi_pi_get_path_instance(vpkt->vpkt_path),
		    (void *)vpkt->vpkt_path));

		/*
		 * In rare cases, the path referred by resrv_instance could
		 * disappear in the meantime. Calling mdi_select_path() below
		 * is an attempt to find out if the path still exists. It also
		 * ensures that the path will be held when the release is sent.
		 */
		rval = mdi_select_path(cdip, NULL, MDI_SELECT_PATH_INSTANCE,
		    (void *)(intptr_t)resrv_instance, &pip);

		if ((rval == MDI_SUCCESS) && (pip != NULL)) {
			svp_resrv = (scsi_vhci_priv_t *)
			    mdi_pi_get_vhci_private(pip);
			rel_pkt = scsi_init_pkt(&svp_resrv->svp_psd->sd_address,
			    NULL, NULL, CDB_GROUP0,
			    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC,
			    NULL);

			if (rel_pkt == NULL) {
				char	*p_path;

				/*
				 * This is very unlikely.
				 * scsi_init_pkt(SLEEP_FUNC) does not fail
				 * because of resources.  But in theory it could
				 * fail for some other reason.  There is not an
				 * easy way how to recover though.  Log a warning
				 * and return.
				 */
				p_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
				vhci_log(CE_WARN, vhci->vhci_dip, "!Sending "
				    "RELEASE(6) to %s failed, a potential "
				    "reservation conflict ahead.",
				    ddi_pathname(mdi_pi_get_phci(pip), p_path));
				kmem_free(p_path, MAXPATHLEN);

				if (restore_lbp)
					(void) mdi_set_lb_policy(cdip, lbp);

				/* no need to check pkt_reserve_cmd here */
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				return (TRAN_FATAL_ERROR);
			}

			rel_pkt->pkt_cdbp[0] = SCMD_RELEASE;
			rel_pkt->pkt_time = 60;

			/*
			 * Ignore the return value.  If it will fail
			 * then most likely it is no longer reserved
			 * anyway.
			 */
			(void) vhci_do_scsi_cmd(rel_pkt);
			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "!vhci_scsi_start: path 0x%p, issued SCSI-2"
			    " RELEASE\n", (void *)pip));
			scsi_destroy_pkt(rel_pkt);
			mdi_rele_path(pip);
		}
	}

	VHCI_INCR_PATH_CMDCOUNT(svp);

	/*
	 * Ensure that no other IOs raced ahead, while a RESERVE cmd was
	 * QUIESCING the same lun.
	 */
	if ((!pkt_reserve_cmd) &&
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		VHCI_DECR_PATH_CMDCOUNT(svp);
		goto pkt_cleanup;
	}

	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
		/*
		 * currently this thread only handles running PGR
		 * commands, so don't bother creating it unless
		 * something interesting is going to happen (like
		 * either a PGR out, or a PGR in with enough space
		 * to hold the keys that are getting returned)
		 */
		mutex_enter(&vlun->svl_mutex);
		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
			    1, MINCLSYSPRI, 1, 4, 0);
			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
		}
		mutex_exit(&vlun->svl_mutex);
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			if ((rval = vhci_pgr_register_start(vlun, pkt)) != 0) {
				/* an error */
				sema_v(&vlun->svl_pgr_sema);
				return (rval);
			}
		}
	}

	/*
	 * SCSI-II RESERVE cmd is not expected in polled mode.
	 * If this changes it needs to be handled for the polled scenario.
	 */
	flags = vpkt->vpkt_hba_pkt->pkt_flags;

	/*
	 * Set the path_instance *before* sending the scsi_pkt down the path
	 * to mpxio's pHCI so that additional path abstractions at a pHCI
	 * level (like maybe iSCSI at some point in the future) can update
	 * the path_instance.
	 */
	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
		vpkt->vpkt_hba_pkt->pkt_path_instance =
		    mdi_pi_get_path_instance(vpkt->vpkt_path);

	rval = scsi_transport(vpkt->vpkt_hba_pkt);
	if (rval == TRAN_ACCEPT) {
		if (flags & FLAG_NOINTR) {
			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;

			ASSERT(tpkt != NULL);
			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
			tpkt->pkt_resid = pkt->pkt_resid;
			tpkt->pkt_state = pkt->pkt_state;
			tpkt->pkt_statistics = pkt->pkt_statistics;
			tpkt->pkt_reason = pkt->pkt_reason;

			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
				    vpkt->vpkt_tgt_init_scblen);
			}

			VHCI_DECR_PATH_CMDCOUNT(svp);
			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
				vpkt->vpkt_hba_pkt = NULL;
				if (vpkt->vpkt_path) {
					mdi_rele_path(vpkt->vpkt_path);
					vpkt->vpkt_path = NULL;
				}
			}
			/*
			 * This path will not automatically retry pkts
			 * internally, therefore, vpkt_org_vpkt should
			 * never be set.
			 */
			ASSERT(vpkt->vpkt_org_vpkt == NULL);
			scsi_hba_pkt_comp(tpkt);
		}
		return (rval);
	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (pkt_reserve_cmd) {
		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
		    "!vhci_scsi_start: reserve failed vlun 0x%p",
		    (void *)vlun));
		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
		if (restore_lbp)
			(void) mdi_set_lb_policy(cdip, lbp);
	}

	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	VHCI_DECR_PATH_CMDCOUNT(svp);

	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
		vpkt->vpkt_hba_pkt = NULL;
		if (vpkt->vpkt_path) {
			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
			mdi_rele_path(vpkt->vpkt_path);
			vpkt->vpkt_path = NULL;
		}
	}
	return (TRAN_BUSY);
}

/*
 * Function name : vhci_scsi_reset()
 *
 * Return Values : 0 - reset failed
 *		   1 - reset succeeded
 */

/* ARGSUSED */
static int
vhci_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = 0;

	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
		return (vhci_scsi_reset_target(ap, level, TRUE));
	} else if (level == RESET_ALL) {
		return (vhci_scsi_reset_bus(ap));
	}

	return (rval);
}

/*
 * vhci_recovery_reset:
 *	Issues reset to the device
 * Input:
 *	vlun - vhci lun pointer of the device
 *	ap - address of the device
 *	select_path:
 *		If select_path is FALSE, then the address specified in ap is
 *		the path on which reset will be issued.
 *		If select_path is TRUE, then path is obtained by calling
 *		mdi_select_path.
 *
 *	recovery_depth:
 *		Caller can specify the level of reset.
 *		VHCI_DEPTH_LUN -
 *			Issues LUN RESET if device supports lun reset.
 *		VHCI_DEPTH_TARGET -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET
 *		VHCI_DEPTH_ALL -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET.
 *			If TARGET RESET does not succeed, issues Bus Reset.
 */

static int
vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
    uint8_t select_path, uint8_t recovery_depth)
{
	int	ret = 0;

	ASSERT(ap != NULL);

	if (vlun && vlun->svl_support_lun_reset == 1) {
		ret = vhci_scsi_reset_target(ap, RESET_LUN,
		    select_path);
	}

	recovery_depth--;

	if ((ret == 0) && recovery_depth) {
		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
		    select_path);
		recovery_depth--;
	}

	if ((ret == 0) && recovery_depth) {
		(void) scsi_reset(ap, RESET_ALL);
	}

	return (ret);
}

/*
 * Note: The scsi_address passed to this routine could be the scsi_address
 * for the virtual device or the physical device. No assumptions should be
 * made in this routine about the contents of the ap structure.
 * Further, note that the child dip would be the dip of the ssd node regardless
 * of the scsi_address passed in.
 */
static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
	dev_info_t		*vdip, *cdip;
	mdi_pathinfo_t		*pip = NULL;
	mdi_pathinfo_t		*npip = NULL;
	int			rval = -1;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	struct scsi_vhci	*vhci = NULL;

	if (select_path != TRUE) {
		ASSERT(ap != NULL);
		if (level == RESET_LUN) {
			hba = ap->a_hba_tran;
			ASSERT(hba != NULL);
			return (hba->tran_reset(ap, RESET_LUN));
		}
		return (scsi_reset(ap, level));
	}

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	vdip = ddi_get_parent(cdip);
	ASSERT(vdip != NULL);
	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "Unable to get a path, dip 0x%p", (void *)cdip));
		return (0);
	}
again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (0);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (0);
	}

	pap = &svp->svp_psd->sd_address;
	hba = pap->a_hba_tran;

	ASSERT(pap != NULL);
	ASSERT(hba != NULL);

	if (hba->tran_reset != NULL) {
		if (hba->tran_reset(pap, level) == 0) {
			vhci_log(CE_WARN, vdip, "!%s%d: "
			    "path %s, reset %d failed",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    mdi_pi_spathname(pip), level);

			/*
			 * Select next path and issue the reset, repeat
			 * until all paths are exhausted
			 */
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_ONLINE_PATH, pip, &npip);
			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
				mdi_rele_path(pip);
				return (0);
			}
			mdi_rele_path(pip);
			pip = npip;
			goto again;
		}
		mdi_rele_path(pip);
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
		    "reset %d sent down pip:%p for cdip:%p\n", level,
		    (void *)pip, (void *)cdip));
		return (1);
	}
	mdi_rele_path(pip);
	return (0);
}


/* ARGSUSED */
static int
vhci_scsi_reset_bus(struct scsi_address *ap)
{
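	/*
	 * There is no single physical bus behind the virtual HBA to
	 * reset, so simply claim success.
	 */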
1712 return (1);
1713 }
1714
1715
1716 /*
1717 * called by vhci_getcap and vhci_setcap to get and set (respectively)
1718 * SCSI capabilities
1719 */
1720 /* ARGSUSED */
1721 static int
vhci_commoncap(struct scsi_address * ap,char * cap,int val,int tgtonly,int doset)1722 vhci_commoncap(struct scsi_address *ap, char *cap,
1723 int val, int tgtonly, int doset)
1724 {
1725 struct scsi_vhci *vhci = ADDR2VHCI(ap);
1726 struct scsi_vhci_lun *vlun = ADDR2VLUN(ap);
1727 int cidx;
1728 int rval = 0;
1729
1730 if (cap == (char *)0) {
1731 VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1732 "!vhci_commoncap: invalid arg"));
1733 return (rval);
1734 }
1735
1736 if (vlun == NULL) {
1737 VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1738 "!vhci_commoncap: vlun is null"));
1739 return (rval);
1740 }
1741
1742 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
1743 return (UNDEFINED);
1744 }
1745
1746 /*
1747 * Process setcap request.
1748 */
1749 if (doset) {
1750 /*
1751 * At present, we can only set binary (0/1) values
1752 */
1753 switch (cidx) {
1754 case SCSI_CAP_ARQ:
1755 if (val == 0) {
1756 rval = 0;
1757 } else {
1758 rval = 1;
1759 }
1760 break;
1761
1762 case SCSI_CAP_LUN_RESET:
1763 if (tgtonly == 0) {
1764 VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1765 "scsi_vhci_setcap: "
1766 "Returning error since whom = 0"));
1767 rval = -1;
1768 break;
1769 }
1770 /*
1771 * Set the capability accordingly.
1772 */
1773 mutex_enter(&vlun->svl_mutex);
1774 vlun->svl_support_lun_reset = val;
1775 rval = val;
1776 mutex_exit(&vlun->svl_mutex);
1777 break;
1778
1779 case SCSI_CAP_SECTOR_SIZE:
1780 mutex_enter(&vlun->svl_mutex);
1781 vlun->svl_sector_size = val;
1782 vlun->svl_setcap_done = 1;
1783 mutex_exit(&vlun->svl_mutex);
1784 (void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);
1785
1786 /* Always return success */
1787 rval = 1;
1788 break;
1789
1790 default:
1791 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1792 "!vhci_setcap: unsupported %d", cidx));
1793 rval = UNDEFINED;
1794 break;
1795 }
1796
1797 VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1798 "!set cap: cap=%s, val/tgtonly/doset/rval = "
1799 "0x%x/0x%x/0x%x/%d\n",
1800 cap, val, tgtonly, doset, rval));
1801
1802 } else {
1803 /*
1804 * Process getcap request.
1805 */
1806 switch (cidx) {
1807 case SCSI_CAP_DMA_MAX:
1808 /*
1809 * For X86 this capability is caught in scsi_ifgetcap().
1810 * XXX Should this be getting the value from the pHCI?
1811 */
1812 rval = (int)VHCI_DMA_MAX_XFER_CAP;
1813 break;
1814
1815 case SCSI_CAP_INITIATOR_ID:
1816 rval = 0x00;
1817 break;
1818
1819 case SCSI_CAP_ARQ:
1820 case SCSI_CAP_RESET_NOTIFICATION:
1821 case SCSI_CAP_TAGGED_QING:
1822 rval = 1;
1823 break;
1824
1825 case SCSI_CAP_SCSI_VERSION:
1826 rval = 3;
1827 break;
1828
1829 case SCSI_CAP_INTERCONNECT_TYPE:
1830 rval = INTERCONNECT_FABRIC;
1831 break;
1832
1833 case SCSI_CAP_LUN_RESET:
1834 /*
1835 * scsi_vhci will always return success for LUN reset.
1836 * When a request for a LUN reset comes in
1837 * through the scsi_reset entry point, an attempt
1838 * will be made to do the reset through all possible
1839 * paths.
1840 */
1841 mutex_enter(&vlun->svl_mutex);
1842 rval = vlun->svl_support_lun_reset;
1843 mutex_exit(&vlun->svl_mutex);
1844 VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
1845 "scsi_vhci_getcap:"
1846 "Getting the Lun reset capability %d", rval));
1847 break;
1848
1849 case SCSI_CAP_SECTOR_SIZE:
1850 mutex_enter(&vlun->svl_mutex);
1851 rval = vlun->svl_sector_size;
1852 mutex_exit(&vlun->svl_mutex);
1853 break;
1854
1855 case SCSI_CAP_CDB_LEN:
1856 rval = VHCI_SCSI_CDB_SIZE;
1857 break;
1858
1859 case SCSI_CAP_DMA_MAX_ARCH:
1860 /*
1861 * For X86 this capability is caught in scsi_ifgetcap().
1862 * XXX Should this be getting the value from the pHCI?
1863 */
1864 rval = 0;
1865 break;
1866
1867 default:
1868 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1869 "!vhci_getcap: unsupported %d", cidx));
1870 rval = UNDEFINED;
1871 break;
1872 }
1873
1874 VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1875 "!get cap: cap=%s, val/tgtonly/doset/rval = "
1876 "0x%x/0x%x/0x%x/%d\n",
1877 cap, val, tgtonly, doset, rval));
1878 }
1879 return (rval);
1880 }
1881
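/*
 * Illustrative sketch (not part of the driver): target drivers reach
 * vhci_commoncap() above via the scsi_ifgetcap(9F)/scsi_ifsetcap(9F)
 * interfaces.  Given the cases handled there, a caller could do
 * something like the following ("ap" is assumed to be the caller's
 * scsi_address):
 *
 *	int tq = scsi_ifgetcap(ap, "tagged-qing", 1);	// always 1 here
 *
 *	// whom (tgtonly) must be non-zero, or the setcap returns -1
 *	if (scsi_ifsetcap(ap, "lun-reset", 1, 1) != 1)
 *		cmn_err(CE_NOTE, "!lun-reset could not be enabled");
 */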
1882
1883 /*
1884 * Function name : vhci_scsi_getcap()
1885 *
1886 */
1887 static int
1888 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
1889 {
1890 return (vhci_commoncap(ap, cap, 0, whom, 0));
1891 }
1892
1893 static int
1894 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1895 {
1896 return (vhci_commoncap(ap, cap, value, whom, 1));
1897 }
1898
1899 /*
1900 * Function name : vhci_scsi_abort()
1901 */
1902 /* ARGSUSED */
1903 static int
1904 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1905 {
1906 return (0);
1907 }
1908
1909 /*
1910 * Function name : vhci_scsi_init_pkt
1911 *
1912 * Return Values : pointer to scsi_pkt, or NULL
1913 */
1914 /* ARGSUSED */
1915 static struct scsi_pkt *
1916 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1917 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1918 int flags, int (*callback)(caddr_t), caddr_t arg)
1919 {
1920 struct scsi_vhci *vhci = ADDR2VHCI(ap);
1921 struct vhci_pkt *vpkt;
1922 int rval;
1923 int newpkt = 0;
1924 struct scsi_pkt *pktp;
1925
1926
1927 if (pkt == NULL) {
1928 if (cmdlen > VHCI_SCSI_CDB_SIZE) {
1929 if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) ||
1930 ((flags & VHCI_SCSI_OSD_PKT_FLAGS) !=
1931 VHCI_SCSI_OSD_PKT_FLAGS)) {
1932 VHCI_DEBUG(1, (CE_NOTE, NULL,
1933 "!init pkt: cdb size not supported\n"));
1934 return (NULL);
1935 }
1936 }
1937
1938 pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
1939 ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
1940 arg);
1941
1942 if (pktp == NULL) {
1943 return (NULL);
1944 }
1945
1946 /* Get the vhci's private structure */
1947 vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
1948 ASSERT(vpkt);
1949
1950 /* Save the target driver's packet */
1951 vpkt->vpkt_tgt_pkt = pktp;
1952
1953 /*
1954 * Save pkt_tgt_init_pkt fields if deferred binding
1955 * is needed or for other purposes.
1956 */
1957 vpkt->vpkt_tgt_init_pkt_flags = flags;
1958 vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
1959 vpkt->vpkt_state = VHCI_PKT_IDLE;
1960 vpkt->vpkt_tgt_init_cdblen = cmdlen;
1961 vpkt->vpkt_tgt_init_scblen = statuslen;
1962 newpkt = 1;
1963 } else { /* pkt not NULL */
1964 vpkt = pkt->pkt_ha_private;
1965 }
1966
1967 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
1968 "vpkt %p flags %x\n", (void *)vpkt, flags));
1969
1970 /* Clear any stale error flags */
1971 if (bp) {
1972 bioerror(bp, 0);
1973 }
1974
1975 vpkt->vpkt_tgt_init_bp = bp;
1976
1977 if (flags & PKT_DMA_PARTIAL) {
1978
1979 /*
1980 * Immediate binding is needed.
1981 * The target driver may not set this flag in the next invocation.
1982 * vhci has to remember that this flag was set during the first
1983 * invocation of vhci_scsi_init_pkt.
1984 */
1985 vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
1986 }
1987
1988 if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
1989
1990 /*
1991 * Re-initialize some of the target driver packet state
1992 * information.
1993 */
1994 vpkt->vpkt_tgt_pkt->pkt_state = 0;
1995 vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
1996 vpkt->vpkt_tgt_pkt->pkt_reason = 0;
1997
1998 /*
1999 * Bind vpkt->vpkt_path for this IO at init time.
2000 * If an IO error happens later, the target driver will clear
2001 * this vpkt->vpkt_path binding before re-initializing the IO.
2002 */
2003 VHCI_DEBUG(8, (CE_NOTE, NULL,
2004 "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
2005 (void *)vpkt, newpkt));
2006 if (pkt && vpkt->vpkt_hba_pkt) {
2007 VHCI_DEBUG(4, (CE_NOTE, NULL,
2008 "v_s_i_p calling update_pHCI_pkt resid %ld\n",
2009 pkt->pkt_resid));
2010 vhci_update_pHCI_pkt(vpkt, pkt);
2011 }
2012 if (callback == SLEEP_FUNC) {
2013 rval = vhci_bind_transport(
2014 ap, vpkt, flags, callback);
2015 } else {
2016 rval = vhci_bind_transport(
2017 ap, vpkt, flags, NULL_FUNC);
2018 }
2019 VHCI_DEBUG(8, (CE_NOTE, NULL,
2020 "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
2021 (void *)vpkt, rval));
2022 if (bp) {
2023 if (rval == TRAN_FATAL_ERROR) {
2024 /*
2025 * No paths available. Could not bind
2026 * any pHCI. Setting EFAULT as a way
2027 * to indicate no DMA is mapped.
2028 */
2029 bioerror(bp, EFAULT);
2030 } else {
2031 /*
2032 * Do not indicate any pHCI errors to
2033 * target driver otherwise.
2034 */
2035 bioerror(bp, 0);
2036 }
2037 }
2038 if (rval != TRAN_ACCEPT) {
2039 VHCI_DEBUG(8, (CE_NOTE, NULL,
2040 "vhci_scsi_init_pkt: "
2041 "v_b_t failed 0x%p newpkt %x\n",
2042 (void *)vpkt, newpkt));
2043 if (newpkt) {
2044 scsi_hba_pkt_free(ap,
2045 vpkt->vpkt_tgt_pkt);
2046 }
2047 return (NULL);
2048 }
2049 ASSERT(vpkt->vpkt_hba_pkt != NULL);
2050 ASSERT(vpkt->vpkt_path != NULL);
2051
2052 /* Update the resid for the target driver */
2053 vpkt->vpkt_tgt_pkt->pkt_resid =
2054 vpkt->vpkt_hba_pkt->pkt_resid;
2055 }
2056
2057 return (vpkt->vpkt_tgt_pkt);
2058 }
2059
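/*
 * Illustrative sketch (not part of the driver): the PKT_DMA_PARTIAL
 * handling above is driven by a target driver allocating its packet
 * through scsi_init_pkt(9F), roughly as follows ("ap" and "bp" are
 * assumed to belong to the caller):
 *
 *	struct scsi_pkt *pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, PKT_DMA_PARTIAL,
 *	    SLEEP_FUNC, NULL);
 *
 *	// A non-zero pkt_resid means only a partial window was mapped;
 *	// the driver transports the command, then calls scsi_init_pkt()
 *	// again with the same pkt for the next window, re-entering
 *	// vhci_scsi_init_pkt() with CFLAG_DMA_PARTIAL remembered.
 */
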
2060 /*
2061 * Function name : vhci_scsi_destroy_pkt
2062 *
2063 * Return Values : none
2064 */
2065 static void
2066 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2067 {
2068 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2069
2070 VHCI_DEBUG(8, (CE_NOTE, NULL,
2071 "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
2072
2073 vpkt->vpkt_tgt_init_pkt_flags = 0;
2074 if (vpkt->vpkt_hba_pkt) {
2075 scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2076 vpkt->vpkt_hba_pkt = NULL;
2077 }
2078 if (vpkt->vpkt_path) {
2079 mdi_rele_path(vpkt->vpkt_path);
2080 vpkt->vpkt_path = NULL;
2081 }
2082
2083 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
2084 scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
2085 }
2086
2087 /*
2088 * Function name : vhci_scsi_dmafree()
2089 *
2090 * Return Values : none
2091 */
2092 /*ARGSUSED*/
2093 static void
2094 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2095 {
2096 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2097
2098 VHCI_DEBUG(6, (CE_NOTE, NULL,
2099 "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
2100
2101 ASSERT(vpkt != NULL);
2102 if (vpkt->vpkt_hba_pkt) {
2103 scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2104 vpkt->vpkt_hba_pkt = NULL;
2105 }
2106 if (vpkt->vpkt_path) {
2107 mdi_rele_path(vpkt->vpkt_path);
2108 vpkt->vpkt_path = NULL;
2109 }
2110 }
2111
2112 /*
2113 * Function name : vhci_scsi_sync_pkt()
2114 *
2115 * Return Values : none
2116 */
2117 /*ARGSUSED*/
2118 static void
2119 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2120 {
2121 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2122
2123 ASSERT(vpkt != NULL);
2124 if (vpkt->vpkt_hba_pkt) {
2125 scsi_sync_pkt(vpkt->vpkt_hba_pkt);
2126 }
2127 }
2128
2129 /*
2130 * routine for reset notification setup, to register or cancel.
2131 */
2132 static int
2133 vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
2134 void (*callback)(caddr_t), caddr_t arg)
2135 {
2136 struct scsi_vhci *vhci = ADDR2VHCI(ap);
2137 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
2138 &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
2139 }
2140
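/*
 * Illustrative sketch (not part of the driver): target drivers register
 * for the notifications delivered by scsi_hba_reset_notify_callback()
 * (see vhci_scsi_reset_target() above) through scsi_reset_notify(9F):
 *
 *	static void
 *	xx_reset_cb(caddr_t arg)	// hypothetical callback
 *	{
 *		// invoked after a reset has been issued on some path
 *	}
 *
 *	(void) scsi_reset_notify(ap, SCSI_RESET_NOTIFY, xx_reset_cb,
 *	    (caddr_t)xx_state);
 *
 * Passing SCSI_RESET_CANCEL instead removes the registration.
 */
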
2141 static int
2142 vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
2143 char *name, int len, int bus_addr)
2144 {
2145 dev_info_t *cdip;
2146 char *guid;
2147 scsi_vhci_lun_t *vlun;
2148
2149 ASSERT(sd != NULL);
2150 ASSERT(name != NULL);
2151
2152 *name = 0;
2153 cdip = sd->sd_dev;
2154
2155 ASSERT(cdip != NULL);
2156
2157 if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS)
2158 return (1);
2159
2160 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
2161 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS)
2162 return (1);
2163
2164 /*
2165 * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>".
2166 * <guid> bus_addr argument == 0
2167 * <bus_addr> bus_addr argument != 0
2168 * Since the <guid> is already provided with the unit-address, we just
2169 * provide the failover module name in <bus_addr> to keep the output shorter.
2170 */
2171 vlun = ADDR2VLUN(&sd->sd_address);
2172 if (bus_addr == 0) {
2173 /* report the guid: */
2174 (void) snprintf(name, len, "g%s", guid);
2175 } else if (vlun && vlun->svl_fops_name) {
2176 /* report the name of the failover module */
2177 (void) snprintf(name, len, "%s", vlun->svl_fops_name);
2178 }
2179
2180 ddi_prop_free(guid);
2181 return (1);
2182 }
2183
2184 static int
2185 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
2186 {
2187 return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
2188 }
2189
2190 static int
2191 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
2192 {
2193 return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
2194 }
2195
2196 /*
2197 * Return a pointer to the guid part of the devnm.
2198 * devnm format is "nodename@busaddr", busaddr format is "gGUID".
2199 */
2200 static char *
2201 vhci_devnm_to_guid(char *devnm)
2202 {
2203 char *cp = devnm;
2204
2205 if (devnm == NULL)
2206 return (NULL);
2207
2208 while (*cp != '\0' && *cp != '@')
2209 cp++;
2210 if (*cp == '@' && *(cp + 1) == 'g')
2211 return (cp + 2);
2212 return (NULL);
2213 }
2214
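/*
 * For example, given the devnm "ssd@g60060160e1d02100" (a hypothetical
 * client name), vhci_devnm_to_guid() returns a pointer to
 * "60060160e1d02100"; for a devnm whose busaddr does not start with
 * 'g', or one with no '@' at all, it returns NULL.
 */
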
2215 static int
2216 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
2217 int (*func)(caddr_t))
2218 {
2219 struct scsi_vhci *vhci = ADDR2VHCI(ap);
2220 dev_info_t *cdip = ADDR2DIP(ap);
2221 mdi_pathinfo_t *pip = NULL;
2222 mdi_pathinfo_t *npip = NULL;
2223 scsi_vhci_priv_t *svp = NULL;
2224 struct scsi_device *psd = NULL;
2225 struct scsi_address *address = NULL;
2226 struct scsi_pkt *pkt = NULL;
2227 int rval = -1;
2228 int pgr_sema_held = 0;
2229 int held;
2230 int mps_flag = MDI_SELECT_ONLINE_PATH;
2231 struct scsi_vhci_lun *vlun;
2232 int path_instance = 0;
2233
2234 vlun = ADDR2VLUN(ap);
2235 ASSERT(vlun != 0);
2236
2237 if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
2238 (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2239 VHCI_PROUT_REGISTER) ||
2240 ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2241 VHCI_PROUT_R_AND_IGNORE))) {
2242 if (!sema_tryp(&vlun->svl_pgr_sema))
2243 return (TRAN_BUSY);
2244 pgr_sema_held = 1;
2245 if (vlun->svl_first_path != NULL) {
2246 rval = mdi_select_path(cdip, NULL,
2247 MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2248 NULL, &pip);
2249 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2250 VHCI_DEBUG(4, (CE_NOTE, NULL,
2251 "vhci_bind_transport: path select fail\n"));
2252 } else {
2253 npip = pip;
2254 do {
2255 if (npip == vlun->svl_first_path) {
2256 VHCI_DEBUG(4, (CE_NOTE, NULL,
2257 "vhci_bind_transport: "
2258 "valid first path 0x%p\n",
2259 (void *)
2260 vlun->svl_first_path));
2261 pip = vlun->svl_first_path;
2262 goto bind_path;
2263 }
2264 pip = npip;
2265 rval = mdi_select_path(cdip, NULL,
2266 MDI_SELECT_ONLINE_PATH |
2267 MDI_SELECT_STANDBY_PATH,
2268 pip, &npip);
2269 mdi_rele_path(pip);
2270 } while ((rval == MDI_SUCCESS) &&
2271 (npip != NULL));
2272 }
2273 }
2274
2275 if (vlun->svl_first_path) {
2276 VHCI_DEBUG(4, (CE_NOTE, NULL,
2277 "vhci_bind_transport: invalid first path 0x%p\n",
2278 (void *)vlun->svl_first_path));
2279 vlun->svl_first_path = NULL;
2280 }
2281 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2282 if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2283 if (!sema_tryp(&vlun->svl_pgr_sema))
2284 return (TRAN_BUSY);
2285 }
2286 pgr_sema_held = 1;
2287 }
2288
2289 /*
2290 * If the path is already bound for PKT_PARTIAL_DMA case,
2291 * try to use the same path.
2292 */
2293 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2294 VHCI_DEBUG(4, (CE_NOTE, NULL,
2295 "vhci_bind_transport: PKT_PARTIAL_DMA "
2296 "vpkt 0x%p, path 0x%p\n",
2297 (void *)vpkt, (void *)vpkt->vpkt_path));
2298 pip = vpkt->vpkt_path;
2299 goto bind_path;
2300 }
2301
2302 /*
2303 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
2304 * indicates that mdi_select_path should be called to select a
2305 * specific instance.
2306 *
2307 * NB: Only reference pkt_path_instance if the pkt was allocated correctly.
2308 */
2309 if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
2310 scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
2311 path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2312 }
2313
2314 /*
2315 * If reservation is active bind the transport directly to the pip
2316 * with the reservation.
2317 */
2318 if (vpkt->vpkt_hba_pkt == NULL) {
2319 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2320 if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2321 pip = vlun->svl_resrv_pip;
2322 mdi_hold_path(pip);
2323 vlun->svl_waiting_for_activepath = 0;
2324 rval = MDI_SUCCESS;
2325 goto bind_path;
2326 } else {
2327 if (pgr_sema_held) {
2328 sema_v(&vlun->svl_pgr_sema);
2329 }
2330 return (TRAN_BUSY);
2331 }
2332 }
2333 try_again:
2334 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2335 path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2336 (void *)(intptr_t)path_instance, &pip);
2337 if (rval == MDI_BUSY) {
2338 if (pgr_sema_held) {
2339 sema_v(&vlun->svl_pgr_sema);
2340 }
2341 return (TRAN_BUSY);
2342 } else if (rval == MDI_DEVI_ONLINING) {
2343 /*
2344 * if we are here then we are in the midst of
2345 * an attach/probe of the client device.
2346 * We attempt to bind to ONLINE path if available,
2347 * else it is OK to bind to a STANDBY path (instead
2348 * of triggering a failover) because IO associated
2349 * with attach/probe (e.g. INQUIRY, block 0 read)
2350 * is completed by targets even on passive paths.
2351 * If no ONLINE paths are available, it is important
2352 * to set svl_waiting_for_activepath for two
2353 * reasons: (1) avoid sense analysis in the
2354 * "external failure detection" codepath in
2355 * vhci_intr(). Failure to do so will result in
2356 * infinite loop (unless an ONLINE path becomes
2357 * available at some point) (2) avoid
2358 * unnecessary failover (see "---Waiting For Active
2359 * Path---" comment below).
2360 */
2361 VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2362 "state\n", (void *)cdip));
2363 pip = NULL;
2364 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2365 mps_flag, NULL, &pip);
2366 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2367 if (vlun->svl_waiting_for_activepath == 0) {
2368 vlun->svl_waiting_for_activepath = 1;
2369 vlun->svl_wfa_time = gethrtime();
2370 }
2371 mps_flag |= MDI_SELECT_STANDBY_PATH;
2372 rval = mdi_select_path(cdip,
2373 vpkt->vpkt_tgt_init_bp,
2374 mps_flag, NULL, &pip);
2375 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2376 if (pgr_sema_held) {
2377 sema_v(&vlun->svl_pgr_sema);
2378 }
2379 return (TRAN_FATAL_ERROR);
2380 }
2381 goto bind_path;
2382 }
2383 } else if ((rval == MDI_FAILURE) ||
2384 ((rval == MDI_NOPATH) && (path_instance))) {
2385 if (pgr_sema_held) {
2386 sema_v(&vlun->svl_pgr_sema);
2387 }
2388 return (TRAN_FATAL_ERROR);
2389 }
2390
2391 if ((pip == NULL) || (rval == MDI_NOPATH)) {
2392 while (vlun->svl_waiting_for_activepath) {
2393 /*
2394 * ---Waiting For Active Path---
2395 * This device was discovered across a
2396 * passive path; let's wait for a little
2397 * bit, hopefully an active path will
2398 * show up obviating the need for a
2399 * failover
2400 */
2401 if ((gethrtime() - vlun->svl_wfa_time) >=
2402 (60 * NANOSEC)) {
2403 vlun->svl_waiting_for_activepath = 0;
2404 } else {
2405 drv_usecwait(1000);
2406 if (vlun->svl_waiting_for_activepath
2407 == 0) {
2408 /*
2409 * an active path has come
2410 * online!
2411 */
2412 goto try_again;
2413 }
2414 }
2415 }
2416 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2417 if (!held) {
2418 VHCI_DEBUG(4, (CE_NOTE, NULL,
2419 "!Lun not held\n"));
2420 if (pgr_sema_held) {
2421 sema_v(&vlun->svl_pgr_sema);
2422 }
2423 return (TRAN_BUSY);
2424 }
2425 /*
2426 * now that the LUN is stable, one last check
2427 * to make sure no other changes sneaked in
2428 * (like a path coming online or a
2429 * failover initiated by another thread)
2430 */
2431 pip = NULL;
2432 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2433 0, NULL, &pip);
2434 if (pip != NULL) {
2435 VHCI_RELEASE_LUN(vlun);
2436 vlun->svl_waiting_for_activepath = 0;
2437 goto bind_path;
2438 }
2439
2440 /*
2441 * Check if there is an ONLINE path OR a STANDBY path
2442 * available. If none is available, do not attempt
2443 * to do a failover, just return a fatal error at this
2444 * point.
2445 */
2446 npip = NULL;
2447 rval = mdi_select_path(cdip, NULL,
2448 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2449 NULL, &npip);
2450 if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2451 /*
2452 * No paths available, just return a FATAL error.
2453 */
2454 VHCI_RELEASE_LUN(vlun);
2455 if (pgr_sema_held) {
2456 sema_v(&vlun->svl_pgr_sema);
2457 }
2458 return (TRAN_FATAL_ERROR);
2459 }
2460 mdi_rele_path(npip);
2461 if (!(vpkt->vpkt_state & VHCI_PKT_IN_FAILOVER)) {
2462 VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2463 "mdi_failover\n"));
2464 rval = mdi_failover(vhci->vhci_dip, cdip,
2465 MDI_FAILOVER_ASYNC);
2466 } else {
2467 rval = vlun->svl_failover_status;
2468 }
2469 if (rval == MDI_FAILURE) {
2470 VHCI_RELEASE_LUN(vlun);
2471 if (pgr_sema_held) {
2472 sema_v(&vlun->svl_pgr_sema);
2473 }
2474 return (TRAN_FATAL_ERROR);
2475 } else if (rval == MDI_BUSY) {
2476 VHCI_RELEASE_LUN(vlun);
2477 if (pgr_sema_held) {
2478 sema_v(&vlun->svl_pgr_sema);
2479 }
2480 return (TRAN_BUSY);
2481 } else {
2482 if (pgr_sema_held) {
2483 sema_v(&vlun->svl_pgr_sema);
2484 }
2485 vpkt->vpkt_state |= VHCI_PKT_IN_FAILOVER;
2486 return (TRAN_BUSY);
2487 }
2488 }
2489 vlun->svl_waiting_for_activepath = 0;
2490 bind_path:
2491 vpkt->vpkt_path = pip;
2492 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2493 ASSERT(svp != NULL);
2494
2495 psd = svp->svp_psd;
2496 ASSERT(psd != NULL);
2497 address = &psd->sd_address;
2498 } else {
2499 pkt = vpkt->vpkt_hba_pkt;
2500 address = &pkt->pkt_address;
2501 }
2502
2503 /* Verify match of specified path_instance and selected path_instance */
2504 ASSERT((path_instance == 0) ||
2505 (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));
2506
2507 /*
2508 * For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever
2509 * target driver calls vhci_scsi_init_pkt.
2510 */
2511 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2512 vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2513 VHCI_DEBUG(4, (CE_NOTE, NULL,
2514 "vhci_bind_transport: PKT_PARTIAL_DMA "
2515 "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2516 (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2517 pkt = vpkt->vpkt_hba_pkt;
2518 address = &pkt->pkt_address;
2519 }
2520
2521 if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2522 pkt = scsi_init_pkt(address, pkt,
2523 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2524 vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL);
2525
2526 if (pkt == NULL) {
2527 VHCI_DEBUG(4, (CE_NOTE, NULL,
2528 "!bind transport: 0x%p 0x%p 0x%p\n",
2529 (void *)vhci, (void *)psd, (void *)vpkt));
2530 if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2531 MDI_PI_ERRSTAT(vpkt->vpkt_path,
2532 MDI_PI_TRANSERR);
2533 mdi_rele_path(vpkt->vpkt_path);
2534 vpkt->vpkt_path = NULL;
2535 }
2536 if (pgr_sema_held) {
2537 sema_v(&vlun->svl_pgr_sema);
2538 }
2539 /*
2540 * Consider it a fatal error if b_error is
2541 * set as a result of DMA binding failure
2542 * vs. a condition of being temporarily out of
2543 * some resource
2544 */
2545 if (vpkt->vpkt_tgt_init_bp == NULL ||
2546 geterror(vpkt->vpkt_tgt_init_bp))
2547 return (TRAN_FATAL_ERROR);
2548 else
2549 return (TRAN_BUSY);
2550 }
2551 }
2552
2553 pkt->pkt_private = vpkt;
2554 vpkt->vpkt_hba_pkt = pkt;
2555 return (TRAN_ACCEPT);
2556 }
2557
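/*
 * In summary, vhci_bind_transport() above selects a path in this order
 * (a sketch of the existing control flow, not additional behavior):
 *
 *	1. PROUT register/register-and-ignore: serialize on svl_pgr_sema
 *	   and prefer svl_first_path if it is still selectable.
 *	2. CFLAG_DMA_PARTIAL with a path already bound: reuse vpkt_path.
 *	3. Active SCSI-II reservation: bind to svl_resrv_pip, or fail
 *	   with TRAN_BUSY if that path is no longer online.
 *	4. Otherwise mdi_select_path(), waiting up to 60 seconds for an
 *	   active path (svl_waiting_for_activepath) and falling back to
 *	   an asynchronous mdi_failover() before giving up.
 */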
2558
2559 /*PRINTFLIKE3*/
2560 void
2561 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2562 {
2563 char buf[256];
2564 va_list ap;
2565
2566 va_start(ap, fmt);
2567 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
2568 va_end(ap);
2569
2570 scsi_log(dip, "scsi_vhci", level, buf);
2571 }
2572
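/*
 * vhci_log() is used like cmn_err(9F) with an explicit dip, e.g.:
 *
 *	vhci_log(CE_WARN, vdip, "!%s%d: path %s, reset %d failed",
 *	    ddi_driver_name(cdip), ddi_get_instance(cdip),
 *	    mdi_pi_spathname(pip), level);
 *
 * Per scsi_log(9F) conventions, a leading '!' sends the message to the
 * system log only.
 */
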
2573 /* do a PGR out with the information we've saved away */
2574 static int
2575 vhci_do_prout(scsi_vhci_priv_t *svp)
2576 {
2577
2578 struct scsi_pkt *new_pkt;
2579 struct buf *bp;
2580 scsi_vhci_lun_t *vlun = svp->svp_svl;
2581 int rval, retry, nr_retry, ua_retry;
2582 uint8_t *sns, skey;
2583
2584 bp = getrbuf(KM_SLEEP);
2585 bp->b_flags = B_WRITE;
2586 bp->b_resid = 0;
2587 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2588 bp->b_bcount = vlun->svl_bcount;
2589
2590 VHCI_INCR_PATH_CMDCOUNT(svp);
2591
2592 new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
2593 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
2594 SLEEP_FUNC, NULL);
2595 if (new_pkt == NULL) {
2596 VHCI_DECR_PATH_CMDCOUNT(svp);
2597 freerbuf(bp);
2598 cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
2599 return (0);
2600 }
2601 mutex_enter(&vlun->svl_mutex);
2602 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2603 bp->b_bcount = vlun->svl_bcount;
2604 bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
2605 sizeof (vlun->svl_cdb));
2606 new_pkt->pkt_time = vlun->svl_time;
2607 mutex_exit(&vlun->svl_mutex);
2608 new_pkt->pkt_flags = FLAG_NOINTR;
2609
2610 ua_retry = nr_retry = retry = 0;
2611 again:
2612 rval = vhci_do_scsi_cmd(new_pkt);
2613 if (rval != 1) {
2614 if ((new_pkt->pkt_reason == CMD_CMPLT) &&
2615 (SCBP_C(new_pkt) == STATUS_CHECK) &&
2616 (new_pkt->pkt_state & STATE_ARQ_DONE)) {
2617 sns = (uint8_t *)
2618 &(((struct scsi_arq_status *)(uintptr_t)
2619 (new_pkt->pkt_scbp))->sts_sensedata);
2620 skey = scsi_sense_key(sns);
2621 if ((skey == KEY_UNIT_ATTENTION) ||
2622 (skey == KEY_NOT_READY)) {
2623 int max_retry;
2624 struct scsi_failover_ops *fops;
2625 fops = vlun->svl_fops;
2626 rval = fops->sfo_analyze_sense(svp->svp_psd,
2627 sns, vlun->svl_fops_ctpriv);
2628 if (rval == SCSI_SENSE_NOT_READY) {
2629 max_retry = vhci_prout_not_ready_retry;
2630 retry = nr_retry++;
2631 delay(1*drv_usectohz(1000000));
2632 } else {
2633 /* chk for state change and update */
2634 if (rval == SCSI_SENSE_STATE_CHANGED) {
2635 int held;
2636 VHCI_HOLD_LUN(vlun,
2637 VH_NOSLEEP, held);
2638 if (!held) {
2639 rval = TRAN_BUSY;
2640 } else {
2641 /* chk for alua first */
2642 vhci_update_pathstates(
2643 (void *)vlun);
2644 }
2645 }
2646 retry = ua_retry++;
2647 max_retry = VHCI_MAX_PGR_RETRIES;
2648 }
2649 if (retry < max_retry) {
2650 VHCI_DEBUG(4, (CE_WARN, NULL,
2651 "!vhci_do_prout retry 0x%x "
2652 "(0x%x 0x%x 0x%x)",
2653 SCBP_C(new_pkt),
2654 new_pkt->pkt_cdbp[0],
2655 new_pkt->pkt_cdbp[1],
2656 new_pkt->pkt_cdbp[2]));
2657 goto again;
2658 }
2659 rval = 0;
2660 VHCI_DEBUG(4, (CE_WARN, NULL,
2661 "!vhci_do_prout 0x%x "
2662 "(0x%x 0x%x 0x%x)",
2663 SCBP_C(new_pkt),
2664 new_pkt->pkt_cdbp[0],
2665 new_pkt->pkt_cdbp[1],
2666 new_pkt->pkt_cdbp[2]));
2667 } else if (skey == KEY_ILLEGAL_REQUEST)
2668 rval = VHCI_PGR_ILLEGALOP;
2669 }
2670 } else {
2671 rval = 1;
2672 }
2673 scsi_destroy_pkt(new_pkt);
2674 VHCI_DECR_PATH_CMDCOUNT(svp);
2675 freerbuf(bp);
2676 return (rval);
2677 }
2678
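/*
 * Note on the retry policy above: a NOT READY sense results in up to
 * vhci_prout_not_ready_retry (180 by default) retries with a one
 * second delay between attempts; UNIT ATTENTION and state-change
 * results are retried up to VHCI_MAX_PGR_RETRIES times; ILLEGAL
 * REQUEST fails immediately with VHCI_PGR_ILLEGALOP.
 */
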
2679 static void
2680 vhci_run_cmd(void *arg)
2681 {
2682 struct scsi_pkt *pkt = (struct scsi_pkt *)arg;
2683 struct scsi_pkt *tpkt;
2684 scsi_vhci_priv_t *svp;
2685 mdi_pathinfo_t *pip, *npip;
2686 scsi_vhci_lun_t *vlun;
2687 dev_info_t *cdip;
2688 scsi_vhci_priv_t *nsvp;
2689 int fail = 0;
2690 int rval;
2691 struct vhci_pkt *vpkt;
2692 uchar_t cdb_1;
2693 vhci_prout_t *prout;
2694
2695 vpkt = (struct vhci_pkt *)pkt->pkt_private;
2696 tpkt = vpkt->vpkt_tgt_pkt;
2697 pip = vpkt->vpkt_path;
2698 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2699 if (svp == NULL) {
2700 tpkt->pkt_reason = CMD_TRAN_ERR;
2701 tpkt->pkt_statistics = STAT_ABORTED;
2702 goto done;
2703 }
2704 vlun = svp->svp_svl;
2705 prout = &vlun->svl_prout;
2706 if (SCBP_C(pkt) != STATUS_GOOD)
2707 fail++;
2708 cdip = vlun->svl_dip;
2709 pip = npip = NULL;
2710 rval = mdi_select_path(cdip, NULL,
2711 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip);
2712 if ((rval != MDI_SUCCESS) || (npip == NULL)) {
2713 VHCI_DEBUG(4, (CE_NOTE, NULL,
2714 "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
2715 tpkt->pkt_reason = CMD_TRAN_ERR;
2716 tpkt->pkt_statistics = STAT_ABORTED;
2717 goto done;
2718 }
2719
2720 cdb_1 = vlun->svl_cdb[1];
2721 vlun->svl_cdb[1] &= 0xe0;
2722 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
2723
2724 do {
2725 nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
2726 if (nsvp == NULL) {
2727 VHCI_DEBUG(4, (CE_NOTE, NULL,
2728 "vhci_run_cmd: no "
2729 "client priv! 0x%p offlined?\n",
2730 (void *)npip));
2731 goto next_path;
2732 }
2733 if (vlun->svl_first_path == npip) {
2734 goto next_path;
2735 } else {
2736 if (vhci_do_prout(nsvp) != 1)
2737 fail++;
2738 }
2739 next_path:
2740 pip = npip;
2741 rval = mdi_select_path(cdip, NULL,
2742 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
2743 pip, &npip);
2744 mdi_rele_path(pip);
2745 } while ((rval == MDI_SUCCESS) && (npip != NULL));
2746
2747 vlun->svl_cdb[1] = cdb_1;
2748
2749 if (fail) {
2750 VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
2751 "couldn't be replicated on all paths",
2752 ddi_driver_name(cdip), ddi_get_instance(cdip)));
2753 vhci_print_prout_keys(vlun, "vhci_run_cmd: ");
2754
2755 if (SCBP_C(pkt) != STATUS_GOOD) {
2756 tpkt->pkt_reason = CMD_TRAN_ERR;
2757 tpkt->pkt_statistics = STAT_ABORTED;
2758 }
2759 } else {
2760 vlun->svl_pgr_active = 1;
2761 vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");
2762
2763 bcopy((const void *)prout->service_key,
2764 (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
2765 bcopy((const void *)prout->res_key,
2766 (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);
2767
2768 vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
2769 }
2770 done:
2771 if (SCBP_C(pkt) == STATUS_GOOD)
2772 vlun->svl_first_path = NULL;
2773
2774 if (svp)
2775 VHCI_DECR_PATH_CMDCOUNT(svp);
2776
2777 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
2778 scsi_destroy_pkt(pkt);
2779 vpkt->vpkt_hba_pkt = NULL;
2780 if (vpkt->vpkt_path) {
2781 mdi_rele_path(vpkt->vpkt_path);
2782 vpkt->vpkt_path = NULL;
2783 }
2784 }
2785
2786 sema_v(&vlun->svl_pgr_sema);
2787 /*
2788 * The PROUT commands are not included in the automatic retry
2789 * mechanism, therefore, vpkt_org_vpkt should never be set here.
2790 */
2791 ASSERT(vpkt->vpkt_org_vpkt == NULL);
2792 scsi_hba_pkt_comp(tpkt);
2793 }
2794
2795 /*
2796 * Get the keys registered with this target. Since we will have
2797 * registered the same key with multiple initiators, strip out
2798 * any duplicate keys.
2799 *
2800 * The pointers which will be used to filter the registered keys from
2801 * the device will be stored in filter_prin and filter_pkt. If the
2802 * allocation length of the buffer was sufficient for the number of
2803 * parameter data bytes available to be returned by the device then the
2804 * key filtering will use the keylist returned from the original
2805 * request. If the allocation length of the buffer was not sufficient,
2806 * then the filtering will use the keylist returned from the request
2807 * that is resent below.
2808 *
2809 * If the device returns an additional length field that is greater than
2810 * the allocation length of the buffer, then allocate a new buffer which
2811 * can accommodate the number of parameter data bytes available to be
2812 * returned. Resend the scsi PRIN command, filter out the duplicate
2813 * keys and return as many of the unique keys found that was originally
2814 * requested and set the additional length field equal to the data bytes
2815 * of unique reservation keys available to be returned.
2816 *
2817 * If the device returns an additional length field that is less than or
2818 * equal to the allocation length of the buffer, then all the available
2819 * keys registered were returned by the device. Filter out the
2820 * duplicate keys and return all of the unique keys found and set the
2821 * additional length field equal to the data bytes of the reservation
2822 * keys to be returned.
2823 */
2824
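/*
 * As a worked example of the filtering: if the device reports five
 * registered keys {A, A, B, A, B} (one per initiator/path), the
 * filtered keylist becomes {A, B} and the returned ADDITIONAL LENGTH
 * is set to 2 * MHIOC_RESV_KEY_SIZE rather than 5 * MHIOC_RESV_KEY_SIZE.
 */
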
2825 #define VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation))
2826
2827 static int
2828 vhci_do_prin(struct vhci_pkt **intr_vpkt)
2829 {
2830 scsi_vhci_priv_t *svp;
2831 struct vhci_pkt *vpkt = *intr_vpkt;
2832 vhci_prin_readkeys_t *prin;
2833 scsi_vhci_lun_t *vlun;
2834 struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address);
2835
2836 struct buf *new_bp = NULL;
2837 struct scsi_pkt *new_pkt = NULL;
2838 struct vhci_pkt *new_vpkt = NULL;
2839 uint32_t needed_length;
2840 int rval = VHCI_CMD_CMPLT;
2841 uint32_t prin_length = 0;
2842 uint32_t svl_prin_length = 0;
2843
2844 ASSERT(vpkt->vpkt_path);
2845 svp = mdi_pi_get_vhci_private(vpkt->vpkt_path);
2846 ASSERT(svp);
2847 vlun = svp->svp_svl;
2848 ASSERT(vlun);
2849
2850 /*
2851 * If the caller only asked for an amount of data that would not
2852 * be enough to include any key data, it is likely that they will
2853 * send the next command with a buffer size based on the information
2854 * from this header. Doing recovery on this would be a duplication
2855 * of effort.
2856 */
2857 if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) {
2858 rval = VHCI_CMD_CMPLT;
2859 goto exit;
2860 }
2861
2862 if (vpkt->vpkt_org_vpkt == NULL) {
2863 /*
2864 * Can fail as sleep is not allowed.
2865 */
2866 prin = (vhci_prin_readkeys_t *)
2867 bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2868 } else {
2869 /*
2870 * The retry buf doesn't need to be mapped in.
2871 */
2872 prin = (vhci_prin_readkeys_t *)
2873 vpkt->vpkt_tgt_init_bp->b_un.b_daddr;
2874 }
2875
2876 if (prin == NULL) {
2877 VHCI_DEBUG(5, (CE_WARN, NULL,
2878 "vhci_do_prin: bp_mapin_common failed."));
2879 rval = VHCI_CMD_ERROR;
2880 goto fail;
2881 }
2882
2883 prin_length = BE_32(prin->length);
2884
2885 /*
2886 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2887 * information to be transferred exceeds the maximum value
2888 * that the ALLOCATION LENGTH field is capable of specifying,
2889 * the device server shall...terminate the command with CHECK
2890 * CONDITION status". The ALLOCATION LENGTH field of the
2891 * PERSISTENT RESERVE IN command is 2 bytes. We should never
2892 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
2893 * so if we do, then it is an error!
2894 */
2895
2896
2897 if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) {
2898 VHCI_DEBUG(5, (CE_NOTE, NULL,
2899 "vhci_do_prin: Device returned invalid "
2900 "length 0x%x\n", prin_length));
2901 rval = VHCI_CMD_ERROR;
2902 goto fail;
2903 }
2904 needed_length = prin_length + VHCI_PRIN_HEADER_SZ;
2905
2906 /*
2907 * If prin->length is greater than the byte count allocated in the
2908 * original buffer, then resend the request with enough buffer
2909 * allocated to get all of the available registered keys.
2910 */
2911 if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) &&
2912 (vpkt->vpkt_org_vpkt == NULL)) {
2913
2914 new_pkt = vhci_create_retry_pkt(vpkt);
2915 if (new_pkt == NULL) {
2916 rval = VHCI_CMD_ERROR;
2917 goto fail;
2918 }
2919 new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2920
2921 /*
2922 * This buf supplies the buffer into which
2923 * the prin readkeys will be returned
2924 * from the device.
2925 */
2926 new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
2927 NULL, needed_length, B_READ, NULL_FUNC, NULL);
2928 if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) {
2929 if (new_bp) {
2930 scsi_free_consistent_buf(new_bp);
2931 }
2932 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2933 rval = VHCI_CMD_ERROR;
2934 goto fail;
2935 }
2936 new_bp->b_bcount = needed_length;
2937 new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8);
2938 new_pkt->pkt_cdbp[8] = (uchar_t)needed_length;
2939
2940 rval = VHCI_CMD_RETRY;
2941
2942 new_vpkt->vpkt_tgt_init_bp = new_bp;
2943 }
2944
2945 if (rval == VHCI_CMD_RETRY) {
2946
2947 /*
2948 * There were more keys than the original request asked for.
2949 */
2950 mdi_pathinfo_t *path_holder = vpkt->vpkt_path;
2951
2952 /*
2953 * Release the old path because it does not matter which path
2954 * this command is sent down. This allows the normal bind
2955 * transport mechanism to be used.
2956 */
2957 if (vpkt->vpkt_path != NULL) {
2958 mdi_rele_path(vpkt->vpkt_path);
2959 vpkt->vpkt_path = NULL;
2960 }
2961
2962 /*
2963 * Dispatch the retry command
2964 */
2965 if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2966 (void *) new_vpkt, KM_NOSLEEP) == NULL) {
2967 if (path_holder) {
2968 vpkt->vpkt_path = path_holder;
2969 mdi_hold_path(path_holder);
2970 }
2971 scsi_free_consistent_buf(new_bp);
2972 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2973 rval = VHCI_CMD_ERROR;
2974 goto fail;
2975 }
2976
2977 /*
2978 * If we return VHCI_CMD_RETRY, that means the caller
2979 * is going to bail and wait for the reissued command
2980 * to complete. In that case, we need to decrement
2981 * the path command count right now. In any other
2982 * case, it'll be decremented by the caller.
2983 */
2984 VHCI_DECR_PATH_CMDCOUNT(svp);
2985 goto exit;
2986
2987 }
2988
2989 if (rval == VHCI_CMD_CMPLT) {
2990 /*
2991 * The original request got all of the keys, or the recovery
2992 * packet has returned.
2993 */
2994 int new;
2995 int old;
2996 int num_keys = prin_length / MHIOC_RESV_KEY_SIZE;
2997
2998 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
2999 num_keys));
3000
3001 #ifdef DEBUG
3002 VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
3003 if (vhci_debug == 5)
3004 vhci_print_prin_keys(prin, num_keys);
3005 VHCI_DEBUG(5, (CE_NOTE, NULL,
3006 "vhci_do_prin: MPxIO old keys:\n"));
3007 if (vhci_debug == 5)
3008 vhci_print_prin_keys(&vlun->svl_prin, num_keys);
3009 #endif
3010
3011 /*
3012 * Filter out all duplicate keys returned from the device
3013 * We know that we use a different key for every host, so we
3014 * can simply strip out duplicates. Otherwise we would need to
3015 * do more bookkeeping to figure out which keys to strip out.
3016 */
3017
3018 new = 0;
3019
3020 /*
3021 * If we got at least 1 key copy it.
3022 */
3023 if (num_keys > 0) {
3024 vlun->svl_prin.keylist[0] = prin->keylist[0];
3025 new++;
3026 }
3027
3028 /*
3029 * find next unique key.
3030 */
3031 for (old = 1; old < num_keys; old++) {
3032 int j;
3033 int match = 0;
3034
3035 if (new >= VHCI_NUM_RESV_KEYS)
3036 break;
3037 for (j = 0; j < new; j++) {
3038 if (bcmp(&prin->keylist[old],
3039 &vlun->svl_prin.keylist[j],
3040 sizeof (mhioc_resv_key_t)) == 0) {
3041 match = 1;
3042 break;
3043 }
3044 }
3045 if (!match) {
3046 vlun->svl_prin.keylist[new] =
3047 prin->keylist[old];
3048 new++;
3049 }
3050 }
3051
3052 /* Stored Big Endian */
3053 vlun->svl_prin.generation = prin->generation;
3054 svl_prin_length = new * sizeof (mhioc_resv_key_t);
3055 /* Stored Big Endian */
3056 vlun->svl_prin.length = BE_32(svl_prin_length);
3057 svl_prin_length += VHCI_PRIN_HEADER_SZ;
3058
3059 /*
3060 * If we arrived at this point after issuing a retry, make sure
3061 * that we put everything back the way it originally was so
3062 * that the target driver can complete the command correctly.
3063 */
3064 if (vpkt->vpkt_org_vpkt != NULL) {
3065 new_bp = vpkt->vpkt_tgt_init_bp;
3066
3067 scsi_free_consistent_buf(new_bp);
3068
3069 vpkt = vhci_sync_retry_pkt(vpkt);
3070 *intr_vpkt = vpkt;
3071
3072 /*
3073 * Make sure the original buffer is mapped into kernel
3074 * space before we try to copy the filtered keys into
3075 * it.
3076 */
3077 prin = (vhci_prin_readkeys_t *)bp_mapin_common(
3078 vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
3079 }
3080
3081 /*
3082 * Now copy the desired number of prin keys into the original
3083 * target buffer.
3084 */
3085 if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) {
3086 /*
3087 * It is safe to return all of the available unique
3088 * keys
3089 */
3090 bcopy(&vlun->svl_prin, prin, svl_prin_length);
3091 } else {
3092 /*
3093 * Not all of the available keys were requested by the
3094 * original command.
3095 */
3096 bcopy(&vlun->svl_prin, prin,
3097 vpkt->vpkt_tgt_init_bp->b_bcount);
3098 }
3099 #ifdef DEBUG
3100 VHCI_DEBUG(5, (CE_NOTE, NULL,
3101 "vhci_do_prin: To Application:\n"));
3102 if (vhci_debug == 5)
3103 vhci_print_prin_keys(prin, new);
3104 VHCI_DEBUG(5, (CE_NOTE, NULL,
3105 "vhci_do_prin: MPxIO new keys:\n"));
3106 if (vhci_debug == 5)
3107 vhci_print_prin_keys(&vlun->svl_prin, new);
3108 #endif
3109 }
3110 fail:
3111 if (rval == VHCI_CMD_ERROR) {
3112 /*
3113 * If we arrived at this point after issuing a
3114 * retry, make sure that we put everything back
3115 * the way it originally was so that ssd can
3116 * complete the command correctly.
3117 */
3118
3119 if (vpkt->vpkt_org_vpkt != NULL) {
3120 new_bp = vpkt->vpkt_tgt_init_bp;
3121 if (new_bp != NULL) {
3122 scsi_free_consistent_buf(new_bp);
3123 }
3124
3125 new_vpkt = vpkt;
3126 vpkt = vpkt->vpkt_org_vpkt;
3127
3128 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3129 new_vpkt->vpkt_tgt_pkt);
3130 }
3131
3132 /*
3133 * Mark this command completion as having an error so that
3134 * ssd will retry the command.
3135 */
3136
3137 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3138 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3139
3140 rval = VHCI_CMD_CMPLT;
3141 }
3142 exit:
3143 /*
3144 * Make sure that the semaphore is only released once.
3145 */
3146 if (rval == VHCI_CMD_CMPLT) {
3147 sema_v(&vlun->svl_pgr_sema);
3148 }
3149
3150 return (rval);
3151 }
3152
3153 static void
3154 vhci_intr(struct scsi_pkt *pkt)
3155 {
3156 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private;
3157 struct scsi_pkt *tpkt;
3158 scsi_vhci_priv_t *svp;
3159 scsi_vhci_lun_t *vlun;
3160 int rval, held;
3161 struct scsi_failover_ops *fops;
3162 uint8_t *sns, skey, asc, ascq;
3163 mdi_pathinfo_t *lpath;
3164 static char *timeout_err = "Command Timeout";
3165 static char *parity_err = "Parity Error";
3166 char *err_str = NULL;
3167 dev_info_t *vdip, *cdip;
3168 char *cpath;
3169
3170 ASSERT(vpkt != NULL);
3171 tpkt = vpkt->vpkt_tgt_pkt;
3172 ASSERT(tpkt != NULL);
3173 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3174 ASSERT(svp != NULL);
3175 vlun = svp->svp_svl;
3176 ASSERT(vlun != NULL);
3177 lpath = vpkt->vpkt_path;
3178
3179 /*
3180 * sync up the target driver's pkt with the pkt that
3181 * we actually used
3182 */
3183 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
3184 tpkt->pkt_resid = pkt->pkt_resid;
3185 tpkt->pkt_state = pkt->pkt_state;
3186 tpkt->pkt_statistics = pkt->pkt_statistics;
3187 tpkt->pkt_reason = pkt->pkt_reason;
3188
3189 /* Return path_instance information back to the target driver. */
3190 if (scsi_pkt_allocated_correctly(tpkt)) {
3191 if (scsi_pkt_allocated_correctly(pkt)) {
3192 /*
3193 * If both packets were correctly allocated,
3194 * return path returned by pHCI.
3195 */
3196 tpkt->pkt_path_instance = pkt->pkt_path_instance;
3197 } else {
3198 /* Otherwise return path of pHCI we used */
3199 tpkt->pkt_path_instance =
3200 mdi_pi_get_path_instance(lpath);
3201 }
3202 }
3203
3204 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3205 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3206 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3207 if ((SCBP_C(pkt) != STATUS_GOOD) ||
3208 (pkt->pkt_reason != CMD_CMPLT)) {
3209 sema_v(&vlun->svl_pgr_sema);
3210 }
3211 } else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3212 if (pkt->pkt_reason != CMD_CMPLT ||
3213 (SCBP_C(pkt) != STATUS_GOOD)) {
3214 sema_v(&vlun->svl_pgr_sema);
3215 }
3216 }
3217
3218 switch (pkt->pkt_reason) {
3219 case CMD_CMPLT:
3220 /*
3221 * cmd completed successfully, check for scsi errors
3222 */
3223 switch (*(pkt->pkt_scbp)) {
3224 case STATUS_CHECK:
3225 if (pkt->pkt_state & STATE_ARQ_DONE) {
3226 sns = (uint8_t *)
3227 &(((struct scsi_arq_status *)(uintptr_t)
3228 (pkt->pkt_scbp))->sts_sensedata);
3229 skey = scsi_sense_key(sns);
3230 asc = scsi_sense_asc(sns);
3231 ascq = scsi_sense_ascq(sns);
3232 fops = vlun->svl_fops;
3233 ASSERT(fops != NULL);
3234 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3235 "Received sns key %x esc %x escq %x\n",
3236 skey, asc, ascq));
3237
3238 if (vlun->svl_waiting_for_activepath == 1) {
3239 /*
3240 * if we are here it means we are
3241 * in the midst of a probe/attach
3242 * through a passive path; this
3243 * case is exempt from sense analysis
3244 * for detection of ext. failover
3245 * because that would unnecessarily
3246 * increase attach time.
3247 */
3248 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3249 vpkt->vpkt_tgt_init_scblen);
3250 break;
3251 }
3252 if (asc == VHCI_SCSI_PERR) {
3253 /*
3254 * parity error
3255 */
3256 err_str = parity_err;
3257 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3258 vpkt->vpkt_tgt_init_scblen);
3259 break;
3260 }
3261 rval = fops->sfo_analyze_sense(svp->svp_psd,
3262 sns, vlun->svl_fops_ctpriv);
3263 if ((rval == SCSI_SENSE_NOFAILOVER) ||
3264 (rval == SCSI_SENSE_UNKNOWN) ||
3265 (rval == SCSI_SENSE_NOT_READY)) {
3266 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3267 vpkt->vpkt_tgt_init_scblen);
3268 break;
3269 } else if (rval == SCSI_SENSE_STATE_CHANGED) {
3270 struct scsi_vhci *vhci;
3271 vhci = ADDR2VHCI(&tpkt->pkt_address);
3272 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3273 if (!held) {
3274 /*
3275 * looks like some other thread
3276 * has already detected this
3277 * condition
3278 */
3279 tpkt->pkt_state &=
3280 ~STATE_ARQ_DONE;
3281 *(tpkt->pkt_scbp) =
3282 STATUS_BUSY;
3283 break;
3284 }
3285 (void) taskq_dispatch(
3286 vhci->vhci_update_pathstates_taskq,
3287 vhci_update_pathstates,
3288 (void *)vlun, KM_SLEEP);
3289 } else {
3290 /*
3291 * externally initiated failover
3292 * has occurred or is in progress
3293 */
3294 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3295 if (!held) {
3296 /*
3297 * looks like some other thread
3298 * has already detected this
3299 * condition
3300 */
3301 tpkt->pkt_state &=
3302 ~STATE_ARQ_DONE;
3303 *(tpkt->pkt_scbp) =
3304 STATUS_BUSY;
3305 break;
3306 } else {
3307 rval = vhci_handle_ext_fo
3308 (pkt, rval);
3309 if (rval == BUSY_RETURN) {
3310 tpkt->pkt_state &=
3311 ~STATE_ARQ_DONE;
3312 *(tpkt->pkt_scbp) =
3313 STATUS_BUSY;
3314 break;
3315 }
3316 bcopy(pkt->pkt_scbp,
3317 tpkt->pkt_scbp,
3318 vpkt->vpkt_tgt_init_scblen);
3319 break;
3320 }
3321 }
3322 }
3323 break;
3324
3325 /*
3326 * If this is a good SCSI-II RELEASE cmd completion then restore
3327 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3328 * If this is a good SCSI-II RESERVE cmd completion then set
3329 * VLUN_RESERVE_ACTIVE_FLG.
3330 */
3331 case STATUS_GOOD:
3332 if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3333 (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3334 (void) mdi_set_lb_policy(vlun->svl_dip,
3335 vlun->svl_lb_policy_save);
3336 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3337 VHCI_DEBUG(1, (CE_WARN, NULL,
3338 "!vhci_intr: vlun 0x%p release path 0x%p",
3339 (void *)vlun, (void *)vpkt->vpkt_path));
3340 }
3341
3342 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3343 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3344 vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3345 vlun->svl_resrv_pip = vpkt->vpkt_path;
3346 VHCI_DEBUG(1, (CE_WARN, NULL,
3347 "!vhci_intr: vlun 0x%p reserved path 0x%p",
3348 (void *)vlun, (void *)vpkt->vpkt_path));
3349 }
3350 break;
3351
3352 case STATUS_RESERVATION_CONFLICT:
3353 VHCI_DEBUG(1, (CE_WARN, NULL,
3354 "!vhci_intr: vlun 0x%p "
3355 "reserve conflict on path 0x%p",
3356 (void *)vlun, (void *)vpkt->vpkt_path));
3357 /* FALLTHROUGH */
3358 default:
3359 break;
3360 }
3361
3362 /*
3363 * Update I/O completion statistics for the path
3364 */
3365 mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3366
3367 /*
3368 * Command completed successfully, release the dma binding and
3369 * destroy the transport side of the packet.
3370 */
3371 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3372 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3373 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3374 if (SCBP_C(pkt) == STATUS_GOOD) {
3375 ASSERT(vlun->svl_taskq);
3376 svp->svp_last_pkt_reason = pkt->pkt_reason;
3377 (void) taskq_dispatch(vlun->svl_taskq,
3378 vhci_run_cmd, pkt, KM_SLEEP);
3379 return;
3380 }
3381 }
3382 if ((SCBP_C(pkt) == STATUS_GOOD) &&
3383 (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
3384 /*
3385 * If the action (value in byte 1 of the cdb) is zero,
3386 * we're reading keys, and that's the only condition
3387 * where we need to be concerned with filtering keys
3388 * and potential retries. Otherwise, we simply signal
3389 * the semaphore and move on.
3390 */
3391 if (pkt->pkt_cdbp[1] == 0) {
3392 /*
3393 * If this is the completion of an internal
3394 * retry then we need to make sure that the
3395 * pkt and tpkt pointers are readjusted so
3396 * the calls to scsi_destroy_pkt and pkt_comp
3397 * below work correctly.
3398 */
3399 if (vpkt->vpkt_org_vpkt != NULL) {
3400 pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3401 tpkt = vpkt->vpkt_org_vpkt->
3402 vpkt_tgt_pkt;
3403
3404 /*
3405 * If this command was issued through
3406 * the taskq then we need to clear
3407 * this flag for proper processing in
3408 * the case of a retry from the target
3409 * driver.
3410 */
3411 vpkt->vpkt_state &=
3412 ~VHCI_PKT_THRU_TASKQ;
3413 }
3414
3415 /*
3416 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3417 * vpkt will contain the address of the
3418 * original vpkt
3419 */
3420 if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3421 /*
3422 * The command has been resent to get
3423 * all the keys from the device. Don't
3424 * complete the command with ssd until
3425 * the retry completes.
3426 */
3427 return;
3428 }
3429 } else {
3430 sema_v(&vlun->svl_pgr_sema);
3431 }
3432 }
3433
3434 break;
3435
3436 case CMD_TIMEOUT:
3437 if ((pkt->pkt_statistics &
3438 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
3439
3440 VHCI_DEBUG(1, (CE_NOTE, NULL,
3441 "!scsi vhci timeout invoked\n"));
3442
3443 (void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3444 FALSE, VHCI_DEPTH_ALL);
3445 }
3446 MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3447 tpkt->pkt_statistics |= STAT_ABORTED;
3448 err_str = timeout_err;
3449 break;
3450
3451 case CMD_TRAN_ERR:
3452 /*
3453 * This status is returned if the transport has sent the cmd
3454 * down the link to the target and then some error occurs.
3455 * In case of SCSI-II RESERVE cmd, we don't know if the
3456 * reservation has been accepted by the target or not, so we need
3457 * to clear the reservation.
3458 */
3459 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3460 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3461 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3462 " cmd_tran_err for scsi-2 reserve cmd\n"));
3463 if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3464 TRUE, VHCI_DEPTH_TARGET)) {
3465 VHCI_DEBUG(1, (CE_WARN, NULL,
3466 "!vhci_intr cmd_tran_err reset failed!"));
3467 }
3468 }
3469 break;
3470
3471 case CMD_DEV_GONE:
3472 /*
3473 * If this is the last path then report CMD_DEV_GONE to the
3474 * target driver, otherwise report BUSY to trigger a retry.
3475 */
3476 if (vlun->svl_dip &&
3477 (mdi_client_get_path_count(vlun->svl_dip) <= 1)) {
3478 struct scsi_vhci *vhci;
3479 vhci = ADDR2VHCI(&tpkt->pkt_address);
3480 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3481 "cmd_dev_gone on last path\n"));
3482 (void) vhci_invalidate_mpapi_lu(vhci, vlun);
3483 break;
3484 }
3485
3486 /* Report CMD_CMPLT-with-BUSY to cause retry. */
3487 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3488 "cmd_dev_gone\n"));
3489 tpkt->pkt_reason = CMD_CMPLT;
3490 tpkt->pkt_state = STATE_GOT_BUS |
3491 STATE_GOT_TARGET | STATE_SENT_CMD |
3492 STATE_GOT_STATUS;
3493 *(tpkt->pkt_scbp) = STATUS_BUSY;
3494 break;
3495
3496 default:
3497 break;
3498 }
3499
3500 /*
3501 * SCSI-II RESERVE cmd has been serviced by the lower layers; clear
3502 * the flag so the lun is not QUIESCED any longer.
3503 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3504 * is retried, a taskq shall again be dispatched to service it. Else
3505 * it may lead to a system hang if the retry is within interrupt
3506 * context.
3507 */
3508 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3509 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3510 vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3511 vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3512 }
3513
3514 /*
3515 * vpkt_org_vpkt should always be NULL here if the retry command
3516 * has been successfully processed. If vpkt_org_vpkt != NULL at
3517 * this point, it is an error so restore the original vpkt and
3518 * return an error to the target driver so it can retry the
3519 * command as appropriate.
3520 */
3521 if (vpkt->vpkt_org_vpkt != NULL) {
3522 struct vhci_pkt *new_vpkt = vpkt;
3523 vpkt = vpkt->vpkt_org_vpkt;
3524
3525 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3526 new_vpkt->vpkt_tgt_pkt);
3527
3528 /*
3529 * Mark this command completion as having an error so that
3530 * ssd will retry the command.
3531 */
3532 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3533 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3534
3535 pkt = vpkt->vpkt_hba_pkt;
3536 tpkt = vpkt->vpkt_tgt_pkt;
3537 }
3538
3539 if ((err_str != NULL) && (pkt->pkt_reason !=
3540 svp->svp_last_pkt_reason)) {
3541 cdip = vlun->svl_dip;
3542 vdip = ddi_get_parent(cdip);
3543 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3544 vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s",
3545 ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
3546 ddi_get_instance(cdip), err_str,
3547 mdi_pi_spathname(vpkt->vpkt_path));
3548 kmem_free(cpath, MAXPATHLEN);
3549 }
3550 svp->svp_last_pkt_reason = pkt->pkt_reason;
3551 VHCI_DECR_PATH_CMDCOUNT(svp);
3552
3553 /*
3554 * For PARTIAL_DMA, vhci should not free the path.
3555 * Target driver will call into vhci_scsi_dmafree or
3556 * destroy pkt to release this path.
3557 */
3558 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
3559 scsi_destroy_pkt(pkt);
3560 vpkt->vpkt_hba_pkt = NULL;
3561 if (vpkt->vpkt_path) {
3562 mdi_rele_path(vpkt->vpkt_path);
3563 vpkt->vpkt_path = NULL;
3564 }
3565 }
3566
3567 scsi_hba_pkt_comp(tpkt);
3568 }
3569
3570 /*
3571 * two possibilities: (1) failover has completed
3572 * or (2) is in progress; update our path states for
3573 * the former case; for the latter case,
3574 * initiate a scsi_watch request to
3575 * determine when failover completes - vlun is HELD
3576 * until failover completes; BUSY is returned to the upper
3577 * layer in both cases
3578 */
3579 static int
3580 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
3581 {
3582 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private;
3583 struct scsi_pkt *tpkt;
3584 scsi_vhci_priv_t *svp;
3585 scsi_vhci_lun_t *vlun;
3586 struct scsi_vhci *vhci;
3587 scsi_vhci_swarg_t *swarg;
3588 char *path;
3589
3590 ASSERT(vpkt != NULL);
3591 tpkt = vpkt->vpkt_tgt_pkt;
3592 ASSERT(tpkt != NULL);
3593 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3594 ASSERT(svp != NULL);
3595 vlun = svp->svp_svl;
3596 ASSERT(vlun != NULL);
3597 ASSERT(VHCI_LUN_IS_HELD(vlun));
3598
3599 vhci = ADDR2VHCI(&tpkt->pkt_address);
3600
3601 if (fostat == SCSI_SENSE_INACTIVE) {
3602 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
3603 "detected for %s; updating path states...\n",
3604 vlun->svl_lun_wwn));
3605 /*
3606 * set the vlun flag to indicate to the task that the target
3607 * port group needs updating
3608 */
3609 vlun->svl_flags |= VLUN_UPDATE_TPG;
3610 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3611 vhci_update_pathstates, (void *)vlun, KM_SLEEP);
3612 } else {
3613 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3614 vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
3615 "!%s (%s%d): Waiting for externally initiated failover "
3616 "to complete", ddi_pathname(vlun->svl_dip, path),
3617 ddi_driver_name(vlun->svl_dip),
3618 ddi_get_instance(vlun->svl_dip));
3619 kmem_free(path, MAXPATHLEN);
3620 swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
3621 if (swarg == NULL) {
3622 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
3623 "request packet allocation for %s failed....\n",
3624 vlun->svl_lun_wwn));
3625 VHCI_RELEASE_LUN(vlun);
3626 return (PKT_RETURN);
3627 }
3628 swarg->svs_svp = svp;
3629 swarg->svs_tos = gethrtime();
3630 swarg->svs_pi = vpkt->vpkt_path;
3631 swarg->svs_release_lun = 0;
3632 swarg->svs_done = 0;
3633 /*
3634 * place a hold on the path...we don't want it to
3635 * vanish while scsi_watch is in progress
3636 */
3637 mdi_hold_path(vpkt->vpkt_path);
3638 svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3639 VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3640 (caddr_t)swarg);
3641 }
3642 return (BUSY_RETURN);
3643 }
3644
3645 /*
3646 * vhci_efo_watch_cb:
3647 * Callback from scsi_watch request to check the failover status.
3648 * Completion is either due to successful failover or timeout.
3649 * Upon successful completion, vhci_update_path_states is called.
3650 * For timeout condition, vhci_efo_done is called.
3651 * Always returns 0 to scsi_watch to keep retrying until vhci_efo_done
3652 * terminates this request properly in a separate thread.
3653 */
3654
3655 static int
3656 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3657 {
3658 struct scsi_status *statusp = resultp->statusp;
3659 uint8_t *sensep = (uint8_t *)resultp->sensep;
3660 struct scsi_pkt *pkt = resultp->pkt;
3661 scsi_vhci_swarg_t *swarg;
3662 scsi_vhci_priv_t *svp;
3663 scsi_vhci_lun_t *vlun;
3664 struct scsi_vhci *vhci;
3665 dev_info_t *vdip;
3666 int rval, updt_paths;
3667
3668 swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3669 svp = swarg->svs_svp;
3670 if (swarg->svs_done) {
3671 /*
3672 * Failover has already completed or timed out.
3673 * Waiting for vhci_efo_done to terminate this scsi_watch.
3674 */
3675 return (0);
3676 }
3677
3678 ASSERT(svp != NULL);
3679 vlun = svp->svp_svl;
3680 ASSERT(vlun != NULL);
3681 ASSERT(VHCI_LUN_IS_HELD(vlun));
3682 vlun->svl_efo_update_path = 0;
3683 vdip = ddi_get_parent(vlun->svl_dip);
3684 vhci = ddi_get_soft_state(vhci_softstate,
3685 ddi_get_instance(vdip));
3686
3687 updt_paths = 0;
3688
3689 if (pkt->pkt_reason != CMD_CMPLT) {
3690 if ((gethrtime() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3691 swarg->svs_release_lun = 1;
3692 goto done;
3693 }
3694 return (0);
3695 }
3696 if (*((unsigned char *)statusp) == STATUS_CHECK) {
3697 rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
3698 vlun->svl_fops_ctpriv);
3699 switch (rval) {
3700 /*
3701 * Only update path states if the path is definitely
3702 * inactive, or no failover occurred. For all other
3703 * check conditions continue pinging. An unexpected
3704 * check condition shouldn't cause pinging to complete
3705 * prematurely.
3706 */
3707 case SCSI_SENSE_INACTIVE:
3708 case SCSI_SENSE_NOFAILOVER:
3709 updt_paths = 1;
3710 break;
3711 default:
3712 if ((gethrtime() - swarg->svs_tos)
3713 >= VHCI_EXTFO_TIMEOUT) {
3714 swarg->svs_release_lun = 1;
3715 goto done;
3716 }
3717 return (0);
3718 }
3719 } else if (*((unsigned char *)statusp) ==
3720 STATUS_RESERVATION_CONFLICT) {
3721 updt_paths = 1;
3722 } else if ((*((unsigned char *)statusp)) &
3723 (STATUS_BUSY | STATUS_QFULL)) {
3724 return (0);
3725 }
3726 if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3727 (updt_paths == 1)) {
3728 /*
3729 * we got here because we had detected an
3730 * externally initiated failover; things
3731 * have settled down now, so let's
3732 * start up a task to update the
3733 * path states and target port group
3734 */
3735 vlun->svl_efo_update_path = 1;
3736 swarg->svs_done = 1;
3737 vlun->svl_swarg = swarg;
3738 vlun->svl_flags |= VLUN_UPDATE_TPG;
3739 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3740 vhci_update_pathstates, (void *)vlun,
3741 KM_SLEEP);
3742 return (0);
3743 }
3744 if ((gethrtime() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3745 swarg->svs_release_lun = 1;
3746 goto done;
3747 }
3748 return (0);
3749 done:
3750 swarg->svs_done = 1;
3751 (void) taskq_dispatch(vhci->vhci_taskq,
3752 vhci_efo_done, (void *)swarg, KM_SLEEP);
3753 return (0);
3754 }
3755
3756 /*
3757 * vhci_efo_done:
3758 * Cleanly terminates scsi_watch and frees up resources.
3759 * Called as a taskq function from vhci_efo_watch_cb for the EFO timeout
3760 * condition, or by vhci_update_path_states invoked during externally
3761 * initiated failover completion.
3762 */
3763 static void
3764 vhci_efo_done(void *arg)
3765 {
3766 scsi_vhci_lun_t *vlun;
3767 scsi_vhci_swarg_t *swarg = (scsi_vhci_swarg_t *)arg;
3768 scsi_vhci_priv_t *svp = swarg->svs_svp;
3769 ASSERT(svp);
3770
3771 vlun = svp->svp_svl;
3772 ASSERT(vlun);
3773
3774 /* Wait for clean termination of scsi_watch */
3775 (void) scsi_watch_request_terminate(svp->svp_sw_token,
3776 SCSI_WATCH_TERMINATE_ALL_WAIT);
3777 svp->svp_sw_token = NULL;
3778
3779 /* release path and free up resources to indicate failover completion */
3780 mdi_rele_path(swarg->svs_pi);
3781 if (swarg->svs_release_lun) {
3782 VHCI_RELEASE_LUN(vlun);
3783 }
3784 kmem_free((void *)swarg, sizeof (*swarg));
3785 }
3786
3787 /*
3788 * Update the path states
3789 * vlun should be HELD when this is invoked.
3790 * Calls vhci_efo_done to cleanup resources allocated for EFO.
3791 */
3792 void
3793 vhci_update_pathstates(void *arg)
3794 {
3795 mdi_pathinfo_t *pip, *npip;
3796 dev_info_t *dip;
3797 struct scsi_failover_ops *fo;
3798 struct scsi_vhci_priv *svp;
3799 struct scsi_device *psd;
3800 struct scsi_path_opinfo opinfo;
3801 char *pclass, *tptr;
3802 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
3803 int sps; /* mdi_select_path() status */
3804 char *cpath;
3805 struct scsi_vhci *vhci;
3806 struct scsi_pkt *pkt;
3807 struct buf *bp;
3808 struct scsi_vhci_priv *svp_conflict = NULL;
3809
3810 ASSERT(VHCI_LUN_IS_HELD(vlun));
3811 dip = vlun->svl_dip;
3812 pip = npip = NULL;
3813
3814 vhci = ddi_get_soft_state(vhci_softstate,
3815 ddi_get_instance(ddi_get_parent(dip)));
3816
3817 sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3818 MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
3819 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3820 goto done;
3821 }
3822
3823 fo = vlun->svl_fops;
3824 do {
3825 pip = npip;
3826 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3827 psd = svp->svp_psd;
3828 if (fo->sfo_path_get_opinfo(psd, &opinfo,
3829 vlun->svl_fops_ctpriv) != 0) {
3830 sps = mdi_select_path(dip, NULL,
3831 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3832 MDI_SELECT_NO_PREFERRED), pip, &npip);
3833 mdi_rele_path(pip);
3834 continue;
3835 }
3836
3837 if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3838 MDI_SUCCESS) {
3839 VHCI_DEBUG(1, (CE_NOTE, NULL,
3840 "!vhci_update_pathstates: prop lookup failed for "
3841 "path 0x%p\n", (void *)pip));
3842 sps = mdi_select_path(dip, NULL,
3843 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3844 MDI_SELECT_NO_PREFERRED), pip, &npip);
3845 mdi_rele_path(pip);
3846 continue;
3847 }
3848
3849 /*
3850 * Need to update the "path-class" property
3851 * value in the device tree if different
3852 * from the existing value.
3853 */
3854 if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3855 (void) mdi_prop_update_string(pip, "path-class",
3856 opinfo.opinfo_path_attr);
3857 }
3858
3859 /*
3860 * Only change the state if needed, i.e. don't call
3861 * mdi_pi_set_state to ONLINE a path if it's already
3862 * ONLINE. Same for STANDBY paths.
3863 */
3864
3865 if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
3866 opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
3867 if (!(MDI_PI_IS_ONLINE(pip))) {
3868 VHCI_DEBUG(1, (CE_NOTE, NULL,
3869 "!vhci_update_pathstates: marking path"
3870 " 0x%p as ONLINE\n", (void *)pip));
3871 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3872 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3873 "(%s%d): path %s "
3874 "is now ONLINE because of "
3875 "an externally initiated failover",
3876 ddi_pathname(dip, cpath),
3877 ddi_driver_name(dip),
3878 ddi_get_instance(dip),
3879 mdi_pi_spathname(pip));
3880 kmem_free(cpath, MAXPATHLEN);
3881 mdi_pi_set_state(pip,
3882 MDI_PATHINFO_STATE_ONLINE);
3883 mdi_pi_set_preferred(pip,
3884 opinfo.opinfo_preferred);
3885 tptr = kmem_alloc(strlen
3886 (opinfo.opinfo_path_attr)+1, KM_SLEEP);
3887 (void) strlcpy(tptr, opinfo.opinfo_path_attr,
3888 (strlen(opinfo.opinfo_path_attr)+1));
3889 mutex_enter(&vlun->svl_mutex);
3890 if (vlun->svl_active_pclass != NULL) {
3891 kmem_free(vlun->svl_active_pclass,
3892 strlen(vlun->svl_active_pclass)+1);
3893 }
3894 vlun->svl_active_pclass = tptr;
3895 if (vlun->svl_waiting_for_activepath) {
3896 vlun->svl_waiting_for_activepath = 0;
3897 }
3898 mutex_exit(&vlun->svl_mutex);
3899 } else if (MDI_PI_IS_ONLINE(pip)) {
3900 if (strcmp(pclass, opinfo.opinfo_path_attr)
3901 != 0) {
3902 mdi_pi_set_preferred(pip,
3903 opinfo.opinfo_preferred);
3904 mutex_enter(&vlun->svl_mutex);
3905 if (vlun->svl_active_pclass == NULL ||
3906 strcmp(opinfo.opinfo_path_attr,
3907 vlun->svl_active_pclass) != 0) {
3908 mutex_exit(&vlun->svl_mutex);
3909 tptr = kmem_alloc(strlen
3910 (opinfo.opinfo_path_attr)+1,
3911 KM_SLEEP);
3912 (void) strlcpy(tptr,
3913 opinfo.opinfo_path_attr,
3914 (strlen
3915 (opinfo.opinfo_path_attr)
3916 +1));
3917 mutex_enter(&vlun->svl_mutex);
3918 } else {
3919 /*
3920 * No need to update
3921 * svl_active_pclass
3922 */
3923 tptr = NULL;
3924 mutex_exit(&vlun->svl_mutex);
3925 }
3926 if (tptr) {
3927 if (vlun->svl_active_pclass
3928 != NULL) {
3929 kmem_free(vlun->
3930 svl_active_pclass,
3931 strlen(vlun->
3932 svl_active_pclass)
3933 +1);
3934 }
3935 vlun->svl_active_pclass = tptr;
3936 mutex_exit(&vlun->svl_mutex);
3937 }
3938 }
3939 }
3940
3941 /* Check for Reservation Conflict */
3942 bp = scsi_alloc_consistent_buf(
3943 &svp->svp_psd->sd_address, (struct buf *)NULL,
3944 DEV_BSIZE, B_READ, NULL, NULL);
3945 if (!bp) {
3946 VHCI_DEBUG(1, (CE_NOTE, NULL,
3947 "!vhci_update_pathstates: No resources "
3948 "(buf)\n"));
3949 mdi_rele_path(pip);
3950 goto done;
3951 }
3952 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
3953 CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
3954 PKT_CONSISTENT, NULL, NULL);
3955 if (pkt) {
3956 (void) scsi_setup_cdb((union scsi_cdb *)
3957 (uintptr_t)pkt->pkt_cdbp, SCMD_READ, 1, 1,
3958 0);
3959 pkt->pkt_time = 3*30;
3960 pkt->pkt_flags = FLAG_NOINTR;
3961 pkt->pkt_path_instance =
3962 mdi_pi_get_path_instance(pip);
3963
3964 if ((scsi_transport(pkt) == TRAN_ACCEPT) &&
3965 (pkt->pkt_reason == CMD_CMPLT) &&
3966 (SCBP_C(pkt) ==
3967 STATUS_RESERVATION_CONFLICT)) {
3968 VHCI_DEBUG(1, (CE_NOTE, NULL,
3969 "!vhci_update_pathstates: reserv. "
3970 "conflict to be resolved on 0x%p\n",
3971 (void *)pip));
3972 svp_conflict = svp;
3973 }
3974 scsi_destroy_pkt(pkt);
3975 }
3976 scsi_free_consistent_buf(bp);
3977 } else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
3978 !(MDI_PI_IS_STANDBY(pip))) {
3979 VHCI_DEBUG(1, (CE_NOTE, NULL,
3980 "!vhci_update_pathstates: marking path"
3981 " 0x%p as STANDBY\n", (void *)pip));
3982 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3983 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3984 "(%s%d): path %s "
3985 "is now STANDBY because of "
3986 "an externally initiated failover",
3987 ddi_pathname(dip, cpath),
3988 ddi_driver_name(dip),
3989 ddi_get_instance(dip),
3990 mdi_pi_spathname(pip));
3991 kmem_free(cpath, MAXPATHLEN);
3992 mdi_pi_set_state(pip,
3993 MDI_PATHINFO_STATE_STANDBY);
3994 mdi_pi_set_preferred(pip,
3995 opinfo.opinfo_preferred);
3996 mutex_enter(&vlun->svl_mutex);
3997 if (vlun->svl_active_pclass != NULL) {
3998 if (strcmp(vlun->svl_active_pclass,
3999 opinfo.opinfo_path_attr) == 0) {
4000 kmem_free(vlun->
4001 svl_active_pclass,
4002 strlen(vlun->
4003 svl_active_pclass)+1);
4004 vlun->svl_active_pclass = NULL;
4005 }
4006 }
4007 mutex_exit(&vlun->svl_mutex);
4008 }
4009 (void) mdi_prop_free(pclass);
4010 sps = mdi_select_path(dip, NULL,
4011 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
4012 MDI_SELECT_NO_PREFERRED), pip, &npip);
4013 mdi_rele_path(pip);
4014
4015 } while ((npip != NULL) && (sps == MDI_SUCCESS));
4016
4017 /*
4018 * Check to see if this vlun has an active SCSI-II RESERVE. If so,
4019 * clear the reservation by sending a reset, so the host doesn't
4020 * receive a reservation conflict. The reset has to be sent via a
4021 * working path. Let's use the path referred to by svp_conflict, as it
4022 * should be working.
4023 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd
4024 * of the reset, explicitly.
4025 */
4026 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4027 if (svp_conflict && (vlun->svl_xlf_capable == 0)) {
4028 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathstates:"
4029 " sending recovery reset on 0x%p, path_state: %x",
4030 svp_conflict->svp_psd->sd_private,
4031 mdi_pi_get_state((mdi_pathinfo_t *)
4032 svp_conflict->svp_psd->sd_private)));
4033
4034 (void) vhci_recovery_reset(vlun,
4035 &svp_conflict->svp_psd->sd_address, FALSE,
4036 VHCI_DEPTH_TARGET);
4037 }
4038 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
4039 mutex_enter(&vhci->vhci_mutex);
4040 scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
4041 &vhci->vhci_reset_notify_listf);
4042 mutex_exit(&vhci->vhci_mutex);
4043 }
4044 if (vlun->svl_flags & VLUN_UPDATE_TPG) {
4045 /*
4046 * Update the AccessState of related MP-API TPGs
4047 */
4048 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
4049 vlun->svl_flags &= ~VLUN_UPDATE_TPG;
4050 }
4051 done:
4052 if (vlun->svl_efo_update_path) {
4053 vlun->svl_efo_update_path = 0;
4054 vhci_efo_done(vlun->svl_swarg);
4055 vlun->svl_swarg = 0;
4056 }
4057 VHCI_RELEASE_LUN(vlun);
4058 }
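/*
 * Illustrative sketch (the standard mdi_select_path(9F) cursor idiom
 * used by the loop above): the previously selected pip seeds the next
 * lookup and every selected pip is released after use:
 *
 *	pip = npip = NULL;
 *	sps = mdi_select_path(dip, NULL, flags, NULL, &npip);
 *	while ((npip != NULL) && (sps == MDI_SUCCESS)) {
 *		pip = npip;
 *		... operate on pip ...
 *		sps = mdi_select_path(dip, NULL, flags, pip, &npip);
 *		mdi_rele_path(pip);
 *	}
 */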
4059
4060 /* ARGSUSED */
4061 static int
4062 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4063 {
4064 scsi_hba_tran_t *hba = NULL;
4065 struct scsi_device *psd = NULL;
4066 scsi_vhci_lun_t *vlun = NULL;
4067 dev_info_t *pdip = NULL;
4068 dev_info_t *tgt_dip;
4069 struct scsi_vhci *vhci;
4070 char *guid;
4071 scsi_vhci_priv_t *svp = NULL;
4072 int rval = MDI_FAILURE;
4073 int vlun_alloced = 0;
4074
4075 ASSERT(vdip != NULL);
4076 ASSERT(pip != NULL);
4077
4078 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4079 ASSERT(vhci != NULL);
4080
4081 pdip = mdi_pi_get_phci(pip);
4082 ASSERT(pdip != NULL);
4083
4084 hba = ddi_get_driver_private(pdip);
4085 ASSERT(hba != NULL);
4086
4087 tgt_dip = mdi_pi_get_client(pip);
4088 ASSERT(tgt_dip != NULL);
4089
4090 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
4091 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
4092 VHCI_DEBUG(1, (CE_WARN, NULL,
4093 "vhci_pathinfo_init: lun guid property failed"));
4094 goto failure;
4095 }
4096
4097 vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
4098 ddi_prop_free(guid);
4099
4100 vlun->svl_dip = tgt_dip;
4101
4102 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
4103 svp->svp_svl = vlun;
4104
4105 /*
4106 * Initialize svl_lb_policy_save only for a newly allocated vlun. Writing
4107 * to svl_lb_policy_save later could accidentally overwrite the saved lb
4108 * policy.
4109 */
4110 if (vlun_alloced) {
4111 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
4112 }
4113
4114 mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
4115 cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);
4116
4117 psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
4118 mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
4119
4120 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4121 /*
4122 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to
4123 * scsi_device in the scsi_address structure. This allows an
4124 * HBA driver to find its scsi_device(9S) and
4125 * per-scsi_device(9S) HBA private data given a
4126 * scsi_address(9S) by using scsi_address_device(9F) and
4127 * scsi_device_hba_private_get(9F).
4128 */
4129 psd->sd_address.a.a_sd = psd;
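		/*
		 * Illustrative sketch (not part of this driver): an HBA
		 * driver on the other side of this plumbing could recover
		 * its data with
		 *
		 *	struct scsi_device *sd = scsi_address_device(ap);
		 *	void *hba_priv = scsi_device_hba_private_get(sd);
		 *
		 * assuming it previously attached "hba_priv" via
		 * scsi_device_hba_private_set(9F).
		 */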
4130 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4131 /*
4132 * Clone the transport structure if requested.
4133 * Self-enumerating HBAs always need to use cloning.
4134 */
4135 scsi_hba_tran_t *clone =
4136 kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
4137 bcopy(hba, clone, sizeof (scsi_hba_tran_t));
4138 hba = clone;
4139 hba->tran_sd = psd;
4140 } else {
4141 /*
4142 * SPI pHCI unit-address. If we ever need to support this
4143 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo
4144 * node unit-address properties. For now we fail...
4145 */
4146 goto failure;
4147 }
4148
4149 psd->sd_dev = tgt_dip;
4150 psd->sd_address.a_hba_tran = hba;
4151
4152 /*
4153 * Mark scsi_device as being associated with a pathinfo node. For
4154 * a scsi_device structure associated with a devinfo node,
4155 * scsi_ctlops_initchild sets this field to NULL.
4156 */
4157 psd->sd_pathinfo = pip;
4158
4159 /*
4160 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with
4161 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all
4162 * mpxio-capable pHCI drivers use SCSA enumeration services (or at
4163 * least have been changed to use sd_pathinfo instead).
4164 */
4165 psd->sd_private = (caddr_t)pip;
4166
4167 /* See scsi_hba.c for info on sd_tran_safe kludge */
4168 psd->sd_tran_safe = hba;
4169
4170 svp->svp_psd = psd;
4171 mdi_pi_set_vhci_private(pip, (caddr_t)svp);
4172
4173 /*
4174 * call hba's target init entry point if it exists
4175 */
4176 if (hba->tran_tgt_init != NULL) {
4177 psd->sd_tran_tgt_free_done = 0;
4178 if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
4179 hba, psd)) != DDI_SUCCESS) {
4180 VHCI_DEBUG(1, (CE_WARN, pdip,
4181 "!vhci_pathinfo_init: tran_tgt_init failed for "
4182 "path=0x%p rval=%x", (void *)pip, rval));
4183 goto failure;
4184 }
4185 }
4186
4187 svp->svp_new_path = 1;
4188
4189 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
4190 (void *)pip));
4191 return (MDI_SUCCESS);
4192
4193 failure:
4194 if (psd) {
4195 mutex_destroy(&psd->sd_mutex);
4196 kmem_free(psd, sizeof (*psd));
4197 }
4198 if (svp) {
4199 mdi_pi_set_vhci_private(pip, NULL);
4200 mutex_destroy(&svp->svp_mutex);
4201 cv_destroy(&svp->svp_cv);
4202 kmem_free(svp, sizeof (*svp));
4203 }
4204 if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE))
4205 kmem_free(hba, sizeof (scsi_hba_tran_t));
4206
4207 if (vlun_alloced)
4208 vhci_lun_free(vlun, NULL);
4209
4210 return (rval);
4211 }
4212
4213 /* ARGSUSED */
4214 static int
4215 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4216 {
4217 scsi_hba_tran_t *hba = NULL;
4218 struct scsi_device *psd = NULL;
4219 dev_info_t *pdip = NULL;
4220 dev_info_t *cdip = NULL;
4221 scsi_vhci_priv_t *svp = NULL;
4222
4223 ASSERT(vdip != NULL);
4224 ASSERT(pip != NULL);
4225
4226 pdip = mdi_pi_get_phci(pip);
4227 ASSERT(pdip != NULL);
4228
4229 cdip = mdi_pi_get_client(pip);
4230 ASSERT(cdip != NULL);
4231
4232 hba = ddi_get_driver_private(pdip);
4233 ASSERT(hba != NULL);
4234
4235 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT);
4236 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4237 if (svp == NULL) {
4238 /* path already freed. Nothing to do. */
4239 return (MDI_SUCCESS);
4240 }
4241
4242 psd = svp->svp_psd;
4243 ASSERT(psd != NULL);
4244
4245 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4246 /* Verify plumbing */
4247 ASSERT(psd->sd_address.a_hba_tran == hba);
4248 ASSERT(psd->sd_address.a.a_sd == psd);
4249 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4250 /* Switch to cloned scsi_hba_tran(9S) structure */
4251 hba = psd->sd_address.a_hba_tran;
4252 ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
4253 ASSERT(hba->tran_sd == psd);
4254 }
4255
4256 if ((hba->tran_tgt_free != NULL) && !psd->sd_tran_tgt_free_done) {
4257 (*hba->tran_tgt_free) (pdip, cdip, hba, psd);
4258 psd->sd_tran_tgt_free_done = 1;
4259 }
4260 mutex_destroy(&psd->sd_mutex);
4261 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4262 kmem_free(hba, sizeof (*hba));
4263 }
4264
4265 mdi_pi_set_vhci_private(pip, NULL);
4266
4267 /*
4268 * Free the pathinfo related scsi_device inquiry data. Note that this
4269 * matches what happens for scsi_hba.c devinfo case at uninitchild time.
4270 */
4271 if (psd->sd_inq)
4272 kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry));
4273 kmem_free((caddr_t)psd, sizeof (*psd));
4274
4275 mutex_destroy(&svp->svp_mutex);
4276 cv_destroy(&svp->svp_cv);
4277 kmem_free((caddr_t)svp, sizeof (*svp));
4278
4279 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
4280 (void *)pip));
4281 return (MDI_SUCCESS);
4282 }
4283
4284 /* ARGSUSED */
4285 static int
4286 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
4287 mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
4288 {
4289 int rval = MDI_SUCCESS;
4290 scsi_vhci_priv_t *svp;
4291 scsi_vhci_lun_t *vlun;
4292 int held;
4293 int op = (flags & 0xf00) >> 8;
4294 struct scsi_vhci *vhci;
4295
4296 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4297
4298 if (flags & MDI_EXT_STATE_CHANGE) {
4299 /*
4300 * We do not want to issue any commands down the path in case the
4301 * sync flag is set. Lower layers might not be ready to accept
4302 * any I/O commands.
4303 */
4304 if (op == DRIVER_DISABLE)
4305 return (MDI_SUCCESS);
4306
4307 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4308 if (svp == NULL) {
4309 return (MDI_FAILURE);
4310 }
4311 vlun = svp->svp_svl;
4312
4313 if (flags & MDI_BEFORE_STATE_CHANGE) {
4314 /*
4315 * Hold the LUN.
4316 */
4317 VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
4318 if (flags & MDI_DISABLE_OP) {
4319 /*
4320 * Issue scsi reset if it happens to be
4321 * reserved path.
4322 */
4323 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4324 /*
4325 * if reservation pending on
4326 * this path, don't mark the
4327 * path busy
4328 */
4329 if (op == DRIVER_DISABLE_TRANSIENT) {
4330 VHCI_DEBUG(1, (CE_NOTE, NULL,
4331 "!vhci_pathinfo"
4332 "_state_change (pip:%p): "
4333 " reservation: fail busy\n",
4334 (void *)pip));
4335 return (MDI_FAILURE);
4336 }
4337 if (pip == vlun->svl_resrv_pip) {
4338 if (vhci_recovery_reset(
4339 svp->svp_svl,
4340 &svp->svp_psd->sd_address,
4341 TRUE,
4342 VHCI_DEPTH_TARGET) == 0) {
4343 VHCI_DEBUG(1,
4344 (CE_NOTE, NULL,
4345 "!vhci_pathinfo"
4346 "_state_change "
4347 " (pip:%p): "
4348 "reset failed, "
4349 "give up!\n",
4350 (void *)pip));
4351 }
4352 vlun->svl_flags &=
4353 ~VLUN_RESERVE_ACTIVE_FLG;
4354 }
4355 }
4356 } else if (flags & MDI_ENABLE_OP) {
4357 if (((vhci->vhci_conf_flags &
4358 VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4359 VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4360 MDI_PI_IS_USER_DISABLE(pip) &&
4361 MDI_PI_IS_STANDBY(pip)) {
4362 struct scsi_failover_ops *fo;
4363 char *best_pclass, *pclass = NULL;
4364 int best_class, rv;
4365 /*
4366 * Failback if enabling a standby path
4367 * and it is the primary class or
4368 * preferred class
4369 */
4370 best_class = mdi_pi_get_preferred(pip);
4371 if (best_class == 0) {
4372 /*
4373 * if not preferred - compare
4374 * path-class with class
4375 */
4376 fo = vlun->svl_fops;
4377 (void) fo->sfo_pathclass_next(
4378 NULL, &best_pclass,
4379 vlun->svl_fops_ctpriv);
4380 pclass = NULL;
4381 rv = mdi_prop_lookup_string(pip,
4382 "path-class", &pclass);
4383 if (rv != MDI_SUCCESS ||
4384 pclass == NULL) {
4385 vhci_log(CE_NOTE, vdip,
4386 "!path-class "
4387 " lookup "
4388 "failed. rv: %d"
4389 "class: %p", rv,
4390 (void *)pclass);
4391 } else if (strncmp(pclass,
4392 best_pclass,
4393 strlen(best_pclass)) == 0) {
4394 best_class = 1;
4395 }
4396 if (rv == MDI_SUCCESS &&
4397 pclass != NULL) {
4398 rv = mdi_prop_free(
4399 pclass);
4400 if (rv !=
4401 DDI_PROP_SUCCESS) {
4402 vhci_log(
4403 CE_NOTE,
4404 vdip,
4405 "!path-"
4406 "class"
4407 " free"
4408 " failed"
4409 " rv: %d"
4410 " class: "
4411 "%p",
4412 rv,
4413 (void *)
4414 pclass);
4415 }
4416 }
4417 }
4418 if (best_class == 1) {
4419 VHCI_DEBUG(1, (CE_NOTE, NULL,
4420 "preferred path: %p "
4421 "USER_DISABLE->USER_ENABLE "
4422 "transition for lun %s\n",
4423 (void *)pip,
4424 vlun->svl_lun_wwn));
4425 (void) taskq_dispatch(
4426 vhci->vhci_taskq,
4427 vhci_initiate_auto_failback,
4428 (void *) vlun, KM_SLEEP);
4429 }
4430 }
4431 /*
4432 * if PGR is active, revalidate key and
4433 * register on this path also, if key is
4434 * still valid
4435 */
4436 sema_p(&vlun->svl_pgr_sema);
4437 if (vlun->svl_pgr_active)
4438 (void)
4439 vhci_pgr_validate_and_register(svp);
4440 sema_v(&vlun->svl_pgr_sema);
4441 /*
4442 * Inform target driver about any
4443 * reservations to be reinstated if target
4444 * has dropped reservation during the busy
4445 * period.
4446 */
4447 mutex_enter(&vhci->vhci_mutex);
4448 scsi_hba_reset_notify_callback(
4449 &vhci->vhci_mutex,
4450 &vhci->vhci_reset_notify_listf);
4451 mutex_exit(&vhci->vhci_mutex);
4452 }
4453 }
4454 if (flags & MDI_AFTER_STATE_CHANGE) {
4455 if (flags & MDI_ENABLE_OP) {
4456 mutex_enter(&vhci_global_mutex);
4457 cv_broadcast(&vhci_cv);
4458 mutex_exit(&vhci_global_mutex);
4459 }
4460 if (vlun->svl_setcap_done) {
4461 (void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
4462 "sector-size", vlun->svl_sector_size,
4463 1, pip);
4464 }
4465
4466 /*
4467 * Release the LUN
4468 */
4469 VHCI_RELEASE_LUN(vlun);
4470
4471 /*
4472 * Path transition is complete.
4473 * Run the callback to tell the target driver to
4474 * retry, to prevent IO starvation.
4475 */
4476 if (scsi_callback_id != 0) {
4477 ddi_run_callback(&scsi_callback_id);
4478 }
4479 }
4480 } else {
4481 switch (state) {
4482 case MDI_PATHINFO_STATE_ONLINE:
4483 rval = vhci_pathinfo_online(vdip, pip, flags);
4484 break;
4485
4486 case MDI_PATHINFO_STATE_OFFLINE:
4487 rval = vhci_pathinfo_offline(vdip, pip, flags);
4488 break;
4489
4490 default:
4491 break;
4492 }
4493 /*
4494 * Path transition is complete.
4495 * Run the callback to tell the target driver to
4496 * retry, to prevent IO starvation.
4497 */
4498 if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
4499 ddi_run_callback(&scsi_callback_id);
4500 }
4501 return (rval);
4502 }
4503
4504 return (MDI_SUCCESS);
4505 }
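/*
 * Illustrative sketch (assumed MDI behavior): for an extended state
 * change, mdi invokes this entry point twice around the transition,
 * roughly
 *
 *	(void) vhci_pathinfo_state_change(vdip, pip, state, ext_state,
 *	    MDI_EXT_STATE_CHANGE | MDI_BEFORE_STATE_CHANGE | MDI_ENABLE_OP);
 *	... mdi updates the path state ...
 *	(void) vhci_pathinfo_state_change(vdip, pip, state, ext_state,
 *	    MDI_EXT_STATE_CHANGE | MDI_AFTER_STATE_CHANGE | MDI_ENABLE_OP);
 *
 * which is why the LUN is held in the BEFORE pass and released in the
 * AFTER pass above.
 */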
4506
4507 /*
4508 * Parse the mpxio load balancing options. The datanameptr
4509 * will point to a string containing the load-balance-options value.
4510 * The load-balance-options value will be a property that
4511 * defines the load-balance algorithm and any arguments to that
4512 * algorithm.
4513 * For example:
4514 * device-type-mpxio-options-list=
4515 * "device-type=SUN SENA", "load-balance-options=logical-block-options"
4516 * "device-type=SUN SE6920", "round-robin-options";
4517 * logical-block-options="load-balance=logical-block", "region-size=15";
4518 * round-robin-options="load-balance=round-robin";
4519 *
4520 * If load-balance is not defined, the load-balance algorithm will
4521 * default to the global setting. Default values are assigned
4522 * to the arguments (region-size=18), and any argument
4523 * that is not recognized will be ignored.
4524 */
4525 static void
4526 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
4527 caddr_t datanameptr)
4528 {
4529 char *dataptr, *next_entry;
4530 caddr_t config_list = NULL;
4531 int config_list_len = 0, list_len = 0;
4532 int region_size = -1;
4533 client_lb_t load_balance;
4534
4535 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
4536 (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
4537 return;
4538 }
4539
4540 list_len = config_list_len;
4541 next_entry = config_list;
4542 while (config_list_len > 0) {
4543 dataptr = next_entry;
4544
4545 if (strncmp(mdi_load_balance, dataptr,
4546 strlen(mdi_load_balance)) == 0) {
4547 /* get the load-balance scheme */
4548 dataptr += strlen(mdi_load_balance) + 1;
4549 if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
4550 (void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
4551 load_balance = LOAD_BALANCE_RR;
4552 } else if (strcmp(dataptr,
4553 LOAD_BALANCE_PROP_LBA) == 0) {
4554 (void) mdi_set_lb_policy(cdip,
4555 LOAD_BALANCE_LBA);
4556 load_balance = LOAD_BALANCE_LBA;
4557 } else if (strcmp(dataptr,
4558 LOAD_BALANCE_PROP_NONE) == 0) {
4559 (void) mdi_set_lb_policy(cdip,
4560 LOAD_BALANCE_NONE);
4561 load_balance = LOAD_BALANCE_NONE;
4562 }
4563 } else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
4564 strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
4565 int i = 0;
4566 char *ptr;
4567 char *tmp;
4568
4569 tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
4570 /* check for numeric value */
4571 for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
4572 if (!isdigit(*ptr)) {
4573 cmn_err(CE_WARN,
4574 "Illegal region size: %s."
4575 " Setting to default value: %d",
4576 tmp,
4577 LOAD_BALANCE_DEFAULT_REGION_SIZE);
4578 region_size =
4579 LOAD_BALANCE_DEFAULT_REGION_SIZE;
4580 break;
4581 }
4582 }
4583 if (i >= strlen(tmp)) {
4584 region_size = stoi(&tmp);
4585 }
4586 (void) mdi_set_lb_region_size(cdip, region_size);
4587 }
4588 config_list_len -= (strlen(next_entry) + 1);
4589 next_entry += strlen(next_entry) + 1;
4590 }
4591 #ifdef DEBUG
4592 if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
4593 VHCI_DEBUG(1, (CE_NOTE, dip,
4594 "!vhci_parse_mpxio_lb_options: region-size: %d"
4595 "only valid for load-balance=logical-block\n",
4596 region_size));
4597 }
4598 #endif
4599 if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
4600 VHCI_DEBUG(1, (CE_NOTE, dip,
4601 "!vhci_parse_mpxio_lb_options: No region-size"
4602 " defined load-balance=logical-block."
4603 " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
4604 (void) mdi_set_lb_region_size(cdip,
4605 LOAD_BALANCE_DEFAULT_REGION_SIZE);
4606 }
4607 if (list_len > 0) {
4608 kmem_free(config_list, list_len);
4609 }
4610 }
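/*
 * Illustrative example: with the sample scsi_vhci.conf entries shown in
 * the block comment above, a "SUN SE6920" LUN parses
 * "load-balance=round-robin" and ends up equivalent to
 *
 *	(void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
 *
 * while a "SUN SENA" LUN additionally applies
 *
 *	(void) mdi_set_lb_region_size(cdip, 15);
 *
 * from its "region-size=15" argument.
 */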
4611
4612 /*
4613 * Parse the device-type-mpxio-options-list looking for the key of
4614 * "load-balance-options". If found, parse the load balancing options.
4615 * Check the comment of the vhci_get_device_type_mpxio_options()
4616 * for the device-type-mpxio-options-list.
4617 */
4618 static void
4619 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4620 caddr_t datanameptr, int list_len)
4621 {
4622 char *dataptr;
4623 int len;
4624
4625 /*
4626 * get the data list
4627 */
4628 dataptr = datanameptr;
4629 len = 0;
4630 while (len < list_len &&
4631 strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
4632 != 0) {
4633 if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
4634 strlen(LOAD_BALANCE_OPTIONS)) == 0) {
4635 len += strlen(LOAD_BALANCE_OPTIONS) + 1;
4636 dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
4637 vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
4638 }
4639 len += strlen(dataptr) + 1;
4640 dataptr += strlen(dataptr) + 1;
4641 }
4642 }
4643
4644 /*
4645 * Check the inquiry string returned from the device against the
4646 * device-type. Check for the existence of the
4647 * device-type-mpxio-options-list and, if found, parse the list,
4648 * checking for a match between the device-type value and the inquiry
4649 * string. If a match is found, parse the mpxio options list. The format of the
4650 * device-type-mpxio-options-list is:
4651 * device-type-mpxio-options-list=
4652 * "device-type=SUN SENA", "load-balance-options=logical-block-options"
4653 * "device-type=SUN SE6920", "round-robin-options";
4654 * logical-block-options="load-balance=logical-block", "region-size=15";
4655 * round-robin-options="load-balance=round-robin";
4656 */
4657 void
4658 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4659 struct scsi_device *devp)
4660 {
4661
4662 caddr_t config_list = NULL;
4663 caddr_t vidptr, datanameptr;
4664 int vidlen, dupletlen = 0;
4665 int config_list_len = 0, len;
4666 struct scsi_inquiry *inq = devp->sd_inq;
4667
4668 /*
4669 * Look up the device-type-mpxio-options-list and walk through
4670 * the list, comparing the vendor id from the earlier inquiry command
4671 * with the vids in the list; if there is a match, look up
4672 * the mpxio-options value.
4673 */
4674 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
4675 MPXIO_OPTIONS_LIST,
4676 (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {
4677
4678 /*
4679 * Compare vids in each duplet - if it matches,
4680 * parse the mpxio options list.
4681 */
4682 for (len = config_list_len, vidptr = config_list; len > 0;
4683 len -= dupletlen) {
4684
4685 dupletlen = 0;
4686
4687 if (strlen(vidptr) != 0 &&
4688 strncmp(vidptr, DEVICE_TYPE_STR,
4689 strlen(DEVICE_TYPE_STR)) == 0) {
4690 /* point to next duplet */
4691 datanameptr = vidptr + strlen(vidptr) + 1;
4692 /* add len of this duplet */
4693 dupletlen += strlen(vidptr) + 1;
4694 /* get to device type */
4695 vidptr += strlen(DEVICE_TYPE_STR) + 1;
4696 vidlen = strlen(vidptr);
4697 if ((vidlen != 0) &&
4698 bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
4699 vhci_parse_mpxio_options(dip, cdip,
4700 datanameptr, len - dupletlen);
4701 break;
4702 }
4703 /* get to next duplet */
4704 vidptr += strlen(vidptr) + 1;
4705 }
4706 /* get to the next device-type */
4707 while (len - dupletlen > 0 &&
4708 strlen(vidptr) != 0 &&
4709 strncmp(vidptr, DEVICE_TYPE_STR,
4710 strlen(DEVICE_TYPE_STR)) != 0) {
4711 dupletlen += strlen(vidptr) + 1;
4712 vidptr += strlen(vidptr) + 1;
4713 }
4714 }
4715 if (config_list_len > 0) {
4716 kmem_free(config_list, config_list_len);
4717 }
4718 }
4719 }
4720
4721 static int
4722 vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip,
4723 struct scsi_failover_ops *fo,
4724 scsi_vhci_lun_t *vlun,
4725 struct scsi_vhci *vhci)
4726 {
4727 struct scsi_path_opinfo opinfo;
4728 char *pclass, *best_pclass;
4729 char *resrv_pclass = NULL;
4730 int force_rereserve = 0;
4731 int update_pathinfo_done = 0;
4732
4733 if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) {
4734 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
4735 "Failed to get operation info for path:%p\n", (void *)pip));
4736 return (MDI_FAILURE);
4737 }
4738 /* set the xlf capable flag in the vlun for future use */
4739 vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
4740 (void) mdi_prop_update_string(pip, "path-class",
4741 opinfo.opinfo_path_attr);
4742
4743 pclass = opinfo.opinfo_path_attr;
4744 if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
4745 mutex_enter(&vlun->svl_mutex);
4746 if (vlun->svl_active_pclass != NULL) {
4747 if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
4748 mutex_exit(&vlun->svl_mutex);
4749 /*
4750 * Externally initiated failover has happened;
4751 * force the path state to be STANDBY/ONLINE,
4752 * next IO will trigger failover and thus
4753 * sync-up the pathstates. Reason we don't
4754 * sync-up immediately by invoking
4755 * vhci_update_pathstates() is because it
4756 * needs a VHCI_HOLD_LUN() and we don't
4757 * want to block here.
4758 *
4759 * Further, if the device is an ALUA device,
4760 * then failure to exactly match 'pclass' and
4761 * 'svl_active_pclass'(as is the case here)
4762 * indicates that the currently active path
4763 * is a 'non-optimized' path - which means
4764 * that 'svl_active_pclass' needs to be
4765 * replaced with the opinfo.opinfo_path_attr
4766 * value.
4767 */
4768
4769 if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
4770 char *tptr;
4771
4772 /*
4773 * The device is ALUA compliant. The
4774 * state needs to be changed to online
4775 * rather than the standby state, which is
4776 * typically done for an asymmetric
4777 * device that is not ALUA compliant.
4778 */
4779 mdi_pi_set_state(pip,
4780 MDI_PATHINFO_STATE_ONLINE);
4781 tptr = kmem_alloc(strlen
4782 (opinfo.opinfo_path_attr)+1,
4783 KM_SLEEP);
4784 (void) strlcpy(tptr,
4785 opinfo.opinfo_path_attr,
4786 (strlen(opinfo.opinfo_path_attr)
4787 +1));
4788 mutex_enter(&vlun->svl_mutex);
4789 kmem_free(vlun->svl_active_pclass,
4790 strlen(vlun->svl_active_pclass)+1);
4791 vlun->svl_active_pclass = tptr;
4792 mutex_exit(&vlun->svl_mutex);
4793 } else {
4794 /*
4795 * Non ALUA device case.
4796 */
4797 mdi_pi_set_state(pip,
4798 MDI_PATHINFO_STATE_STANDBY);
4799 }
4800 vlun->svl_fo_support = opinfo.opinfo_mode;
4801 mdi_pi_set_preferred(pip,
4802 opinfo.opinfo_preferred);
4803 update_pathinfo_done = 1;
4804 }
4805
4806 /*
4807 * Find out a class of currently reserved path if there
4808 * is any.
4809 */
4810 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) &&
4811 mdi_prop_lookup_string(vlun->svl_resrv_pip,
4812 "path-class", &resrv_pclass) != MDI_SUCCESS) {
4813 VHCI_DEBUG(1, (CE_NOTE, NULL,
4814 "!vhci_update_pathinfo: prop lookup "
4815 "failed for path 0x%p\n",
4816 (void *)vlun->svl_resrv_pip));
4817 /*
4818 * Something is wrong with the reserved path.
4819 * We can't do much with that right here. Just
4820 * force re-reservation to another path.
4821 */
4822 force_rereserve = 1;
4823 }
4824
4825 (void) fo->sfo_pathclass_next(NULL, &best_pclass,
4826 vlun->svl_fops_ctpriv);
4827 if ((force_rereserve == 1) || ((resrv_pclass != NULL) &&
4828 (strcmp(pclass, best_pclass) == 0) &&
4829 (strcmp(resrv_pclass, best_pclass) != 0))) {
4830 /*
4831 * Inform target driver that a reservation
4832 * should be reinstated because the reserved
4833 * path is not the most preferred one.
4834 */
4835 mutex_enter(&vhci->vhci_mutex);
4836 scsi_hba_reset_notify_callback(
4837 &vhci->vhci_mutex,
4838 &vhci->vhci_reset_notify_listf);
4839 mutex_exit(&vhci->vhci_mutex);
4840 }
4841
4842 if (update_pathinfo_done == 1) {
4843 return (MDI_SUCCESS);
4844 }
4845 } else {
4846 char *tptr;
4847
4848 /*
4849 * Let's release the mutex before we try to
4850 * allocate, since the allocation may
4851 * sleep.
4852 */
4853 mutex_exit(&vlun->svl_mutex);
4854 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4855 (void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4856 mutex_enter(&vlun->svl_mutex);
4857 vlun->svl_active_pclass = tptr;
4858 }
4859 mutex_exit(&vlun->svl_mutex);
4860 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4861 vlun->svl_waiting_for_activepath = 0;
4862 } else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
4863 mutex_enter(&vlun->svl_mutex);
4864 if (vlun->svl_active_pclass == NULL) {
4865 char *tptr;
4866
4867 mutex_exit(&vlun->svl_mutex);
4868 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4869 (void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4870 mutex_enter(&vlun->svl_mutex);
4871 vlun->svl_active_pclass = tptr;
4872 }
4873 mutex_exit(&vlun->svl_mutex);
4874 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4875 vlun->svl_waiting_for_activepath = 0;
4876 } else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
4877 mutex_enter(&vlun->svl_mutex);
4878 if (vlun->svl_active_pclass != NULL) {
4879 if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
4880 mutex_exit(&vlun->svl_mutex);
4881 /*
4882 * externally initiated failover has happened;
4883 * force state to ONLINE (see comment above)
4884 */
4885 mdi_pi_set_state(pip,
4886 MDI_PATHINFO_STATE_ONLINE);
4887 vlun->svl_fo_support = opinfo.opinfo_mode;
4888 mdi_pi_set_preferred(pip,
4889 opinfo.opinfo_preferred);
4890 return (MDI_SUCCESS);
4891 }
4892 }
4893 mutex_exit(&vlun->svl_mutex);
4894 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);
4895
4896 /*
4897 * Initiate auto-failback, if enabled, for path if path-state
4898 * is transitioning from OFFLINE->STANDBY and pathclass is the
4899 * preferred pathclass for this storage.
4900 * NOTE: In case where opinfo_path_state is SCSI_PATH_ACTIVE
4901 * (above), where the pi state is set to STANDBY, we don't
4902 * initiate auto-failback as the next IO shall take care
4903 * of this. See comment above.
4904 */
4905 (void) fo->sfo_pathclass_next(NULL, &best_pclass,
4906 vlun->svl_fops_ctpriv);
4907 if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4908 VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4909 (strcmp(pclass, best_pclass) == 0) &&
4910 ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)||
4911 (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
4912 VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
4913 " OFFLINE->STANDBY transition for lun %s\n",
4914 best_pclass, (void *)pip, vlun->svl_lun_wwn));
4915 (void) taskq_dispatch(vhci->vhci_taskq,
4916 vhci_initiate_auto_failback, (void *) vlun,
4917 KM_SLEEP);
4918 }
4919 }
4920 vlun->svl_fo_support = opinfo.opinfo_mode;
4921 mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);
4922
4923 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
4924 " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
4925 opinfo.opinfo_rev, opinfo.opinfo_path_state,
4926 opinfo.opinfo_preferred, opinfo.opinfo_mode));
4927
4928 return (MDI_SUCCESS);
4929 }
4930
4931 /*
4932 * Form the kstat name and call mdi_pi_kstat_create()
4933 */
4934 void
4935 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
4936 {
4937 dev_info_t *tgt_dip;
4938 dev_info_t *pdip;
4939 char *guid;
4940 char *target_port, *target_port_dup;
4941 char ks_name[KSTAT_STRLEN];
4942 uint_t pid;
4943 int by_id;
4944 mod_hash_val_t hv;
4945
4946
4947 /* return if we have already allocated kstats */
4948 if (mdi_pi_kstat_exists(pip))
4949 return;
4950
4951 /*
4952 * We need instance numbers to create a kstat name; return if we don't
4953 * have instance numbers assigned yet.
4954 */
4955 tgt_dip = mdi_pi_get_client(pip);
4956 pdip = mdi_pi_get_phci(pip);
4957 if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
4958 return;
4959
4960 /*
4961 * A path oriented kstat has a ks_name of the form:
4962 *
4963 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
4964 *
4965 * We maintain a bidirectional 'target-port' to <pid> map,
4966 * called targetmap. All pathinfo nodes with the same
4967 * 'target-port' map to the same <pid>. The iostat(1M) code,
4968 * when parsing a path oriented kstat name, uses the <pid> as
4969 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
4970 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
4971 * this ioctl needs to translate a <pid> to a 'target-port'
4972 * even after all pathinfo nodes associated with the
4973 * 'target-port' have been destroyed. This is needed to support
4974 * consistent first-iteration activity-since-boot iostat(1M)
4975 * output. Because of this requirement, the mapping can't be
4976 * based on pathinfo information in a devinfo snapshot.
4977 */
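	/*
	 * For example (hypothetical instance numbers): a path from
	 * client "ssd1" through pHCI "fp0" whose 'target-port' maps to
	 * <pid> 2 gets the ks_name "ssd1.t2.fp0".
	 */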
4978
4979 /* determine 'target-port' */
4980 if (mdi_prop_lookup_string(pip,
4981 SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) {
4982 target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
4983 (void) mdi_prop_free(target_port);
4984 by_id = 1;
4985 } else {
4986 /*
4987 * If the pHCI did not set up 'target-port' on this
4988 * pathinfo node, assume that our client is the only
4989 * one with paths to the device by using the guid
4990 * value as the 'target-port'. Since no other client
4991 * will have the same guid, no other client will use
4992 * the same <pid>. NOTE: a client with an instance
4993 * number always has a guid.
4994 */
4995 (void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
4996 PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
4997 target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
4998 ddi_prop_free(guid);
4999
5000 /*
5001 * For this type of mapping we don't want the
5002 * <id> -> 'target-port' mapping to be made. This
5003 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
5004 * to fail, and the iostat(1M) long '-n' output will
5005 * still use the <pid>. We do this because we just
5006 * made up the 'target-port' using the guid, and we
5007 * don't want to expose that fact in iostat output.
5008 */
5009 by_id = 0;
5010 }
5011
5012 /* find/establish <pid> given 'target-port' */
5013 mutex_enter(&vhci_targetmap_mutex);
5014 if (mod_hash_find(vhci_targetmap_byport,
5015 (mod_hash_key_t)target_port_dup, &hv) == 0) {
5016 pid = (int)(intptr_t)hv; /* mapping exists */
5017 } else {
5018 pid = vhci_targetmap_pid++; /* new mapping */
5019
5020 (void) mod_hash_insert(vhci_targetmap_byport,
5021 (mod_hash_key_t)target_port_dup,
5022 (mod_hash_val_t)(intptr_t)pid);
5023 if (by_id) {
5024 (void) mod_hash_insert(vhci_targetmap_bypid,
5025 (mod_hash_key_t)(uintptr_t)pid,
5026 (mod_hash_val_t)(uintptr_t)target_port_dup);
5027 }
5028 target_port_dup = NULL; /* owned by hash */
5029 }
5030 mutex_exit(&vhci_targetmap_mutex);
5031
5032 /* form kstat name */
5033 (void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
5034 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
5035 pid, ddi_driver_name(pdip), ddi_get_instance(pdip));
5036
5037 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
5038 "kstat %s: pid %x <-> port %s\n", (void *)pip,
5039 ks_name, pid, target_port_dup));
5040 if (target_port_dup)
5041 kmem_free(target_port_dup, strlen(target_port_dup) + 1);
5042
5043 /* call mdi to create kstats with the name we built */
5044 (void) mdi_pi_kstat_create(pip, ks_name);
5045 }
5046
5047 /* ARGSUSED */
5048 static int
5049 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5050 {
5051 scsi_hba_tran_t *hba = NULL;
5052 struct scsi_device *psd = NULL;
5053 scsi_vhci_lun_t *vlun = NULL;
5054 dev_info_t *pdip = NULL;
5055 dev_info_t *cdip;
5056 dev_info_t *tgt_dip;
5057 struct scsi_vhci *vhci;
5058 char *guid;
5059 struct scsi_failover_ops *sfo;
5060 scsi_vhci_priv_t *svp = NULL;
5061 struct scsi_address *ap;
5062 struct scsi_pkt *pkt;
5063 int rval = MDI_FAILURE;
5064 mpapi_item_list_t *list_ptr;
5065 mpapi_lu_data_t *ld;
5066
5067 ASSERT(vdip != NULL);
5068 ASSERT(pip != NULL);
5069
5070 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
5071 ASSERT(vhci != NULL);
5072
5073 pdip = mdi_pi_get_phci(pip);
5074 hba = ddi_get_driver_private(pdip);
5075 ASSERT(hba != NULL);
5076
5077 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5078 ASSERT(svp != NULL);
5079
5080 cdip = mdi_pi_get_client(pip);
5081 ASSERT(cdip != NULL);
5082 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
5083 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
5084 VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
5085 "property failed"));
5086 goto failure;
5087 }
5088
5089 vlun = vhci_lun_lookup(cdip);
5090 ASSERT(vlun != NULL);
5091
5092 ddi_prop_free(guid);
5093
5094 vlun->svl_dip = mdi_pi_get_client(pip);
5095 ASSERT(vlun->svl_dip != NULL);
5096
5097 psd = svp->svp_psd;
5098 ASSERT(psd != NULL);
5099
5100 ap = &psd->sd_address;
5101
5102 /*
5103 * Get inquiry data into pathinfo related scsi_device structure.
5104 * Free sd_inq when the pathinfo-related scsi_device structure is destroyed
5105 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own
5106 * copy of scsi_device and scsi_inquiry data on a per-path basis.
5107 */
5108 if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
5109 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
5110 "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval));
5111 rval = MDI_FAILURE;
5112 goto failure;
5113 }
5114
5115 /*
5116 * See if we have a failover module to support the device.
5117 *
5118 * We re-probe to determine the failover ops for each path. This
5119 * is done in case there are any path-specific side-effects associated
5120 * with the sfo_device_probe implementation.
5121 *
5122 * Give the first successful sfo_device_probe the opportunity to
5123 * establish 'ctpriv', vlun/client private data. The ctpriv will
5124 * then be passed into the failover module on all other sfo_device_*()
5125 * operations (and must be freed by sfo_device_unprobe implementation).
5126 *
5127 * NOTE: While sfo_device_probe is done once per path,
5128 * sfo_device_unprobe only occurs once - when the vlun is destroyed.
5129 *
5130 * NOTE: We don't currently support per-path fops private data
5131 * mechanism.
5132 */
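	/*
	 * Illustrative sketch (signature assumed from
	 * scsi_failover_ops(9S)): a failover module typically allocates
	 * ctpriv on its first successful probe and frees it in unprobe:
	 *
	 *	static int
	 *	my_device_probe(struct scsi_device *sd,
	 *	    struct scsi_inquiry *inq, void **ctprivp)
	 *	{
	 *		if (!my_supported(inq))
	 *			return (SFO_DEVICE_PROBE_PHCI);
	 *		if (*ctprivp == NULL)
	 *			*ctprivp = kmem_zalloc(sizeof (my_ct_t),
	 *			    KM_SLEEP);
	 *		return (SFO_DEVICE_PROBE_VHCI);
	 *	}
	 *
	 * "my_device_probe", "my_supported" and "my_ct_t" are
	 * placeholders.
	 */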
5133 sfo = vhci_dev_fo(vdip, psd,
5134 &vlun->svl_fops_ctpriv, &vlun->svl_fops_name);
5135
5136 /* check path configuration result with current vlun state */
5137 if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) ||
5138 (sfo && vlun->svl_not_supported) ||
5139 ((sfo == NULL) && vlun->svl_fops)) {
5140 /* Getting different results for different paths. */
5141 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5142 "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n",
5143 (void *)pip));
5144 cmn_err(CE_WARN, "scsi_vhci: failover contradiction: "
5145 "'%s'.vs.'%s': path %s\n",
5146 vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL",
5147 sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip));
5148 vlun->svl_not_supported = 1;
5149 rval = MDI_NOT_SUPPORTED;
5150 goto done;
5151 } else if (sfo == NULL) {
5152 /* No failover module - device not supported under vHCI. */
5153 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5154 "!vhci_pathinfo_online: dev (path 0x%p) not "
5155 "supported\n", (void *)pip));
5156
5157 /* XXX does this contradict vhci_is_dev_supported ? */
5158 vlun->svl_not_supported = 1;
5159 rval = MDI_NOT_SUPPORTED;
5160 goto done;
5161 }
5162
5163 /* failover supported for device - save failover_ops in vlun */
5164 vlun->svl_fops = sfo;
5165 ASSERT(vlun->svl_fops_name != NULL);
5166
5167 /*
5168 * Obtain the device-type based mpxio options as specified in
5169 * scsi_vhci.conf file.
5170 *
5171 * NOTE: currently, the end result is a call to
5172 * mdi_set_lb_region_size().
5173 */
5174 tgt_dip = psd->sd_dev;
5175 ASSERT(tgt_dip != NULL);
5176 vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);
5177
5178 /*
5179 * if PGR is active, revalidate key and register on this path also,
5180 * if key is still valid
5181 */
5182 sema_p(&vlun->svl_pgr_sema);
5183 if (vlun->svl_pgr_active) {
5184 rval = vhci_pgr_validate_and_register(svp);
5185 if (rval != 1) {
5186 rval = MDI_FAILURE;
5187 sema_v(&vlun->svl_pgr_sema);
5188 goto failure;
5189 }
5190 }
5191 sema_v(&vlun->svl_pgr_sema);
5192
5193 if (svp->svp_new_path) {
5194 /*
5195 * Last chance to perform any cleanup operations on this
5196 * new path before making this path completely online.
5197 */
5198 svp->svp_new_path = 0;
5199
5200 /*
5201 * If scsi_vhci knows the lun is already RESERVE'd,
5202 * then skip issuing RELEASE on the new path.
5203 */
5204 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
5205 /*
5206 * Issue SCSI-2 RELEASE only for the first time on
5207 * a new path just in case the host rebooted and
5208 * a reservation is still pending on this path.
5209 * IBM Shark storage does not clear RESERVE upon
5210 * host reboot.
5211 */
5212 pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
5213 sizeof (struct scsi_arq_status), 0, 0,
5214 SLEEP_FUNC, NULL);
5215 if (pkt == NULL) {
5216 VHCI_DEBUG(1, (CE_NOTE, NULL,
5217 "!vhci_pathinfo_online: "
5218 "Release init_pkt failed :%p\n",
5219 (void *)pip));
5220 rval = MDI_FAILURE;
5221 goto failure;
5222 }
5223 pkt->pkt_cdbp[0] = SCMD_RELEASE;
5224 pkt->pkt_time = 60;
5225
5226 VHCI_DEBUG(1, (CE_NOTE, NULL,
5227 "!vhci_path_online: path:%p "
5228 "Issued SCSI-2 RELEASE\n", (void *)pip));
5229
5230 /* Ignore the return value */
5231 (void) vhci_do_scsi_cmd(pkt);
5232 scsi_destroy_pkt(pkt);
5233 }
5234 }
5235
5236 rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
5237 if (rval == MDI_FAILURE) {
5238 goto failure;
5239 }
5240
5241 /* Initialize MP-API data */
5242 vhci_update_mpapi_data(vhci, vlun, pip);
5243
5244 /*
5245 * MP-API also needs the Inquiry data to be maintained in the
5246 * mp_vendor_prop_t structure, so find the lun and update its
5247 * structure with this data.
5248 */
5249 list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
5250 MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
5251 ld = (mpapi_lu_data_t *)list_ptr->item->idata;
5252 if (ld != NULL) {
5253 bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
5254 bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
5255 bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
5256 } else {
5257 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
5258 "mpapi_lu_data_t is NULL"));
5259 }
5260
5261 /* create kstats for path */
5262 vhci_kstat_create_pathinfo(pip);
5263
5264 done:
5265 mutex_enter(&vhci_global_mutex);
5266 cv_broadcast(&vhci_cv);
5267 mutex_exit(&vhci_global_mutex);
5268
5269 if (vlun->svl_setcap_done) {
5270 (void) vhci_pHCI_cap(ap, "sector-size",
5271 vlun->svl_sector_size, 1, pip);
5272 }
5273
5274 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
5275 (void *)pip));
5276
5277 failure:
5278 return (rval);
5279 }
5280
5281 /*
5282 * path offline handler. Release all bindings that will not be
5283 * released by the normal packet transport/completion code path.
5284 * Since we don't (presently) keep any bindings alive outside of
5285 * the in-transport packets (which will be released on completion)
5286 * there is not much to do here.
5287 */
5288 /* ARGSUSED */
5289 static int
5290 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5291 {
5292 scsi_hba_tran_t *hba = NULL;
5293 struct scsi_device *psd = NULL;
5294 dev_info_t *pdip = NULL;
5295 dev_info_t *cdip = NULL;
5296 scsi_vhci_priv_t *svp = NULL;
5297
5298 ASSERT(vdip != NULL);
5299 ASSERT(pip != NULL);
5300
5301 pdip = mdi_pi_get_phci(pip);
5302 ASSERT(pdip != NULL);
5303 if (pdip == NULL) {
5304 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5305 "phci dip", (void *)pip));
5306 return (MDI_FAILURE);
5307 }
5308
5309 cdip = mdi_pi_get_client(pip);
5310 ASSERT(cdip != NULL);
5311 if (cdip == NULL) {
5312 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5313 "client dip", (void *)pip));
5314 return (MDI_FAILURE);
5315 }
5316
5317 hba = ddi_get_driver_private(pdip);
5318 ASSERT(hba != NULL);
5319
5320 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5321 if (svp == NULL) {
5322 /*
5323 * mdi_pathinfo node in INIT state can have vHCI private
5324 * information set to null
5325 */
5326 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5327 "svp is NULL for pip 0x%p\n", (void *)pip));
5328 return (MDI_SUCCESS);
5329 }
5330
5331 psd = svp->svp_psd;
5332 ASSERT(psd != NULL);
5333
5334 mutex_enter(&svp->svp_mutex);
5335
5336 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5337 "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
5338 while (svp->svp_cmds != 0) {
5339 if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
5340 drv_usectohz(vhci_path_quiesce_timeout * 1000000),
5341 TR_CLOCK_TICK) == -1) {
5342 /*
5343 * The timeout was reached without the condition
5344 * being signaled.
5345 */
5346 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5347 "Timeout reached on path 0x%p without the cond\n",
5348 (void *)pip));
5349 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5350 "%d cmds still pending on path: 0x%p\n",
5351 svp->svp_cmds, (void *)pip));
5352 break;
5353 }
5354 }
5355 mutex_exit(&svp->svp_mutex);
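	/*
	 * Note (illustrative arithmetic): with the default
	 * vhci_path_quiesce_timeout of 60, each cv_reltimedwait(9F)
	 * above is bounded by drv_usectohz(60 * 1000000) ticks, i.e.
	 * about one minute before the offline proceeds even with
	 * commands still outstanding.
	 */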
5356
5357 /*
5358 * Check to see if this vlun has an active SCSI-II RESERVE and this
5359 * is the pip for the path that has been reserved.
5360 * If so, clear the reservation by sending a reset, so the host will not
5361 * get a reservation conflict. Reset the flag VLUN_RESERVE_ACTIVE_FLG
5362 * for this lun. Also a reset notify is sent to the target driver
5363 * just in case the POR check condition is cleared by some other layer
5364 * in the stack.
5365 */
5366 if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
5367 if (pip == svp->svp_svl->svl_resrv_pip) {
5368 if (vhci_recovery_reset(svp->svp_svl,
5369 &svp->svp_psd->sd_address, TRUE,
5370 VHCI_DEPTH_TARGET) == 0) {
5371 VHCI_DEBUG(1, (CE_NOTE, NULL,
5372 "!vhci_pathinfo_offline (pip:%p):"
5373 "reset failed, retrying\n", (void *)pip));
5374 delay(1*drv_usectohz(1000000));
5375 if (vhci_recovery_reset(svp->svp_svl,
5376 &svp->svp_psd->sd_address, TRUE,
5377 VHCI_DEPTH_TARGET) == 0) {
5378 VHCI_DEBUG(1, (CE_NOTE, NULL,
5379 "!vhci_pathinfo_offline "
5380 "(pip:%p): reset failed, "
5381 "giving up!\n", (void *)pip));
5382 }
5383 }
5384 svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
5385 }
5386 }
5387
5388 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
5389 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
5390
5391 VHCI_DEBUG(1, (CE_NOTE, NULL,
5392 "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
5393 return (MDI_SUCCESS);
5394 }
5395
5396
5397 /*
5398 * routine for SCSI VHCI IOCTL implementation.
5399 */
5400 /* ARGSUSED */
5401 static int
5402 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
5403 {
5404 struct scsi_vhci *vhci;
5405 dev_info_t *vdip;
5406 mdi_pathinfo_t *pip;
5407 int instance, held;
5408 int retval = 0;
5409 caddr_t phci_path = NULL, client_path = NULL;
5410 caddr_t paddr = NULL;
5411 sv_iocdata_t ioc;
5412 sv_iocdata_t *pioc = &ioc;
5413 sv_switch_to_cntlr_iocdata_t iocsc;
5414 sv_switch_to_cntlr_iocdata_t *piocsc = &iocsc;
5415 caddr_t s;
5416 scsi_vhci_lun_t *vlun;
5417 struct scsi_failover_ops *fo;
5418 char *pclass;
5419
5420 /* Check for validity of vhci structure */
5421 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
5422 if (vhci == NULL) {
5423 return (ENXIO);
5424 }
5425
5426 mutex_enter(&vhci->vhci_mutex);
5427 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5428 mutex_exit(&vhci->vhci_mutex);
5429 return (ENXIO);
5430 }
5431 mutex_exit(&vhci->vhci_mutex);
5432
5433 /* Get the vhci dip */
5434 vdip = vhci->vhci_dip;
5435 ASSERT(vdip != NULL);
5436 instance = ddi_get_instance(vdip);
5437
5438 /* Allocate memory for getting parameters from userland */
5439 phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5440 client_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5441 paddr = kmem_zalloc(MAXNAMELEN, KM_SLEEP);
5442
5443 /*
5444 * Set a local variable indicating the ioctl name. Used for
5445 * printing debug strings.
5446 */
5447 switch (cmd) {
5448 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5449 s = "GET_CLIENT_MULTIPATH_INFO";
5450 break;
5451
5452 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5453 s = "GET_PHCI_MULTIPATH_INFO";
5454 break;
5455
5456 case SCSI_VHCI_GET_CLIENT_NAME:
5457 s = "GET_CLIENT_NAME";
5458 break;
5459
5460 case SCSI_VHCI_PATH_ONLINE:
5461 s = "PATH_ONLINE";
5462 break;
5463
5464 case SCSI_VHCI_PATH_OFFLINE:
5465 s = "PATH_OFFLINE";
5466 break;
5467
5468 case SCSI_VHCI_PATH_STANDBY:
5469 s = "PATH_STANDBY";
5470 break;
5471
5472 case SCSI_VHCI_PATH_TEST:
5473 s = "PATH_TEST";
5474 break;
5475
5476 case SCSI_VHCI_SWITCH_TO_CNTLR:
5477 s = "SWITCH_TO_CNTLR";
5478 break;
5479 case SCSI_VHCI_PATH_DISABLE:
5480 s = "PATH_DISABLE";
5481 break;
5482 case SCSI_VHCI_PATH_ENABLE:
5483 s = "PATH_ENABLE";
5484 break;
5485
5486 case SCSI_VHCI_GET_TARGET_LONGNAME:
5487 s = "GET_TARGET_LONGNAME";
5488 break;
5489
5490 #ifdef DEBUG
5491 case SCSI_VHCI_CONFIGURE_PHCI:
5492 s = "CONFIGURE_PHCI";
5493 break;
5494
5495 case SCSI_VHCI_UNCONFIGURE_PHCI:
5496 s = "UNCONFIGURE_PHCI";
5497 break;
5498 #endif
5499
5500 default:
5501 s = "Unknown";
5502 vhci_log(CE_NOTE, vdip,
5503 "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
5504 retval = ENOTSUP;
5505 break;
5506 }
5507 if (retval != 0) {
5508 goto end;
5509 }
5510
5511 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));
5512
5513 /*
5514 * Get IOCTL parameters from userland
5515 */
5516 switch (cmd) {
5517 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5518 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5519 case SCSI_VHCI_GET_CLIENT_NAME:
5520 case SCSI_VHCI_PATH_ONLINE:
5521 case SCSI_VHCI_PATH_OFFLINE:
5522 case SCSI_VHCI_PATH_STANDBY:
5523 case SCSI_VHCI_PATH_TEST:
5524 case SCSI_VHCI_PATH_DISABLE:
5525 case SCSI_VHCI_PATH_ENABLE:
5526 case SCSI_VHCI_GET_TARGET_LONGNAME:
5527 #ifdef DEBUG
5528 case SCSI_VHCI_CONFIGURE_PHCI:
5529 case SCSI_VHCI_UNCONFIGURE_PHCI:
5530 #endif
5531 retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
5532 break;
5533
5534 case SCSI_VHCI_SWITCH_TO_CNTLR:
5535 retval = vhci_get_iocswitchdata((const void *)data, piocsc,
5536 mode, s);
5537 break;
5538 }
5539 if (retval != 0) {
5540 goto end;
5541 }
5542
5543
5544 /*
5545 * Process the IOCTL
5546 */
5547 switch (cmd) {
5548 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5549 {
5550 uint_t num_paths; /* Num paths to client dev */
5551 sv_path_info_t *upibuf = NULL; /* To keep userland values */
5552 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */
5553 dev_info_t *cdip; /* Client device dip */
5554
5555 if (pioc->ret_elem == NULL) {
5556 retval = EINVAL;
5557 break;
5558 }
5559
5560 /* Get client device path from user land */
5561 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5562 retval = EFAULT;
5563 break;
5564 }
5565
5566 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5567 "client <%s>", s, client_path));
5568
5569 /* Get number of paths to this client device */
5570 if ((cdip = mdi_client_path2devinfo(vdip, client_path))
5571 == NULL) {
5572 retval = ENXIO;
5573 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5574 "client dip doesn't exist. invalid path <%s>",
5575 s, client_path));
5576 break;
5577 }
5578 num_paths = mdi_client_get_path_count(cdip);
5579
5580 if (ddi_copyout(&num_paths, pioc->ret_elem,
5581 sizeof (num_paths), mode)) {
5582 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5583 "num_paths copyout failed", s));
5584 retval = EFAULT;
5585 break;
5586 }
5587
5588 /* If user just wanted num_paths, then return */
5589 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5590 num_paths == 0) {
5591 break;
5592 }
5593
5594 /* Clamp num_paths to the number of elements that fit in the user buffer */
5595 if (num_paths > pioc->buf_elem) {
5596 num_paths = pioc->buf_elem;
5597 }
5598
5599 /* Allocate memory and get userland pointers */
5600 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5601 pioc, mode, s) != 0) {
5602 retval = EFAULT;
5603 break;
5604 }
5605 ASSERT(upibuf != NULL);
5606 ASSERT(kpibuf != NULL);
5607
5608 /*
5609 * Get the path information and send it to userland.
5610 */
5611 if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
5612 != MDI_SUCCESS) {
5613 retval = ENXIO;
5614 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5615 break;
5616 }
5617
5618 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5619 pioc, mode, s)) {
5620 retval = EFAULT;
5621 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5622 break;
5623 }
5624
5625 /* Free the memory allocated for path information */
5626 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5627 break;
5628 }
5629
5630 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5631 {
5632 uint_t num_paths; /* Num paths to client dev */
5633 sv_path_info_t *upibuf = NULL; /* To keep userland values */
5634 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */
5635 dev_info_t *pdip; /* PHCI device dip */
5636
5637 if (pioc->ret_elem == NULL) {
5638 retval = EINVAL;
5639 break;
5640 }
5641
5642 /* Get PHCI device path from user land */
5643 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5644 retval = EFAULT;
5645 break;
5646 }
5647
5648 VHCI_DEBUG(6, (CE_WARN, vdip,
5649 "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
5650
5651 /* Get number of devices associated with this PHCI device */
5652 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5653 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5654 "phci dip doesn't exist. invalid path <%s>",
5655 s, phci_path));
5656 retval = ENXIO;
5657 break;
5658 }
5659
5660 num_paths = mdi_phci_get_path_count(pdip);
5661
5662 if (ddi_copyout(&num_paths, pioc->ret_elem,
5663 sizeof (num_paths), mode)) {
5664 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5665 "num_paths copyout failed", s));
5666 retval = EFAULT;
5667 break;
5668 }
5669
5670 /* If user just wanted num_paths, then return */
5671 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5672 num_paths == 0) {
5673 break;
5674 }
5675
5676 /* Clamp num_paths to the number of elements that fit in the user buffer */
5677 if (num_paths > pioc->buf_elem) {
5678 num_paths = pioc->buf_elem;
5679 }
5680
5681 /* Allocate memory and get userland pointers */
5682 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5683 pioc, mode, s) != 0) {
5684 retval = EFAULT;
5685 break;
5686 }
5687 ASSERT(upibuf != NULL);
5688 ASSERT(kpibuf != NULL);
5689
5690 /*
5691 * Get the path information and send it to userland.
5692 */
5693 if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
5694 != MDI_SUCCESS) {
5695 retval = ENXIO;
5696 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5697 break;
5698 }
5699
5700 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5701 pioc, mode, s)) {
5702 retval = EFAULT;
5703 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5704 break;
5705 }
5706
5707 /* Free the memory allocated for path information */
5708 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5709 break;
5710 }
5711
5712 case SCSI_VHCI_GET_CLIENT_NAME:
5713 {
5714 dev_info_t *cdip, *pdip;
5715
5716 /* Get PHCI path and device address from user land */
5717 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5718 vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5719 retval = EFAULT;
5720 break;
5721 }
5722
5723 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5724 "phci <%s>, paddr <%s>", s, phci_path, paddr));
5725
5726 /* Get the PHCI dip */
5727 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5728 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5729 "phci dip doesn't exist. invalid path <%s>",
5730 s, phci_path));
5731 retval = ENXIO;
5732 break;
5733 }
5734
5735 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5736 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5737 "pathinfo doesn't exist. invalid device addr", s));
5738 retval = ENXIO;
5739 break;
5740 }
5741
5742 /* Get the client device pathname and send to userland */
5743 cdip = mdi_pi_get_client(pip);
5744 vhci_ioc_devi_to_path(cdip, client_path);
5745
5746 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5747 "client <%s>", s, client_path));
5748
5749 if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
5750 retval = EFAULT;
5751 break;
5752 }
5753 break;
5754 }
5755
5756 case SCSI_VHCI_PATH_ONLINE:
5757 case SCSI_VHCI_PATH_OFFLINE:
5758 case SCSI_VHCI_PATH_STANDBY:
5759 case SCSI_VHCI_PATH_TEST:
5760 {
5761 dev_info_t *pdip; /* PHCI dip */
5762
5763 /* Get PHCI path and device address from user land */
5764 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5765 vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5766 retval = EFAULT;
5767 break;
5768 }
5769
5770 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5771 "phci <%s>, paddr <%s>", s, phci_path, paddr));
5772
5773 /* Get the PHCI dip */
5774 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5775 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5776 "phci dip doesn't exist. invalid path <%s>",
5777 s, phci_path));
5778 retval = ENXIO;
5779 break;
5780 }
5781
5782 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5783 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5784 "pathinfo doesn't exist. invalid device addr", s));
5785 retval = ENXIO;
5786 break;
5787 }
5788
5789 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5790 "Calling MDI function to change device state", s));
5791
5792 switch (cmd) {
5793 case SCSI_VHCI_PATH_ONLINE:
5794 retval = mdi_pi_online(pip, 0);
5795 break;
5796
5797 case SCSI_VHCI_PATH_OFFLINE:
5798 retval = mdi_pi_offline(pip, 0);
5799 break;
5800
5801 case SCSI_VHCI_PATH_STANDBY:
5802 retval = mdi_pi_standby(pip, 0);
5803 break;
5804
5805 case SCSI_VHCI_PATH_TEST:
5806 break;
5807 }
5808 break;
5809 }
5810
5811 case SCSI_VHCI_SWITCH_TO_CNTLR:
5812 {
5813 dev_info_t *cdip;
5814 struct scsi_device *devp;
5815
5816 /* Get the client device pathname */
5817 if (ddi_copyin(piocsc->client, client_path,
5818 MAXPATHLEN, mode)) {
5819 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5820 "client_path copyin failed", s));
5821 retval = EFAULT;
5822 break;
5823 }
5824
5825 /* Get the path class to which user wants to switch */
5826 if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
5827 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5828 "controller_class copyin failed", s));
5829 retval = EFAULT;
5830 break;
5831 }
5832
5833 /* Perform validity checks */
5834 if ((cdip = mdi_client_path2devinfo(vdip,
5835 client_path)) == NULL) {
5836 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5837 "client dip doesn't exist. invalid path <%s>",
5838 s, client_path));
5839 retval = ENXIO;
5840 break;
5841 }
5842
5843 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
5844 "to switch controller"));
5845 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
5846 "class <%s>", client_path, paddr));
5847
5848 if (strcmp(paddr, PCLASS_PRIMARY) &&
5849 strcmp(paddr, PCLASS_SECONDARY)) {
5850 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5851 "invalid path class <%s>", s, paddr));
5852 retval = ENXIO;
5853 break;
5854 }
5855
5856 devp = ddi_get_driver_private(cdip);
5857 if (devp == NULL) {
5858 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5859 "invalid scsi device <%s>", s, client_path));
5860 retval = ENXIO;
5861 break;
5862 }
5863 vlun = ADDR2VLUN(&devp->sd_address);
5864 ASSERT(vlun);
5865
5866 /*
5867 * Check whether the device has only one pclass, PRIMARY. If so,
5868 * the device does not support failover. We assume that a device
5869 * with a single pclass uses PRIMARY, as that is the case today.
5870 * If this changes and symmetric devices with some other pclass
5871 * are supported in the future, this IOCTL will have to be
5872 * overhauled anyway, since the only arguments it currently
5873 * accepts are PRIMARY and SECONDARY.
5874 */
5875 fo = vlun->svl_fops;
5876 if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass,
5877 vlun->svl_fops_ctpriv)) {
5878 retval = ENOTSUP;
5879 break;
5880 }
5881
5882 VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
5883 mutex_enter(&vlun->svl_mutex);
5884 if (vlun->svl_active_pclass != NULL) {
5885 if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
5886 mutex_exit(&vlun->svl_mutex);
5887 retval = EALREADY;
5888 VHCI_RELEASE_LUN(vlun);
5889 break;
5890 }
5891 }
5892 mutex_exit(&vlun->svl_mutex);
5893 /* Call mdi function to cause a switch over */
5894 retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
5895 if (retval == MDI_SUCCESS) {
5896 retval = 0;
5897 } else if (retval == MDI_BUSY) {
5898 retval = EBUSY;
5899 } else {
5900 retval = EIO;
5901 }
5902 VHCI_RELEASE_LUN(vlun);
5903 break;
5904 }
5905
5906 case SCSI_VHCI_PATH_ENABLE:
5907 case SCSI_VHCI_PATH_DISABLE:
5908 {
5909 dev_info_t *cdip, *pdip;
5910
5911 /*
5912 * Get client device path from user land
5913 */
5914 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5915 retval = EFAULT;
5916 break;
5917 }
5918
5919 /*
5920 * Get Phci device path from user land
5921 */
5922 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5923 retval = EFAULT;
5924 break;
5925 }
5926
5927 /*
5928 * Get the devinfo for the Phci.
5929 */
5930 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5931 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5932 "phci dip doesn't exist. invalid path <%s>",
5933 s, phci_path));
5934 retval = ENXIO;
5935 break;
5936 }
5937
5938 /*
5939 * If the client path is set to /scsi_vhci then we need
5940 * to do the operation on all the clients so set cdip to NULL.
5941 * Else, try to get the client dip.
5942 */
5943 if (strcmp(client_path, "/scsi_vhci") == 0) {
5944 cdip = NULL;
5945 } else {
5946 if ((cdip = mdi_client_path2devinfo(vdip,
5947 client_path)) == NULL) {
5948 retval = ENXIO;
5949 VHCI_DEBUG(1, (CE_WARN, NULL,
5950 "!vhci_ioctl: ioctl <%s> client dip "
5951 "doesn't exist. invalid path <%s>",
5952 s, client_path));
5953 break;
5954 }
5955 }
5956
5957 if (cmd == SCSI_VHCI_PATH_ENABLE)
5958 retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
5959 else
5960 retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);
5961
5962 break;
5963 }
5964
5965 case SCSI_VHCI_GET_TARGET_LONGNAME:
5966 {
5967 uint_t pid = pioc->buf_elem;
5968 char *target_port;
5969 mod_hash_val_t hv;
5970
5971 /* targetmap lookup of 'target-port' by <pid> */
5972 if (mod_hash_find(vhci_targetmap_bypid,
5973 (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
5974 /*
5975 * NOTE: failure to find the mapping is OK for guid
5976 * based 'target-port' values.
5977 */
5978 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5979 "targetport mapping doesn't exist: pid %d",
5980 s, pid));
5981 retval = ENXIO;
5982 break;
5983 }
5984
5985 /* copyout 'target-port' result */
5986 target_port = (char *)hv;
5987 if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
5988 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5989 "targetport copyout failed: len: %d",
5990 s, (int)strlen(target_port)));
5991 retval = EFAULT;
5992 }
5993 break;
5994 }
5995
5996 #ifdef DEBUG
5997 case SCSI_VHCI_CONFIGURE_PHCI:
5998 {
5999 dev_info_t *pdip;
6000
6001 /* Get PHCI path and device address from user land */
6002 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6003 retval = EFAULT;
6004 break;
6005 }
6006
6007 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6008 "phci <%s>", s, phci_path));
6009
6010 /* Get the PHCI dip */
6011 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6012 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6013 "phci dip doesn't exist. invalid path <%s>",
6014 s, phci_path));
6015 retval = ENXIO;
6016 break;
6017 }
6018
6019 if (ndi_devi_config(pdip,
6020 NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) {
6021 retval = EIO;
6022 }
6023
6024 ddi_release_devi(pdip);
6025 break;
6026 }
6027
6028 case SCSI_VHCI_UNCONFIGURE_PHCI:
6029 {
6030 dev_info_t *pdip;
6031
6032 /* Get PHCI path and device address from user land */
6033 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6034 retval = EFAULT;
6035 break;
6036 }
6037
6038 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6039 "phci <%s>", s, phci_path));
6040
6041 /* Get the PHCI dip */
6042 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6043 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6044 "phci dip doesn't exist. invalid path <%s>",
6045 s, phci_path));
6046 retval = ENXIO;
6047 break;
6048 }
6049
6050 if (ndi_devi_unconfig(pdip,
6051 NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
6052 retval = EBUSY;
6053 }
6054
6055 ddi_release_devi(pdip);
6056 break;
6057 }
6058 #endif
6059 }
6060
6061 end:
6062 /* Free the memory allocated above */
6063 if (phci_path != NULL) {
6064 kmem_free(phci_path, MAXPATHLEN);
6065 }
6066 if (client_path != NULL) {
6067 kmem_free(client_path, MAXPATHLEN);
6068 }
6069 if (paddr != NULL) {
6070 kmem_free(paddr, MAXNAMELEN);
6071 }
6072 return (retval);
6073 }
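/*
 * For reference, a userland consumer of SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO
 * typically makes two calls: one to learn the path count, then one to fetch
 * the per-path records. A minimal sketch; the file descriptor and the
 * client path below are hypothetical placeholders:
 *
 *	sv_iocdata_t	ioc;
 *	uint_t		nelem = 0;
 *
 *	bzero(&ioc, sizeof (ioc));
 *	ioc.client = "/scsi_vhci/disk@g<guid>";
 *	ioc.ret_elem = &nelem;
 *	ioc.buf_elem = 0;			count-only call
 *	(void) ioctl(fd, SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO, &ioc);
 *
 *	ioc.buf_elem = nelem;			fetch the records
 *	ioc.ret_buf = calloc(nelem, sizeof (sv_path_info_t));
 *	(void) ioctl(fd, SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO, &ioc);
 */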
6074
6075 /*
6076 * devctl IOCTL support for client device DR
6077 */
6078 /* ARGSUSED */
6079 int
6080 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
6081 int *rvalp)
6082 {
6083 dev_info_t *self;
6084 dev_info_t *child;
6085 scsi_hba_tran_t *hba;
6086 struct devctl_iocdata *dcp;
6087 struct scsi_vhci *vhci;
6088 int rv = 0;
6089 int retval = 0;
6090 scsi_vhci_priv_t *svp;
6091 mdi_pathinfo_t *pip;
6092
6093 if ((vhci = ddi_get_soft_state(vhci_softstate,
6094 MINOR2INST(getminor(dev)))) == NULL)
6095 return (ENXIO);
6096
6097 /*
6098 * check if :devctl minor device has been opened
6099 */
6100 mutex_enter(&vhci->vhci_mutex);
6101 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
6102 mutex_exit(&vhci->vhci_mutex);
6103 return (ENXIO);
6104 }
6105 mutex_exit(&vhci->vhci_mutex);
6106
6107 self = vhci->vhci_dip;
6108 hba = ddi_get_driver_private(self);
6109 if (hba == NULL)
6110 return (ENXIO);
6111
6112 /*
6113 * We can use the generic implementation for these ioctls
6114 */
6115 switch (cmd) {
6116 case DEVCTL_DEVICE_GETSTATE:
6117 case DEVCTL_DEVICE_ONLINE:
6118 case DEVCTL_DEVICE_OFFLINE:
6119 case DEVCTL_DEVICE_REMOVE:
6120 case DEVCTL_BUS_GETSTATE:
6121 return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
6122 }
6123
6124 /*
6125 * read devctl ioctl data
6126 */
6127 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
6128 return (EFAULT);
6129
6130 switch (cmd) {
6131
6132 case DEVCTL_DEVICE_RESET:
6133 /*
6134 * lookup and hold child device
6135 */
6136 if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
6137 ndi_dc_getaddr(dcp))) == NULL) {
6138 rv = ENXIO;
6139 break;
6140 }
6141 retval = mdi_select_path(child, NULL,
6142 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
6143 NULL, &pip);
6144 if ((retval != MDI_SUCCESS) || (pip == NULL)) {
6145 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: "
6146 "Unable to get a path, dip 0x%p", (void *)child));
6147 rv = ENXIO;
6148 break;
6149 }
6150 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
6151 if (vhci_recovery_reset(svp->svp_svl,
6152 &svp->svp_psd->sd_address, TRUE,
6153 VHCI_DEPTH_TARGET) == 0) {
6154 VHCI_DEBUG(1, (CE_NOTE, NULL,
6155 "!vhci_ioctl(pip:%p): "
6156 "reset failed\n", (void *)pip));
6157 rv = ENXIO;
6158 }
6159 mdi_rele_path(pip);
6160 break;
6161
6162 case DEVCTL_BUS_QUIESCE:
6163 case DEVCTL_BUS_UNQUIESCE:
6164 case DEVCTL_BUS_RESET:
6165 case DEVCTL_BUS_RESETALL:
6166 #ifdef DEBUG
6167 case DEVCTL_BUS_CONFIGURE:
6168 case DEVCTL_BUS_UNCONFIGURE:
6169 #endif
6170 rv = ENOTSUP;
6171 break;
6172
6173 default:
6174 rv = ENOTTY;
6175 } /* end of outer switch */
6176
6177 ndi_dc_freehdl(dcp);
6178 return (rv);
6179 }
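/*
 * The DEVCTL_DEVICE_RESET case above is normally driven from userland
 * through the devctl(3DEVCTL) library. Assuming those interfaces, and
 * with a hypothetical placeholder device path, the request would look
 * roughly like:
 *
 *	devctl_hdl_t hdl;
 *
 *	hdl = devctl_device_acquire("/devices/scsi_vhci/disk@g<guid>", 0);
 *	if (hdl != NULL) {
 *		(void) devctl_device_reset(hdl);
 *		devctl_release(hdl);
 *	}
 */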
6180
6181 /*
6182 * Routine to get the PHCI pathname from ioctl structures in userland
6183 */
6184 /* ARGSUSED */
6185 static int
6186 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
6187 int mode, caddr_t s)
6188 {
6189 int retval = 0;
6190
6191 if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
6192 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
6193 "phci_path copyin failed", s));
6194 retval = EFAULT;
6195 }
6196 return (retval);
6197
6198 }
6199
6200
6201 /*
6202 * Routine to get the Client device pathname from ioctl structures in userland
6203 */
6204 /* ARGSUSED */
6205 static int
6206 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
6207 int mode, caddr_t s)
6208 {
6209 int retval = 0;
6210
6211 if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
6212 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
6213 "ioctl <%s> client_path copyin failed", s));
6214 retval = EFAULT;
6215 }
6216 return (retval);
6217 }
6218
6219
6220 /*
6221 * Routine to get physical device address from ioctl structure in userland
6222 */
6223 /* ARGSUSED */
6224 static int
6225 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
6226 {
6227 int retval = 0;
6228
6229 if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
6230 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
6231 "ioctl <%s> device addr copyin failed", s));
6232 retval = EFAULT;
6233 }
6234 return (retval);
6235 }
6236
6237
6238 /*
6239 * Routine to send client device pathname to userland.
6240 */
6241 /* ARGSUSED */
6242 static int
6243 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
6244 int mode, caddr_t s)
6245 {
6246 int retval = 0;
6247
6248 if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
6249 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
6250 "ioctl <%s> client_path copyout failed", s));
6251 retval = EFAULT;
6252 }
6253 return (retval);
6254 }
6255
6256
6257 /*
6258 * Routine to translate a dev_info pointer (dip) to a device pathname.
6259 */
6260 static void
6261 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
6262 {
6263 (void) ddi_pathname(dip, path);
6264 }
6265
6266
6267 /*
6268 * vhci_get_phci_path_list:
6269 * get information about the paths associated with a
6270 * given PHCI device.
6271 *
6272 * Return Values:
6273 * MDI_SUCCESS; path information is returned through 'pibuf'.
6274 */
6275 int
6276 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
6277 uint_t num_elems)
6278 {
6279 uint_t count, done;
6280 mdi_pathinfo_t *pip;
6281 sv_path_info_t *ret_pip;
6282 int status;
6283 size_t prop_size;
6284 int circular;
6285
6286 /*
6287 * Get the PHCI structure and retrieve the path information
6288 * from the GUID hash table.
6289 */
6290
6291 ret_pip = pibuf;
6292 count = 0;
6293
6294 ndi_devi_enter(pdip, &circular);
6295
6296 done = (count >= num_elems);
6297 pip = mdi_get_next_client_path(pdip, NULL);
6298 while (pip && !done) {
6299 mdi_pi_lock(pip);
6300 (void) ddi_pathname(mdi_pi_get_phci(pip),
6301 ret_pip->device.ret_phci);
6302 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6303 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6304 &ret_pip->ret_ext_state);
6305
6306 status = mdi_prop_size(pip, &prop_size);
6307 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6308 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6309 }
6310
6311 #ifdef DEBUG
6312 if (status != MDI_SUCCESS) {
6313 VHCI_DEBUG(2, (CE_WARN, NULL,
6314 "!vhci_get_phci_path_list: "
6315 "phci <%s>, prop size failure 0x%x",
6316 ret_pip->device.ret_phci, status));
6317 }
6318 #endif /* DEBUG */
6319
6320
6321 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6322 prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6323 status = mdi_prop_pack(pip,
6324 &ret_pip->ret_prop.buf,
6325 ret_pip->ret_prop.buf_size);
6326
6327 #ifdef DEBUG
6328 if (status != MDI_SUCCESS) {
6329 VHCI_DEBUG(2, (CE_WARN, NULL,
6330 "!vhci_get_phci_path_list: "
6331 "phci <%s>, prop pack failure 0x%x",
6332 ret_pip->device.ret_phci, status));
6333 }
6334 #endif /* DEBUG */
6335 }
6336
6337 mdi_pi_unlock(pip);
6338 pip = mdi_get_next_client_path(pdip, pip);
6339 ret_pip++;
6340 count++;
6341 done = (count >= num_elems);
6342 }
6343
6344 ndi_devi_exit(pdip, circular);
6345
6346 return (MDI_SUCCESS);
6347 }
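/*
 * This routine and vhci_get_client_path_list() below share the same walk
 * protocol: hold the parent via ndi_devi_enter(), advance a cursor with
 * the matching mdi_get_next_*_path() routine, and snapshot each node's
 * state and packed properties under mdi_pi_lock(). Stripped to its
 * skeleton, the idiom is:
 *
 *	ndi_devi_enter(dip, &circ);
 *	for (pip = mdi_get_next_client_path(dip, NULL); pip != NULL;
 *	    pip = mdi_get_next_client_path(dip, pip)) {
 *		mdi_pi_lock(pip);
 *		... snapshot state and properties ...
 *		mdi_pi_unlock(pip);
 *	}
 *	ndi_devi_exit(dip, circ);
 */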
6348
6349
6350 /*
6351 * vhci_get_client_path_list:
6352 * get information about the various paths associated with a
6353 * given client device.
6354 *
6355 * Return Values:
6356 * MDI_SUCCESS; path information is returned through 'pibuf'.
6357 */
6358 int
6359 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
6360 uint_t num_elems)
6361 {
6362 uint_t count, done;
6363 mdi_pathinfo_t *pip;
6364 sv_path_info_t *ret_pip;
6365 int status;
6366 size_t prop_size;
6367 int circular;
6368
6369 ret_pip = pibuf;
6370 count = 0;
6371
6372 ndi_devi_enter(cdip, &circular);
6373
6374 done = (count >= num_elems);
6375 pip = mdi_get_next_phci_path(cdip, NULL);
6376 while (pip && !done) {
6377 mdi_pi_lock(pip);
6378 (void) ddi_pathname(mdi_pi_get_phci(pip),
6379 ret_pip->device.ret_phci);
6380 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6381 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6382 &ret_pip->ret_ext_state);
6383
6384 status = mdi_prop_size(pip, &prop_size);
6385 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6386 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6387 }
6388
6389 #ifdef DEBUG
6390 if (status != MDI_SUCCESS) {
6391 VHCI_DEBUG(2, (CE_WARN, NULL,
6392 "!vhci_get_client_path_list: "
6393 "phci <%s>, prop size failure 0x%x",
6394 ret_pip->device.ret_phci, status));
6395 }
6396 #endif /* DEBUG */
6397
6398
6399 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6400 prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6401 status = mdi_prop_pack(pip,
6402 &ret_pip->ret_prop.buf,
6403 ret_pip->ret_prop.buf_size);
6404
6405 #ifdef DEBUG
6406 if (status != MDI_SUCCESS) {
6407 VHCI_DEBUG(2, (CE_WARN, NULL,
6408 "!vhci_get_client_path_list: "
6409 "phci <%s>, prop pack failure 0x%x",
6410 ret_pip->device.ret_phci, status));
6411 }
6412 #endif /* DEBUG */
6413 }
6414
6415 mdi_pi_unlock(pip);
6416 pip = mdi_get_next_phci_path(cdip, pip);
6417 ret_pip++;
6418 count++;
6419 done = (count >= num_elems);
6420 }
6421
6422 ndi_devi_exit(cdip, circular);
6423
6424 return (MDI_SUCCESS);
6425 }
6426
6427
6428 /*
6429 * Routine to get ioctl argument structure from userland.
6430 */
6431 /* ARGSUSED */
6432 static int
6433 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
6434 {
6435 int retval = 0;
6436
6437 #ifdef _MULTI_DATAMODEL
6438 switch (ddi_model_convert_from(mode & FMODELS)) {
6439 case DDI_MODEL_ILP32:
6440 {
6441 sv_iocdata32_t ioc32;
6442
6443 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6444 retval = EFAULT;
6445 break;
6446 }
6447 pioc->client = (caddr_t)(uintptr_t)ioc32.client;
6448 pioc->phci = (caddr_t)(uintptr_t)ioc32.phci;
6449 pioc->addr = (caddr_t)(uintptr_t)ioc32.addr;
6450 pioc->buf_elem = (uint_t)ioc32.buf_elem;
6451 pioc->ret_buf = (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
6452 pioc->ret_elem = (uint_t *)(uintptr_t)ioc32.ret_elem;
6453 break;
6454 }
6455
6456 case DDI_MODEL_NONE:
6457 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6458 retval = EFAULT;
6459 break;
6460 }
6461 break;
6462 }
6463 #else /* _MULTI_DATAMODEL */
6464 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6465 retval = EFAULT;
6466 }
6467 #endif /* _MULTI_DATAMODEL */
6468
6469 #ifdef DEBUG
6470 if (retval) {
6471 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6472 "iocdata copyin failed", s));
6473 }
6474 #endif
6475
6476 return (retval);
6477 }
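/*
 * The ILP32 case above exists because a 32-bit caller lays out
 * sv_iocdata_t with 32-bit pointers; copying the raw bytes into the
 * native kernel structure would shift every field. The idiom is to copy
 * in the 32-bit image and widen each pointer explicitly. For a
 * hypothetical structure:
 *
 *	struct foo32 { uint32_t buf; uint32_t len; };	ILP32 image
 *	struct foo { caddr_t buf; size_t len; };	native image
 *
 *	struct foo32 f32;
 *	(void) ddi_copyin(data, &f32, sizeof (f32), mode);
 *	pfoo->buf = (caddr_t)(uintptr_t)f32.buf;
 *	pfoo->len = (size_t)f32.len;
 */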
6478
6479
6480 /*
6481 * Routine to get the ioctl argument for the controller-switchover ioctl.
6482 */
6483 /* ARGSUSED */
6484 static int
6485 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
6486 int mode, caddr_t s)
6487 {
6488 int retval = 0;
6489
6490 #ifdef _MULTI_DATAMODEL
6491 switch (ddi_model_convert_from(mode & FMODELS)) {
6492 case DDI_MODEL_ILP32:
6493 {
6494 sv_switch_to_cntlr_iocdata32_t ioc32;
6495
6496 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6497 retval = EFAULT;
6498 break;
6499 }
6500 piocsc->client = (caddr_t)(uintptr_t)ioc32.client;
6501 piocsc->class = (caddr_t)(uintptr_t)ioc32.class;
6502 break;
6503 }
6504
6505 case DDI_MODEL_NONE:
6506 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6507 retval = EFAULT;
6508 }
6509 break;
6510 }
6511 #else /* _MULTI_DATAMODEL */
6512 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6513 retval = EFAULT;
6514 }
6515 #endif /* _MULTI_DATAMODEL */
6516
6517 #ifdef DEBUG
6518 if (retval) {
6519 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6520 "switch_to_cntlr_iocdata copyin failed", s));
6521 }
6522 #endif
6523
6524 return (retval);
6525 }
6526
6527
6528 /*
6529 * Routine to allocate memory for the path information structures.
6530 * It allocates two chunks of memory - one to keep the userland
6531 * pointers/values for path information and path properties, and a
6532 * second holding the kernel buffers for the path properties. The
6533 * path properties are finally copied out to userland.
6534 */
6535 /* ARGSUSED */
6536 static int
6537 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
6538 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6539 {
6540 sv_path_info_t *pi;
6541 uint_t bufsize;
6542 int retval = 0;
6543 int index;
6544
6545 /* Allocate memory */
6546 *upibuf = (sv_path_info_t *)
6547 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6548 ASSERT(*upibuf != NULL);
6549 *kpibuf = (sv_path_info_t *)
6550 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6551 ASSERT(*kpibuf != NULL);
6552
6553 /*
6554 * Get the path info structure from the user space.
6555 * We are interested in the following fields:
6556 * - user size of buffer for per path properties.
6557 * - user address of buffer for path info properties.
6558 * - user pointer for returning actual buffer size
6559 * Keep these fields in the 'upibuf' structures.
6560 * Allocate buffer for per path info properties in kernel
6561 * structure ('kpibuf').
6562 * Size of these buffers will be equal to the size of buffers
6563 * in the user space.
6564 */
6565 #ifdef _MULTI_DATAMODEL
6566 switch (ddi_model_convert_from(mode & FMODELS)) {
6567 case DDI_MODEL_ILP32:
6568 {
6569 sv_path_info32_t *src;
6570 sv_path_info32_t pi32;
6571
6572 src = (sv_path_info32_t *)pioc->ret_buf;
6573 pi = (sv_path_info_t *)*upibuf;
6574 for (index = 0; index < num_paths; index++, src++, pi++) {
6575 if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
6576 retval = EFAULT;
6577 break;
6578 }
6579
6580 pi->ret_prop.buf_size =
6581 (uint_t)pi32.ret_prop.buf_size;
6582 pi->ret_prop.ret_buf_size =
6583 (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
6584 pi->ret_prop.buf =
6585 (caddr_t)(uintptr_t)pi32.ret_prop.buf;
6586 }
6587 break;
6588 }
6589
6590 case DDI_MODEL_NONE:
6591 if (ddi_copyin(pioc->ret_buf, *upibuf,
6592 sizeof (sv_path_info_t) * num_paths, mode)) {
6593 retval = EFAULT;
6594 }
6595 break;
6596 }
6597 #else /* _MULTI_DATAMODEL */
6598 if (ddi_copyin(pioc->ret_buf, *upibuf,
6599 sizeof (sv_path_info_t) * num_paths, mode)) {
6600 retval = EFAULT;
6601 }
6602 #endif /* _MULTI_DATAMODEL */
6603
6604 if (retval != 0) {
6605 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
6606 "ioctl <%s> normal: path_info copyin failed", s));
6607 kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
6608 kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
6609 *upibuf = NULL;
6610 *kpibuf = NULL;
6611 return (retval);
6612 }
6613
6614 /*
6615 * Allocate memory for per path properties.
6616 */
6617 for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
6618 bufsize = (*upibuf)[index].ret_prop.buf_size;
6619
6620 if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
6621 pi->ret_prop.buf_size = bufsize;
6622 pi->ret_prop.buf = (caddr_t)
6623 kmem_zalloc(bufsize, KM_SLEEP);
6624 ASSERT(pi->ret_prop.buf != NULL);
6625 } else {
6626 pi->ret_prop.buf_size = 0;
6627 pi->ret_prop.buf = NULL;
6628 }
6629
6630 if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
6631 pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
6632 sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
6633 ASSERT(pi->ret_prop.ret_buf_size != NULL);
6634 } else {
6635 pi->ret_prop.ret_buf_size = NULL;
6636 }
6637 }
6638
6639 return (0);
6640 }
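/*
 * Ownership note for the two buffers returned above: 'upibuf' only
 * mirrors the userland pointers and sizes copied in, and owns no
 * property buffers; every ret_prop.buf and ret_prop.ret_buf_size in
 * 'kpibuf' is kernel memory that must be released again through
 * vhci_ioc_free_pathinfo(). vhci_ioc_send_pathinfo() later pairs each
 * kpibuf element with the user addresses saved in the corresponding
 * upibuf element.
 */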
6641
6642
6643 /*
6644 * Routine to free memory for the path information structures.
6645 * This is the memory allocated by vhci_ioc_alloc_pathinfo().
6646 */
6647 /* ARGSUSED */
6648 static void
6649 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6650 uint_t num_paths)
6651 {
6652 sv_path_info_t *pi;
6653 int index;
6654
6655 /* Free memory for per path properties */
6656 for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
6657 if (pi->ret_prop.ret_buf_size != NULL) {
6658 kmem_free(pi->ret_prop.ret_buf_size,
6659 sizeof (*pi->ret_prop.ret_buf_size));
6660 }
6661
6662 if (pi->ret_prop.buf != NULL) {
6663 kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
6664 }
6665 }
6666
6667 /* Free memory for path info structures */
6668 kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
6669 kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
6670 }
6671
6672
6673 /*
6674 * Routine to copy path information and path properties to userland.
6675 */
6676 /* ARGSUSED */
6677 static int
6678 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6679 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6680 {
6681 int retval = 0, index;
6682 sv_path_info_t *upi_ptr;
6683 sv_path_info32_t *upi32_ptr;
6684
6685 #ifdef _MULTI_DATAMODEL
6686 switch (ddi_model_convert_from(mode & FMODELS)) {
6687 case DDI_MODEL_ILP32:
6688 goto copy_32bit;
6689
6690 case DDI_MODEL_NONE:
6691 goto copy_normal;
6692 }
6693 #else /* _MULTI_DATAMODEL */
6694
6695 goto copy_normal;
6696
6697 #endif /* _MULTI_DATAMODEL */
6698
6699 copy_normal:
6700
6701 /*
6702 * Copy path information and path properties to user land.
6703 * Pointer fields inside the path property structure were
6704 * saved in the 'upibuf' structure earlier.
6705 */
6706 upi_ptr = pioc->ret_buf;
6707 for (index = 0; index < num_paths; index++) {
6708 if (ddi_copyout(kpibuf[index].device.ret_ct,
6709 upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6710 retval = EFAULT;
6711 break;
6712 }
6713
6714 if (ddi_copyout(kpibuf[index].ret_addr,
6715 upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6716 retval = EFAULT;
6717 break;
6718 }
6719
6720 if (ddi_copyout(&kpibuf[index].ret_state,
6721 &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state),
6722 mode)) {
6723 retval = EFAULT;
6724 break;
6725 }
6726
6727 if (ddi_copyout(&kpibuf[index].ret_ext_state,
6728 &upi_ptr[index].ret_ext_state,
6729 sizeof (kpibuf[index].ret_ext_state), mode)) {
6730 retval = EFAULT;
6731 break;
6732 }
6733
6734 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6735 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6736 upibuf[index].ret_prop.ret_buf_size,
6737 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6738 retval = EFAULT;
6739 break;
6740 }
6741
6742 if ((kpibuf[index].ret_prop.buf != NULL) &&
6743 ddi_copyout(kpibuf[index].ret_prop.buf,
6744 upibuf[index].ret_prop.buf,
6745 upibuf[index].ret_prop.buf_size, mode)) {
6746 retval = EFAULT;
6747 break;
6748 }
6749 }
6750
6751 #ifdef DEBUG
6752 if (retval) {
6753 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6754 "normal: path_info copyout failed", s));
6755 }
6756 #endif
6757
6758 return (retval);
6759
6760 copy_32bit:
6761 /*
6762 * Copy path information and path properties to user land.
6763 * Pointer fields inside the path property structure were
6764 * saved in the 'upibuf' structure earlier.
6765 */
6766 upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
6767 for (index = 0; index < num_paths; index++) {
6768 if (ddi_copyout(kpibuf[index].device.ret_ct,
6769 upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6770 retval = EFAULT;
6771 break;
6772 }
6773
6774 if (ddi_copyout(kpibuf[index].ret_addr,
6775 upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6776 retval = EFAULT;
6777 break;
6778 }
6779
6780 if (ddi_copyout(&kpibuf[index].ret_state,
6781 &upi32_ptr[index].ret_state,
6782 sizeof (kpibuf[index].ret_state), mode)) {
6783 retval = EFAULT;
6784 break;
6785 }
6786
6787 if (ddi_copyout(&kpibuf[index].ret_ext_state,
6788 &upi32_ptr[index].ret_ext_state,
6789 sizeof (kpibuf[index].ret_ext_state), mode)) {
6790 retval = EFAULT;
6791 break;
6792 }
6793 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6794 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6795 upibuf[index].ret_prop.ret_buf_size,
6796 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6797 retval = EFAULT;
6798 break;
6799 }
6800
6801 if ((kpibuf[index].ret_prop.buf != NULL) &&
6802 ddi_copyout(kpibuf[index].ret_prop.buf,
6803 upibuf[index].ret_prop.buf,
6804 upibuf[index].ret_prop.buf_size, mode)) {
6805 retval = EFAULT;
6806 break;
6807 }
6808 }
6809
6810 #ifdef DEBUG
6811 if (retval) {
6812 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6813 "32bit: path_info copyout failed", s));
6814 }
6815 #endif
6816
6817 return (retval);
6818 }
6819
6820
6821 /*
6822 * vhci_failover()
6823 * This routine expects VHCI_HOLD_LUN before being invoked. It can be invoked
6824 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC. For asynchronous failovers
6825 * this routine does VHCI_RELEASE_LUN on exit. For synchronous failovers
6826 * it is the caller's responsibility to release the lun.
6827 */
6828
6829 /* ARGSUSED */
6830 static int
6831 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
6832 {
6833 char *guid;
6834 scsi_vhci_lun_t *vlun = NULL;
6835 struct scsi_vhci *vhci;
6836 mdi_pathinfo_t *pip, *npip;
6837 char *s_pclass, *pclass1, *pclass2, *pclass;
6838 char active_pclass_copy[255], *active_pclass_ptr;
6839 char *ptr1, *ptr2;
6840 mdi_pathinfo_state_t pi_state;
6841 uint32_t pi_ext_state;
6842 scsi_vhci_priv_t *svp;
6843 struct scsi_device *sd;
6844 struct scsi_failover_ops *sfo;
6845 int sps; /* mdi_select_path() status */
6846 int activation_done = 0;
6847 int rval, retval = MDI_FAILURE;
6848 int reserve_pending, check_condition, UA_condition;
6849 struct scsi_pkt *pkt;
6850 struct buf *bp;
6851
6852 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
6853 sd = ddi_get_driver_private(cdip);
6854 vlun = ADDR2VLUN(&sd->sd_address);
6855 ASSERT(vlun != 0);
6856 ASSERT(VHCI_LUN_IS_HELD(vlun));
6857 guid = vlun->svl_lun_wwn;
6858 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
6859 vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
6860 "(GUID %s)", ddi_node_name(cdip), guid);
6861
6862 /*
6863 * Maintain a local copy of vlun->svl_active_pclass for the
6864 * rest of the processing. Accessing the field directly in the
6865 * loop below breaks the loop logic, especially when the field
6866 * is updated by other threads (e.g. while updating path
6867 * status), causing the 'paths are not currently available'
6868 * condition to be declared prematurely.
6869 */
6870 mutex_enter(&vlun->svl_mutex);
6871 if (vlun->svl_active_pclass != NULL) {
6872 (void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6873 sizeof (active_pclass_copy));
6874 active_pclass_ptr = &active_pclass_copy[0];
6875 mutex_exit(&vlun->svl_mutex);
6876 if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6877 active_pclass_ptr) != 0) {
6878 retval = MDI_FAILURE;
6879 }
6880 } else {
6881 /*
6882 * can happen only when the available path to device
6883 * discovered is a STANDBY path.
6884 */
6885 mutex_exit(&vlun->svl_mutex);
6886 active_pclass_copy[0] = '\0';
6887 active_pclass_ptr = NULL;
6888 }
6889
6890 sfo = vlun->svl_fops;
6891 ASSERT(sfo != NULL);
6892 pclass1 = s_pclass = active_pclass_ptr;
6893 VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6894 (s_pclass == NULL ? "<none>" : s_pclass)));
6895
6896 next_pathclass:
6897
6898 rval = sfo->sfo_pathclass_next(pclass1, &pclass2,
6899 vlun->svl_fops_ctpriv);
6900 if (rval == ENOENT) {
6901 if (s_pclass == NULL) {
6902 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6903 "failed, no more pathclasses\n", guid));
6904 goto done;
6905 } else {
6906 (void) sfo->sfo_pathclass_next(NULL, &pclass2,
6907 vlun->svl_fops_ctpriv);
6908 }
6909 } else if (rval == EINVAL) {
6910 vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6911 "device %s (GUID %s): Invalid path-class %s",
6912 ddi_node_name(cdip), guid,
6913 ((pclass1 == NULL) ? "<none>" : pclass1));
6914 goto done;
6915 }
6916 if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6917 /*
6918 * paths are not currently available
6919 */
6920 vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6921 " for device %s (GUID %s)",
6922 ddi_node_name(cdip), guid);
6923 goto done;
6924 }
6925 pip = npip = NULL;
6926 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6927 "%s as failover destination\n", guid, pclass2));
6928 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6929 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6930 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6931 "STANDBY paths found (status:%x)!\n", guid, sps));
6932 pclass1 = pclass2;
6933 goto next_pathclass;
6934 }
6935 do {
6936 pclass = NULL;
6937 if ((mdi_prop_lookup_string(npip, "path-class",
6938 &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6939 pclass) != 0)) {
6940 VHCI_DEBUG(1, (CE_NOTE, NULL,
6941 "!vhci_failover(5.5)(%s): skipping path "
6942 "%p(%s)...\n", guid, (void *)npip, pclass));
6943 pip = npip;
6944 sps = mdi_select_path(cdip, NULL,
6945 MDI_SELECT_STANDBY_PATH, pip, &npip);
6946 mdi_rele_path(pip);
6947 (void) mdi_prop_free(pclass);
6948 continue;
6949 }
6950 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6951
6952 /*
6953 * Issue a READ at a non-zero block on this STANDBY path.
6954 * Purple returns
6955 * 1. RESERVATION_CONFLICT if a reservation is pending
6956 * 2. POR check condition if a reset happened.
6957 * 3. failover check conditions if one is already in progress.
6958 */
6959 reserve_pending = 0;
6960 check_condition = 0;
6961 UA_condition = 0;
6962
6963 bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
6964 (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL);
6965 if (!bp) {
6966 VHCI_DEBUG(1, (CE_NOTE, NULL,
6967 "vhci_failover !No resources (buf)\n"));
6968 mdi_rele_path(npip);
6969 goto done;
6970 }
6971 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
6972 CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
6973 PKT_CONSISTENT, NULL, NULL);
6974 if (pkt) {
6975 (void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
6976 pkt->pkt_cdbp, SCMD_READ, 1, 1, 0);
6977 pkt->pkt_flags = FLAG_NOINTR;
6978 check_path_again:
6979 pkt->pkt_path_instance = mdi_pi_get_path_instance(npip);
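/* 90 second timeout (3 * 30s); presumably sized to ride out a failover */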
6980 pkt->pkt_time = 3*30;
6981
6982 if (scsi_transport(pkt) == TRAN_ACCEPT) {
6983 switch (pkt->pkt_reason) {
6984 case CMD_CMPLT:
6985 switch (SCBP_C(pkt)) {
6986 case STATUS_GOOD:
6987 /* Already failed over */
6988 activation_done = 1;
6989 break;
6990 case STATUS_RESERVATION_CONFLICT:
6991 reserve_pending = 1;
6992 break;
6993 case STATUS_CHECK:
6994 check_condition = 1;
6995 break;
6996 }
6997 }
6998 }
6999 if (check_condition &&
7000 (pkt->pkt_state & STATE_ARQ_DONE)) {
7001 uint8_t *sns, skey, asc, ascq;
7002 sns = (uint8_t *)
7003 &(((struct scsi_arq_status *)(uintptr_t)
7004 (pkt->pkt_scbp))->sts_sensedata);
7005 skey = scsi_sense_key(sns);
7006 asc = scsi_sense_asc(sns);
7007 ascq = scsi_sense_ascq(sns);
7008 if (skey == KEY_UNIT_ATTENTION &&
7009 asc == 0x29) {
7010 /* Already failed over */
7011 VHCI_DEBUG(1, (CE_NOTE, NULL,
7012 "!vhci_failover(7)(%s): "
7013 "path 0x%p POR UA condition\n",
7014 guid, (void *)npip));
7015 if (UA_condition == 0) {
7016 UA_condition = 1;
7017 goto check_path_again;
7018 }
7019 } else {
7020 activation_done = 0;
7021 VHCI_DEBUG(1, (CE_NOTE, NULL,
7022 "!vhci_failover(%s): path 0x%p "
7023 "unhandled chkcond %x %x %x\n",
7024 guid, (void *)npip, skey,
7025 asc, ascq));
7026 }
7027 }
7028 scsi_destroy_pkt(pkt);
7029 }
7030 scsi_free_consistent_buf(bp);
7031
7032 if (activation_done) {
7033 mdi_rele_path(npip);
7034 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7035 "path 0x%p already failedover\n", guid,
7036 (void *)npip));
7037 break;
7038 }
7039 if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
7040 (void) vhci_recovery_reset(vlun,
7041 &svp->svp_psd->sd_address,
7042 FALSE, VHCI_DEPTH_ALL);
7043 }
7044 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
7045 "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
7046 (void *)svp->svp_psd));
7047 if (sfo->sfo_path_activate(svp->svp_psd, pclass2,
7048 vlun->svl_fops_ctpriv) == 0) {
7049 activation_done = 1;
7050 mdi_rele_path(npip);
7051 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7052 "path 0x%p successfully activated\n", guid,
7053 (void *)npip));
7054 break;
7055 }
7056 pip = npip;
7057 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
7058 pip, &npip);
7059 mdi_rele_path(pip);
7060 } while ((npip != NULL) && (sps == MDI_SUCCESS));
7061 if (activation_done == 0) {
7062 pclass1 = pclass2;
7063 goto next_pathclass;
7064 }
7065
7066 /*
7067 * If we are here, we have succeeded in activating path npip of
7068 * pathclass pclass2; validate all paths of pclass2 by
7069 * "ping"-ing each one and mark the good ones ONLINE.
7070 * Also set the state of the paths belonging to the previously
7071 * active pathclass to STANDBY.
7072 */
7073 pip = npip = NULL;
7074 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7075 MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
7076 NULL, &npip);
7077 if (npip == NULL || sps != MDI_SUCCESS) {
7078 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
7079 "device %s (GUID %s): paths may be busy\n",
7080 ddi_node_name(cdip), guid));
7081 goto done;
7082 }
7083 do {
7084 (void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
7085 if (mdi_prop_lookup_string(npip, "path-class", &pclass)
7086 != MDI_SUCCESS) {
7087 pip = npip;
7088 sps = mdi_select_path(cdip, NULL,
7089 (MDI_SELECT_ONLINE_PATH |
7090 MDI_SELECT_STANDBY_PATH |
7091 MDI_SELECT_USER_DISABLE_PATH),
7092 pip, &npip);
7093 mdi_rele_path(pip);
7094 continue;
7095 }
7096 if (strcmp(pclass, pclass2) == 0) {
7097 if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
7098 svp = (scsi_vhci_priv_t *)
7099 mdi_pi_get_vhci_private(npip);
7100 VHCI_DEBUG(1, (CE_NOTE, NULL,
7101 "!vhci_failover(8)(%s): "
7102 "pinging path 0x%p\n",
7103 guid, (void *)npip));
7104 if (sfo->sfo_path_ping(svp->svp_psd,
7105 vlun->svl_fops_ctpriv) == 1) {
7106 mdi_pi_set_state(npip,
7107 MDI_PATHINFO_STATE_ONLINE);
7108 VHCI_DEBUG(1, (CE_NOTE, NULL,
7109 "!vhci_failover(9)(%s): "
7110 "path 0x%p ping successful, "
7111 "marked online\n", guid,
7112 (void *)npip));
7113 MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
7114 }
7115 }
7116 } else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
7117 == 0)) {
7118 if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
7119 mdi_pi_set_state(npip,
7120 MDI_PATHINFO_STATE_STANDBY);
7121 VHCI_DEBUG(1, (CE_NOTE, NULL,
7122 "!vhci_failover(10)(%s): path 0x%p marked "
7123 "STANDBY\n", guid, (void *)npip));
7124 MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
7125 }
7126 }
7127 (void) mdi_prop_free(pclass);
7128 pip = npip;
7129 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7130 MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH),
7131 pip, &npip);
7132 mdi_rele_path(pip);
7133 } while ((npip != NULL) && (sps == MDI_SUCCESS));
7134
7135 /*
7136 * Update the AccessState of related MP-API TPGs
7137 */
7138 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
7139
7140 vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
7141 "for device %s (GUID %s): failed over from %s to %s",
7142 ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
7143 s_pclass), pclass2);
7144 ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP);
7145 (void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1));
7146 mutex_enter(&vlun->svl_mutex);
7147 ptr2 = vlun->svl_active_pclass;
7148 vlun->svl_active_pclass = ptr1;
7149 mutex_exit(&vlun->svl_mutex);
7150 if (ptr2) {
7151 kmem_free(ptr2, strlen(ptr2)+1);
7152 }
7153 mutex_enter(&vhci->vhci_mutex);
7154 scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
7155 &vhci->vhci_reset_notify_listf);
7156 /* All reservations are cleared upon these resets. */
7157 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
7158 mutex_exit(&vhci->vhci_mutex);
7159 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
7160 "pathclass for %s is now %s\n", guid, pclass2));
7161 retval = MDI_SUCCESS;
7162
7163 done:
7164 vlun->svl_failover_status = retval;
7165 if (flags == MDI_FAILOVER_ASYNC) {
7166 VHCI_RELEASE_LUN(vlun);
7167 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7168 "releasing lun, as failover was ASYNC\n"));
7169 } else {
7170 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7171 "NOT releasing lun, as failover was SYNC\n"));
7172 }
7173 return (retval);
7174 }
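/*
 * vhci_failover() is not called directly; it runs via mdi_failover()
 * with the lun held, as the SCSI_VHCI_SWITCH_TO_CNTLR ioctl above does.
 * A condensed sketch of that calling pattern:
 *
 *	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
 *	rval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
 *	VHCI_RELEASE_LUN(vlun);		synchronous: caller releases
 */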
7175
7176 /*
7177 * vhci_client_attached is called after the successful attach of a
7178 * client devinfo node.
7179 */
7180 static void
7181 vhci_client_attached(dev_info_t *cdip)
7182 {
7183 mdi_pathinfo_t *pip;
7184 int circular;
7185
7186 /*
7187 * At this point the client has attached and its instance number is
7188 * valid, so we can set up kstats. We need to do this here because it
7189 * is possible for paths to go online prior to client attach, in which
7190 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
7191 * was a noop.
7192 */
7193 ndi_devi_enter(cdip, &circular);
7194 for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
7195 pip = mdi_get_next_phci_path(cdip, pip))
7196 vhci_kstat_create_pathinfo(pip);
7197 ndi_devi_exit(cdip, circular);
7198 }
7199
7200 /*
7201 * quiesce all of the online paths
7202 */
7203 static int
7204 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
7205 char *guid, char *active_pclass_ptr)
7206 {
7207 scsi_vhci_priv_t *svp;
7208 char *s_pclass = NULL;
7209 mdi_pathinfo_t *npip, *pip;
7210 int sps;
7211
7212 /* quiesce currently active paths */
7213 s_pclass = NULL;
7214 pip = npip = NULL;
7215 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
7216 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
7217 return (1);
7218 }
7219 do {
7220 if (mdi_prop_lookup_string(npip, "path-class",
7221 &s_pclass) != MDI_SUCCESS) {
7222 mdi_rele_path(npip);
7223 vhci_log(CE_NOTE, vdip, "!Failover operation failed "
7224 "for device %s (GUID %s) due to an internal "
7225 "error", ddi_node_name(cdip), guid);
7226 return (1);
7227 }
7228 if (strcmp(s_pclass, active_pclass_ptr) == 0) {
7229 /*
7230 * quiesce path. Free s_pclass since
7231 * we don't need it anymore
7232 */
7233 VHCI_DEBUG(1, (CE_NOTE, NULL,
7234 "!vhci_failover(2)(%s): failing over "
7235 "from %s; quiescing path %p\n",
7236 guid, s_pclass, (void *)npip));
7237 (void) mdi_prop_free(s_pclass);
7238 svp = (scsi_vhci_priv_t *)
7239 mdi_pi_get_vhci_private(npip);
7240 if (svp == NULL) {
7241 VHCI_DEBUG(1, (CE_NOTE, NULL,
7242 "!vhci_failover(2.5)(%s): no "
7243 "client priv! %p offlined?\n",
7244 guid, (void *)npip));
7245 pip = npip;
7246 sps = mdi_select_path(cdip, NULL,
7247 MDI_SELECT_ONLINE_PATH, pip, &npip);
7248 mdi_rele_path(pip);
7249 continue;
7250 }
7251 if (scsi_abort(&svp->svp_psd->sd_address, NULL)
7252 == 0) {
7253 (void) vhci_recovery_reset(vlun,
7254 &svp->svp_psd->sd_address, FALSE,
7255 VHCI_DEPTH_TARGET);
7256 }
7257 mutex_enter(&svp->svp_mutex);
7258 if (svp->svp_cmds == 0) {
7259 VHCI_DEBUG(1, (CE_NOTE, NULL,
7260 "!vhci_failover(3)(%s):"
7261 "quiesced path %p\n", guid, (void *)npip));
7262 } else {
7263 while (svp->svp_cmds != 0) {
7264 cv_wait(&svp->svp_cv, &svp->svp_mutex);
7265 VHCI_DEBUG(1, (CE_NOTE, NULL,
7266 "!vhci_failover(3.cv)(%s):"
7267 "quiesced path %p\n", guid,
7268 (void *)npip));
7269 }
7270 }
7271 mutex_exit(&svp->svp_mutex);
7272 } else {
7273 /*
7274 * make sure we free up the memory
7275 */
7276 (void) mdi_prop_free(s_pclass);
7277 }
7278 pip = npip;
7279 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
7280 pip, &npip);
7281 mdi_rele_path(pip);
7282 } while ((npip != NULL) && (sps == MDI_SUCCESS));
7283 return (0);
7284 }
7285
7286 static struct scsi_vhci_lun *
7287 vhci_lun_lookup(dev_info_t *tgt_dip)
7288 {
7289 return ((struct scsi_vhci_lun *)
7290 mdi_client_get_vhci_private(tgt_dip));
7291 }
7292
7293 static struct scsi_vhci_lun *
7294 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
7295 {
7296 struct scsi_vhci_lun *svl;
7297
7298 if ((svl = vhci_lun_lookup(tgt_dip)) != NULL) {
7299 return (svl);
7300 }
7301
7302 svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
7303 svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP);
7304 (void) strcpy(svl->svl_lun_wwn, guid);
7305 mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
7306 cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
7307 sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
7308 svl->svl_waiting_for_activepath = 1;
7309 svl->svl_sector_size = 1;
7310 mdi_client_set_vhci_private(tgt_dip, svl);
7311 *didalloc = 1;
7312 VHCI_DEBUG(1, (CE_NOTE, NULL,
7313 "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
7314 guid, (void *)svl));
7315 return (svl);
7316 }
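/*
 * The '*didalloc = 1' above tells the caller that this invocation
 * created the vlun rather than finding an existing one, presumably so
 * that a caller hitting an error later in its setup can free exactly
 * what was allocated here via vhci_lun_free().
 */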
7317
7318 static void
7319 vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd)
7320 {
7321 char *guid;
7322
7323 guid = dvlp->svl_lun_wwn;
7324 ASSERT(guid != NULL);
7325 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));
7326
7327 mutex_enter(&dvlp->svl_mutex);
7328 if (dvlp->svl_active_pclass != NULL) {
7329 kmem_free(dvlp->svl_active_pclass,
7330 strlen(dvlp->svl_active_pclass)+1);
7331 }
7332 dvlp->svl_active_pclass = NULL;
7333 mutex_exit(&dvlp->svl_mutex);
7334
7335 if (dvlp->svl_lun_wwn != NULL) {
7336 kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1);
7337 }
7338 dvlp->svl_lun_wwn = NULL;
7339
7340 if (dvlp->svl_fops_name) {
7341 kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1);
7342 }
7343 dvlp->svl_fops_name = NULL;
7344
7345 if (dvlp->svl_fops_ctpriv != NULL &&
7346 dvlp->svl_fops != NULL) {
7347 dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
7348 }
7349
7350 if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
7351 taskq_destroy(dvlp->svl_taskq);
7352
7353 mutex_destroy(&dvlp->svl_mutex);
7354 cv_destroy(&dvlp->svl_cv);
7355 sema_destroy(&dvlp->svl_pgr_sema);
7356 kmem_free(dvlp, sizeof (*dvlp));
7357 /*
7358 * vhci_lun_free may be called before the tgt_dip
7359 * initialization, so check whether sd is NULL.
7360 */
7361 if (sd != NULL)
7362 scsi_device_hba_private_set(sd, NULL);
7363 }
7364
7365 int
7366 vhci_do_scsi_cmd(struct scsi_pkt *pkt)
7367 {
7368 int err = 0;
7369 int retry_cnt = 0;
7370 uint8_t *sns, skey;
7371
7372 #ifdef DEBUG
7373 if (vhci_debug > 5) {
7374 vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip,
7375 CE_WARN, "Vhci command", pkt->pkt_cdbp);
7376 }
7377 #endif
7378
7379 retry:
7380 err = scsi_poll(pkt);
7381 if (err) {
7382 if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
7383 if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
7384 VHCI_DEBUG(1, (CE_NOTE, NULL,
7385 "!v_s_do_s_c: RELEASE conflict\n"));
7386 return (0);
7387 }
7388 }
7389 if (retry_cnt++ < 6) {
7390 VHCI_DEBUG(1, (CE_WARN, NULL,
7391 "!v_s_do_s_c:retry packet 0x%p "
7392 "status 0x%x reason %s",
7393 (void *)pkt, SCBP_C(pkt),
7394 scsi_rname(pkt->pkt_reason)));
7395 if ((pkt->pkt_reason == CMD_CMPLT) &&
7396 (SCBP_C(pkt) == STATUS_CHECK) &&
7397 (pkt->pkt_state & STATE_ARQ_DONE)) {
7398 sns = (uint8_t *)
7399 &(((struct scsi_arq_status *)(uintptr_t)
7400 (pkt->pkt_scbp))->sts_sensedata);
7401 skey = scsi_sense_key(sns);
7402 VHCI_DEBUG(1, (CE_WARN, NULL,
7403 "!v_s_do_s_c:retry "
7404 "packet 0x%p sense data %s", (void *)pkt,
7405 scsi_sname(skey)));
7406 }
7407 goto retry;
7408 }
7409 VHCI_DEBUG(1, (CE_WARN, NULL,
7410 "!v_s_do_s_c: failed transport 0x%p 0x%x",
7411 (void *)pkt, SCBP_C(pkt)));
7412 return (0);
7413 }
7414
7415 switch (pkt->pkt_reason) {
7416 case CMD_TIMEOUT:
7417 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
7418 "out (pkt 0x%p)", (void *)pkt));
7419 return (0);
7420 case CMD_CMPLT:
7421 switch (SCBP_C(pkt)) {
7422 case STATUS_GOOD:
7423 break;
7424 case STATUS_CHECK:
7425 if (pkt->pkt_state & STATE_ARQ_DONE) {
7426 sns = (uint8_t *)&(((
7427 struct scsi_arq_status *)
7428 (uintptr_t)
7429 (pkt->pkt_scbp))->
7430 sts_sensedata);
7431 skey = scsi_sense_key(sns);
7432 if ((skey ==
7433 KEY_UNIT_ATTENTION) ||
7434 (skey ==
7435 KEY_NOT_READY)) {
7436 /*
7437 * clear unit attn.
7438 */
7439
7440 VHCI_DEBUG(1,
7441 (CE_WARN, NULL,
7442 "!v_s_do_s_c: "
7443 "retry "
7444 "packet 0x%p sense "
7445 "data %s",
7446 (void *)pkt,
7447 scsi_sname
7448 (skey)));
7449 goto retry;
7450 }
7451 VHCI_DEBUG(4, (CE_WARN, NULL,
7452 "!ARQ while "
7453 "transporting "
7454 "(pkt 0x%p)",
7455 (void *)pkt));
7456 return (0);
7457 }
7458 return (0);
7459 default:
7460 VHCI_DEBUG(1, (CE_WARN, NULL,
7461 "!Bad status returned "
7462 "(pkt 0x%p, status %x)",
7463 (void *)pkt, SCBP_C(pkt)));
7464 return (0);
7465 }
7466 break;
7467 case CMD_INCOMPLETE:
7468 case CMD_RESET:
7469 case CMD_ABORTED:
7470 case CMD_TRAN_ERR:
7471 if (retry_cnt++ < 1) {
7472 VHCI_DEBUG(1, (CE_WARN, NULL,
7473 "!v_s_do_s_c: retry packet 0x%p %s",
7474 (void *)pkt, scsi_rname(pkt->pkt_reason)));
7475 goto retry;
7476 }
7477 /* FALLTHROUGH */
7478 default:
7479 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
7480 "complete successfully (pkt 0x%p,"
7481 "reason %x)", (void *)pkt, pkt->pkt_reason));
7482 return (0);
7483 }
7484 return (1);
7485 }
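
/*
 * The retry decisions above hinge on pulling the sense key out of the
 * auto-request-sense data embedded in the packet's scsi_arq_status. A
 * condensed sketch of that extraction (my_pkt_sense_key() is a
 * hypothetical helper):
 */
#if 0	/* sketch, never compiled */
static int
my_pkt_sense_key(struct scsi_pkt *pkt, uint8_t *skeyp)
{
	struct scsi_arq_status *arq;

	/* sense data is only valid for a CHECK CONDITION with ARQ done */
	if ((pkt->pkt_reason != CMD_CMPLT) ||
	    (SCBP_C(pkt) != STATUS_CHECK) ||
	    !(pkt->pkt_state & STATE_ARQ_DONE))
		return (0);

	arq = (struct scsi_arq_status *)(uintptr_t)pkt->pkt_scbp;
	*skeyp = scsi_sense_key((uint8_t *)&arq->sts_sensedata);
	return (1);
}
#endif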
7486
7487 static int
7488 vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
7489 {
7490 mdi_pathinfo_t *pip, *spip;
7491 dev_info_t *cdip;
7492 struct scsi_vhci_priv *svp;
7493 mdi_pathinfo_state_t pstate;
7494 uint32_t p_ext_state;
7495 int circular;
7496
7497 cdip = vlun->svl_dip;
7498 pip = spip = NULL;
7499 ndi_devi_enter(cdip, &circular);
7500 pip = mdi_get_next_phci_path(cdip, NULL);
7501 while (pip != NULL) {
7502 (void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
7503 if (pstate != MDI_PATHINFO_STATE_ONLINE) {
7504 spip = pip;
7505 pip = mdi_get_next_phci_path(cdip, spip);
7506 continue;
7507 }
7508 mdi_hold_path(pip);
7509 ndi_devi_exit(cdip, circular);
7510 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7511 mutex_enter(&svp->svp_mutex);
7512 while (svp->svp_cmds != 0) {
7513 if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
7514 drv_usectohz(vhci_path_quiesce_timeout * 1000000),
7515 TR_CLOCK_TICK) == -1) {
7516 mutex_exit(&svp->svp_mutex);
7517 mdi_rele_path(pip);
7518 VHCI_DEBUG(1, (CE_WARN, NULL,
7519 				    "Quiesce of LUN was not successful, "
7520 "vlun: 0x%p.", (void *)vlun));
7521 return (0);
7522 }
7523 }
7524 mutex_exit(&svp->svp_mutex);
7525 ndi_devi_enter(cdip, &circular);
7526 spip = pip;
7527 pip = mdi_get_next_phci_path(cdip, spip);
7528 mdi_rele_path(spip);
7529 }
7530 ndi_devi_exit(cdip, circular);
7531 return (1);
7532 }
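
/*
 * The per-path quiesce above is an instance of the standard "drain a
 * counter under a condvar with a deadline" pattern. A minimal sketch
 * (my_drain() is a hypothetical name; note that, as in the code above,
 * the relative timeout restarts after every wakeup):
 */
#if 0	/* sketch, never compiled */
static int
my_drain(kmutex_t *mp, kcondvar_t *cvp, int *countp, int secs)
{
	mutex_enter(mp);
	while (*countp != 0) {
		if (cv_reltimedwait(cvp, mp, drv_usectohz(secs * 1000000),
		    TR_CLOCK_TICK) == -1) {
			mutex_exit(mp);
			return (0);	/* timed out */
		}
	}
	mutex_exit(mp);
	return (1);	/* drained */
}
#endif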
7533
7534 static int
7535 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
7536 {
7537 scsi_vhci_lun_t *vlun;
7538 vhci_prout_t *prout;
7539 int rval, success;
7540 mdi_pathinfo_t *pip, *npip;
7541 scsi_vhci_priv_t *osvp;
7542 dev_info_t *cdip;
7543 uchar_t cdb_1;
7544 uchar_t temp_res_key[MHIOC_RESV_KEY_SIZE];
7545
7546
7547 /*
7548 * see if there are any other paths available; if none,
7549 * then there is nothing to do.
7550 */
7551 cdip = svp->svp_svl->svl_dip;
7552 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7553 MDI_SELECT_STANDBY_PATH, NULL, &pip);
7554 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7555 VHCI_DEBUG(4, (CE_NOTE, NULL,
7556 "%s%d: vhci_pgr_validate_and_register: first path\n",
7557 ddi_driver_name(cdip), ddi_get_instance(cdip)));
7558 return (1);
7559 }
7560
7561 vlun = svp->svp_svl;
7562 prout = &vlun->svl_prout;
7563 ASSERT(vlun->svl_pgr_active != 0);
7564
7565 /*
7566 * When the path was busy/offlined, some other host might have
7567 * cleared this key. Validate key on some other path first.
7568 * If it fails, return failure.
7569 */
7570
7571 npip = pip;
7572 pip = NULL;
7573 success = 0;
7574
7575 /* Save the res key */
7576 bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE);
7577
7578 /*
7579 	 * The CDB from the application may be a Register_And_Ignore,
7580 	 * which would force registration instead of validating the key.
7581 	 * Convert it to a normal CDB for validation, and be sure to
7582 	 * restore the original CDB afterwards.
7583 */
7584 cdb_1 = vlun->svl_cdb[1];
7585 vlun->svl_cdb[1] &= 0xe0;
7586
7587 do {
7588 osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
7589 if (osvp == NULL) {
7590 VHCI_DEBUG(4, (CE_NOTE, NULL,
7591 "vhci_pgr_validate_and_register: no "
7592 "client priv! 0x%p offlined?\n",
7593 (void *)npip));
7594 goto next_path_1;
7595 }
7596
7597 if (osvp == svp) {
7598 VHCI_DEBUG(4, (CE_NOTE, NULL,
7599 "vhci_pgr_validate_and_register: same svp 0x%p"
7600 " npip 0x%p vlun 0x%p\n",
7601 (void *)svp, (void *)npip, (void *)vlun));
7602 goto next_path_1;
7603 }
7604
7605 VHCI_DEBUG(4, (CE_NOTE, NULL,
7606 "vhci_pgr_validate_and_register: First validate on"
7607 " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
7608 " cdb1 %x\n", (void *)osvp, (void *)vlun,
7609 (void *)curthread, vlun->svl_cdb[1]));
7610 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");
7611
7612 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7613
7614 VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
7615 (void *)vlun));
7616 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7617
7618 rval = vhci_do_prout(osvp);
7619 if (rval == 1) {
7620 VHCI_DEBUG(4, (CE_NOTE, NULL,
7621 "%s%d: vhci_pgr_validate_and_register: key"
7622 " validated thread 0x%p\n", ddi_driver_name(cdip),
7623 ddi_get_instance(cdip), (void *)curthread));
7624 pip = npip;
7625 success = 1;
7626 break;
7627 } else {
7628 VHCI_DEBUG(4, (CE_NOTE, NULL,
7629 "vhci_pgr_validate_and_register: First validation"
7630 " on osvp 0x%p failed %x\n", (void *)osvp, rval));
7631 vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
7632 }
7633
7634 /*
7635 * Try other paths
7636 */
7637 next_path_1:
7638 pip = npip;
7639 rval = mdi_select_path(cdip, NULL,
7640 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7641 pip, &npip);
7642 mdi_rele_path(pip);
7643 } while ((rval == MDI_SUCCESS) && (npip != NULL));
7644
7645
7646 /* Be sure to restore original cdb */
7647 vlun->svl_cdb[1] = cdb_1;
7648
7649 /* Restore the res_key */
7650 bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7651
7652 /*
7653 	 * If the key could not be registered on any path the first time,
7654 	 * return success, as the online operation should still continue.
7655 */
7656 if (success == 0) {
7657 return (1);
7658 }
7659
7660 ASSERT(pip != NULL);
7661
7662 /*
7663 * Force register on new path
7664 */
7665 cdb_1 = vlun->svl_cdb[1]; /* store the cdb */
7666
7667 vlun->svl_cdb[1] &= 0xe0;
7668 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
7669
7670 vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");
7671
7672 bcopy(prout->active_service_key, prout->service_key,
7673 MHIOC_RESV_KEY_SIZE);
7674 bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7675
7676 vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: ");
7677
7678 rval = vhci_do_prout(svp);
7679 vlun->svl_cdb[1] = cdb_1; /* restore the cdb */
7680 if (rval != 1) {
7681 VHCI_DEBUG(4, (CE_NOTE, NULL,
7682 "vhci_pgr_validate_and_register: register on new"
7683 " path 0x%p svp 0x%p failed %x\n",
7684 (void *)pip, (void *)svp, rval));
7685 vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
7686 mdi_rele_path(pip);
7687 return (0);
7688 }
7689
7690 if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
7691 VHCI_DEBUG(4, (CE_NOTE, NULL,
7692 "vhci_pgr_validate_and_register: zero service key\n"));
7693 mdi_rele_path(pip);
7694 return (rval);
7695 }
7696
7697 /*
7698 * While the key was force registered, some other host might have
7699 * cleared the key. Re-validate key on another pre-existing path
7700 * before declaring success.
7701 */
7702 npip = pip;
7703 pip = NULL;
7704
7705 /*
7706 	 * The CDB from the application may again be a Register and Ignore,
7707 	 * which would force registration instead of validation.
7708 	 * Convert it to a normal CDB for validation, and be sure to
7709 	 * restore the original CDB afterwards.
7710 */
7711 cdb_1 = vlun->svl_cdb[1];
7712 vlun->svl_cdb[1] &= 0xe0;
7713 success = 0;
7714
7715 do {
7716 osvp = (scsi_vhci_priv_t *)
7717 mdi_pi_get_vhci_private(npip);
7718 if (osvp == NULL) {
7719 VHCI_DEBUG(4, (CE_NOTE, NULL,
7720 "vhci_pgr_validate_and_register: no "
7721 "client priv! 0x%p offlined?\n",
7722 (void *)npip));
7723 goto next_path_2;
7724 }
7725
7726 if (osvp == svp) {
7727 VHCI_DEBUG(4, (CE_NOTE, NULL,
7728 "vhci_pgr_validate_and_register: same osvp 0x%p"
7729 " npip 0x%p vlun 0x%p\n",
7730 (void *)svp, (void *)npip, (void *)vlun));
7731 goto next_path_2;
7732 }
7733
7734 VHCI_DEBUG(4, (CE_NOTE, NULL,
7735 "vhci_pgr_validate_and_register: Re-validation on"
7736 " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
7737 (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
7738 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7739
7740 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7741
7742 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7743
7744 rval = vhci_do_prout(osvp);
7745 if (rval == 1) {
7746 VHCI_DEBUG(4, (CE_NOTE, NULL,
7747 "%s%d: vhci_pgr_validate_and_register: key"
7748 " validated thread 0x%p\n", ddi_driver_name(cdip),
7749 ddi_get_instance(cdip), (void *)curthread));
7750 pip = npip;
7751 success = 1;
7752 break;
7753 } else {
7754 VHCI_DEBUG(4, (CE_NOTE, NULL,
7755 "vhci_pgr_validate_and_register: Re-validation on"
7756 " osvp 0x%p failed %x\n", (void *)osvp, rval));
7757 vhci_print_prout_keys(vlun,
7758 "v_pgr_val_reg: reval failed: ");
7759 }
7760
7761 /*
7762 * Try other paths
7763 */
7764 next_path_2:
7765 pip = npip;
7766 rval = mdi_select_path(cdip, NULL,
7767 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7768 pip, &npip);
7769 mdi_rele_path(pip);
7770 } while ((rval == MDI_SUCCESS) && (npip != NULL));
7771
7772 /* Be sure to restore original cdb */
7773 vlun->svl_cdb[1] = cdb_1;
7774
7775 if (success == 1) {
7776 /* Successfully validated registration */
7777 mdi_rele_path(pip);
7778 return (1);
7779 }
7780
7781 VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));
7782
7783 /*
7784 * key invalid, back out by registering key value of 0
7785 */
7786 VHCI_DEBUG(4, (CE_NOTE, NULL,
7787 "vhci_pgr_validate_and_register: backout on"
7788 " svp 0x%p being done\n", (void *)svp));
7789 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7790
7791 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7792 bzero(prout->service_key, MHIOC_RESV_KEY_SIZE);
7793
7794 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7795
7796 /*
7797 * Get a new path
7798 */
7799 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7800 MDI_SELECT_STANDBY_PATH, NULL, &pip);
7801 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7802 VHCI_DEBUG(4, (CE_NOTE, NULL,
7803 "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
7804 ddi_driver_name(cdip), ddi_get_instance(cdip)));
7805 return (0);
7806 }
7807
7808 if ((rval = vhci_do_prout(svp)) != 1) {
7809 VHCI_DEBUG(4, (CE_NOTE, NULL,
7810 "vhci_pgr_validate_and_register: backout on"
7811 " svp 0x%p failed\n", (void *)svp));
7812 vhci_print_prout_keys(vlun, "backout failed");
7813
7814 VHCI_DEBUG(4, (CE_WARN, NULL,
7815 "%s%d: vhci_pgr_validate_and_register: key"
7816 " validation and backout failed", ddi_driver_name(cdip),
7817 ddi_get_instance(cdip)));
7818 if (rval == VHCI_PGR_ILLEGALOP) {
7819 VHCI_DEBUG(4, (CE_WARN, NULL,
7820 "%s%d: vhci_pgr_validate_and_register: key"
7821 " already cleared", ddi_driver_name(cdip),
7822 ddi_get_instance(cdip)));
7823 rval = 1;
7824 } else
7825 rval = 0;
7826 } else {
7827 VHCI_DEBUG(4, (CE_NOTE, NULL,
7828 "%s%d: vhci_pgr_validate_and_register: key"
7829 " validation failed, key backed out\n",
7830 ddi_driver_name(cdip), ddi_get_instance(cdip)));
7831 vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
7832 }
7833 mdi_rele_path(pip);
7834
7835 return (rval);
7836 }
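
/*
 * The register/validate dance above repeatedly rewrites byte 1 of the
 * saved PROUT CDB: clearing the low five bits selects a plain REGISTER
 * service action for validation, while OR-ing in VHCI_PROUT_R_AND_IGNORE
 * forces a REGISTER AND IGNORE EXISTING KEY. A condensed sketch of that
 * manipulation (the my_* helpers are hypothetical):
 */
#if 0	/* sketch, never compiled */
static void
my_prout_set_register(uchar_t *cdb1p, uchar_t *savedp)
{
	*savedp = *cdb1p;	/* remember the caller's service action */
	*cdb1p &= 0xe0;		/* low five bits zero: plain REGISTER */
}

static void
my_prout_set_reg_ignore(uchar_t *cdb1p)
{
	*cdb1p &= 0xe0;
	*cdb1p |= VHCI_PROUT_R_AND_IGNORE;
}

static void
my_prout_restore(uchar_t *cdb1p, uchar_t saved)
{
	*cdb1p = saved;		/* always restore the original CDB */
}
#endif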
7837
7838 /*
7839 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures
7840 * that vhci_scsi_start is not called in interrupt context.
7841 * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we
7842 * need to complete the command if something goes wrong.
7843 */
7844 static void
7845 vhci_dispatch_scsi_start(void *arg)
7846 {
7847 struct vhci_pkt *vpkt = (struct vhci_pkt *)arg;
7848 struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
7849 int rval = TRAN_BUSY;
7850
7851 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
7852 " scsi-2 reserve for 0x%p\n",
7853 (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7854
7855 /*
7856 	 * To prevent the taskq from being called recursively we set
7857 	 * the VHCI_PKT_THRU_TASKQ bit in the vhci_pkt state.
7858 */
7859 vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;
7860
7861 /*
7862 	 * Wait for the transport to become ready to send packets;
7863 	 * if it times out, it will return something other than
7864 	 * TRAN_BUSY. vhci_reserve_delay may need to be tuned for
7865 	 * other transports and is therefore a global. Using delay()
7866 	 * is safe here since this routine runs from taskq dispatch,
7867 	 * not interrupt context.
7868 */
7869 while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
7870 vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
7871 delay(drv_usectohz(vhci_reserve_delay));
7872 }
7873
7874 switch (rval) {
7875 case TRAN_ACCEPT:
7876 return;
7877
7878 default:
7879 /*
7880 * This pkt shall be retried, and to ensure another taskq
7881 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
7882 * flag.
7883 */
7884 vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
7885
7886 /* Ensure that the pkt is retried without a reset */
7887 tpkt->pkt_reason = CMD_ABORTED;
7888 tpkt->pkt_statistics |= STAT_ABORTED;
7889 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
7890 "TRAN_rval %d returned for dip 0x%p", rval,
7891 (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7892 break;
7893 }
7894
7895 /*
7896 * vpkt_org_vpkt should always be NULL here if the retry command
7897 * has been successfully dispatched. If vpkt_org_vpkt != NULL at
7898 * this point, it is an error so restore the original vpkt and
7899 * return an error to the target driver so it can retry the
7900 * command as appropriate.
7901 */
7902 if (vpkt->vpkt_org_vpkt != NULL) {
7903 struct vhci_pkt *new_vpkt = vpkt;
7904 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
7905 mdi_pi_get_vhci_private(vpkt->vpkt_path);
7906
7907 vpkt = vpkt->vpkt_org_vpkt;
7908
7909 vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
7910 vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;
7911
7912 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
7913 new_vpkt->vpkt_tgt_pkt);
7914
7915 tpkt = vpkt->vpkt_tgt_pkt;
7916 }
7917
7918 scsi_hba_pkt_comp(tpkt);
7919 }
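
/*
 * Because the routine above runs from a taskq thread rather than
 * interrupt context, it may simply delay() and retry on TRAN_BUSY. A
 * minimal sketch of that pattern (my_start_until_accepted() is a
 * hypothetical name):
 */
#if 0	/* sketch, never compiled */
static int
my_start_until_accepted(struct scsi_address *ap, struct scsi_pkt *pkt,
    clock_t usec_delay)
{
	int rval;

	/* delay() may block, so this is safe only in thread context */
	while ((rval = vhci_scsi_start(ap, pkt)) == TRAN_BUSY)
		delay(drv_usectohz(usec_delay));
	return (rval);
}
#endif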
7920
7921 static void
7922 vhci_initiate_auto_failback(void *arg)
7923 {
7924 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
7925 dev_info_t *vdip, *cdip;
7926 int held;
7927
7928 cdip = vlun->svl_dip;
7929 vdip = ddi_get_parent(cdip);
7930
7931 VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
7932
7933 /*
7934 	 * Perform a final check that the active path class is indeed
7935 	 * not the preferred path class: in the time since the auto
7936 	 * failback was dispatched, an external failover could have been
7937 	 * detected (some other host could have detected this condition
7938 	 * and triggered the auto failback already). In such a case,
7939 	 * going ahead with failover would negate the whole purpose of
7940 	 * auto failback.
7941 */
7942 mutex_enter(&vlun->svl_mutex);
7943 if (vlun->svl_active_pclass != NULL) {
7944 char *best_pclass;
7945 struct scsi_failover_ops *fo;
7946
7947 fo = vlun->svl_fops;
7948
7949 (void) fo->sfo_pathclass_next(NULL, &best_pclass,
7950 vlun->svl_fops_ctpriv);
7951 if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
7952 mutex_exit(&vlun->svl_mutex);
7953 VHCI_RELEASE_LUN(vlun);
7954 VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
7955 "auto failback for %s as %s pathclass already "
7956 "active.\n", vlun->svl_lun_wwn, best_pclass));
7957 return;
7958 }
7959 }
7960 mutex_exit(&vlun->svl_mutex);
7961 if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
7962 == MDI_SUCCESS) {
7963 vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7964 "succeeded for device %s (GUID %s)",
7965 ddi_node_name(cdip), vlun->svl_lun_wwn);
7966 } else {
7967 vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7968 "failed for device %s (GUID %s)",
7969 ddi_node_name(cdip), vlun->svl_lun_wwn);
7970 }
7971 VHCI_RELEASE_LUN(vlun);
7972 }
7973
7974 #ifdef DEBUG
7975 static void
7976 vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
7977 {
7978 vhci_clean_print(NULL, 5, "Current PGR Keys",
7979 (uchar_t *)prin, numkeys * 8);
7980 }
7981 #endif
7982
7983 static void
7984 vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
7985 {
7986 int i;
7987 vhci_prout_t *prout;
7988 char buf1[4*MHIOC_RESV_KEY_SIZE + 1];
7989 char buf2[4*MHIOC_RESV_KEY_SIZE + 1];
7990 char buf3[4*MHIOC_RESV_KEY_SIZE + 1];
7991 char buf4[4*MHIOC_RESV_KEY_SIZE + 1];
7992
7993 prout = &vlun->svl_prout;
7994
7995 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7996 (void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
7997 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7998 (void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]);
7999 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8000 (void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
8001 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8002 (void) sprintf(&buf4[4*i], "[%02x]",
8003 prout->active_service_key[i]);
8004
8005 	/* Print everything in one call; otherwise the output may interleave. */
8006 VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
8007 	    "res_key            : %s\n"
8008 	    "service_key        : %s\n"
8009 	    "active_res_key     : %s\n"
8010 	    "active_service_key : %s\n",
8011 msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
8012 }
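
/*
 * Each key byte above is rendered as the four characters "[xx]", which
 * is why the scratch buffers are sized 4 * MHIOC_RESV_KEY_SIZE plus a
 * terminating NUL. A generic sketch of the formatter (my_fmt_key() is
 * a hypothetical name; buf must hold at least 4 * keylen + 1 bytes):
 */
#if 0	/* sketch, never compiled */
static void
my_fmt_key(const uchar_t *key, size_t keylen, char *buf)
{
	size_t i;

	for (i = 0; i < keylen; i++)
		(void) sprintf(&buf[4 * i], "[%02x]", key[i]);
}
#endif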
8013
8014 /*
8015 * Called from vhci_scsi_start to update the pHCI pkt with target packet.
8016 */
8017 static void
8018 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
8019 {
8020
8021 ASSERT(vpkt->vpkt_hba_pkt);
8022
8023 vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
8024 vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;
8025
8026 if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
8027 MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
8028 /*
8029 * Polled Command is requested or HBA is in
8030 * suspended state
8031 */
8032 vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
8033 vpkt->vpkt_hba_pkt->pkt_comp = NULL;
8034 } else {
8035 vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
8036 }
8037 vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
8038 bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
8039 vpkt->vpkt_tgt_init_cdblen);
8040 vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;
8041
8042 /* Re-initialize the following pHCI packet state information */
8043 vpkt->vpkt_hba_pkt->pkt_state = 0;
8044 vpkt->vpkt_hba_pkt->pkt_statistics = 0;
8045 vpkt->vpkt_hba_pkt->pkt_reason = 0;
8046 }
8047
8048 static int
8049 vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
8050 void *arg, void *result)
8051 {
8052 int ret = DDI_SUCCESS;
8053
8054 /*
8055 * Generic processing in MPxIO framework
8056 */
8057 ret = mdi_bus_power(parent, impl_arg, op, arg, result);
8058
8059 switch (ret) {
8060 case MDI_SUCCESS:
8061 ret = DDI_SUCCESS;
8062 break;
8063 case MDI_FAILURE:
8064 ret = DDI_FAILURE;
8065 break;
8066 default:
8067 break;
8068 }
8069
8070 return (ret);
8071 }
8072
8073 static int
8074 vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
8075 mdi_pathinfo_t *pip)
8076 {
8077 dev_info_t *cdip;
8078 mdi_pathinfo_t *npip = NULL;
8079 scsi_vhci_priv_t *svp = NULL;
8080 struct scsi_address *pap = NULL;
8081 scsi_hba_tran_t *hba = NULL;
8082 int sps;
8083 int mps_flag;
8084 int rval = 0;
8085
8086 mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
8087 if (pip) {
8088 /*
8089 * If the call is from vhci_pathinfo_state_change,
8090 * then this path was busy and is becoming ready to accept IO.
8091 */
8092 ASSERT(ap != NULL);
8093 hba = ap->a_hba_tran;
8094 ASSERT(hba != NULL);
8095 rval = scsi_ifsetcap(ap, cap, val, whom);
8096
8097 VHCI_DEBUG(2, (CE_NOTE, NULL,
8098 "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
8099 (void *)pip, (void *)ap, rval));
8100
8101 return (rval);
8102 }
8103
8104 /*
8105 * Set capability on all the pHCIs.
8106 * If any path is busy, then the capability would be set by
8107 * vhci_pathinfo_state_change.
8108 */
8109
8110 cdip = ADDR2DIP(ap);
8111 ASSERT(cdip != NULL);
8112 sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
8113 if ((sps != MDI_SUCCESS) || (pip == NULL)) {
8114 VHCI_DEBUG(2, (CE_WARN, NULL,
8115 "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
8116 (void *)cdip));
8117 return (0);
8118 }
8119
8120 again:
8121 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
8122 if (svp == NULL) {
8123 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8124 "priv is NULL, pip 0x%p", (void *)pip));
8125 mdi_rele_path(pip);
8126 return (rval);
8127 }
8128
8129 if (svp->svp_psd == NULL) {
8130 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8131 "psd is NULL, pip 0x%p, svp 0x%p",
8132 (void *)pip, (void *)svp));
8133 mdi_rele_path(pip);
8134 return (rval);
8135 }
8136
8137 pap = &svp->svp_psd->sd_address;
8138 ASSERT(pap != NULL);
8139 hba = pap->a_hba_tran;
8140 ASSERT(hba != NULL);
8141
8142 if (hba->tran_setcap != NULL) {
8143 rval = scsi_ifsetcap(pap, cap, val, whom);
8144
8145 VHCI_DEBUG(2, (CE_NOTE, NULL,
8146 "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
8147 (void *)pip, (void *)ap, rval));
8148
8149 /*
8150 * Select next path and issue the setcap, repeat
8151 * until all paths are exhausted
8152 */
8153 sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
8154 if ((sps != MDI_SUCCESS) || (npip == NULL)) {
8155 mdi_rele_path(pip);
8156 return (1);
8157 }
8158 mdi_rele_path(pip);
8159 pip = npip;
8160 goto again;
8161 }
8162 mdi_rele_path(pip);
8163 return (rval);
8164 }
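
/*
 * Walking every pHCI path with mdi_select_path(), selecting the
 * successor before releasing the current path, is the iteration idiom
 * used above and in vhci_failover(). A stripped-down sketch
 * (my_foreach_path() and its callback are hypothetical):
 */
#if 0	/* sketch, never compiled */
static void
my_foreach_path(dev_info_t *cdip, void (*my_cb)(mdi_pathinfo_t *))
{
	mdi_pathinfo_t *pip, *npip;
	int flags = MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH;

	if ((mdi_select_path(cdip, NULL, flags, NULL, &pip) !=
	    MDI_SUCCESS) || (pip == NULL))
		return;
	do {
		my_cb(pip);
		/* pick the next path while still holding the current one */
		if (mdi_select_path(cdip, NULL, flags, pip, &npip) !=
		    MDI_SUCCESS)
			npip = NULL;
		mdi_rele_path(pip);
		pip = npip;
	} while (pip != NULL);
}
#endif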
8165
8166 static int
8167 vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8168 void *arg, dev_info_t **child)
8169 {
8170 char *guid;
8171
8172 if (vhci_bus_config_debug)
8173 flags |= NDI_DEVI_DEBUG;
8174
8175 if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
8176 guid = vhci_devnm_to_guid((char *)arg);
8177 else
8178 guid = NULL;
8179
8180 if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
8181 == MDI_SUCCESS)
8182 return (NDI_SUCCESS);
8183 else
8184 return (NDI_FAILURE);
8185 }
8186
8187 static int
8188 vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8189 void *arg)
8190 {
8191 if (vhci_bus_config_debug)
8192 flags |= NDI_DEVI_DEBUG;
8193
8194 return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
8195 }
8196
8197 /*
8198 * Take the original vhci_pkt, create a duplicate of the pkt for resending
8199 * as though it originated in ssd.
8200 */
8201 static struct scsi_pkt *
8202 vhci_create_retry_pkt(struct vhci_pkt *vpkt)
8203 {
8204 struct vhci_pkt *new_vpkt = NULL;
8205 struct scsi_pkt *pkt = NULL;
8206
8207 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
8208 mdi_pi_get_vhci_private(vpkt->vpkt_path);
8209
8210 /*
8211 * Ensure consistent data at completion time by setting PKT_CONSISTENT
8212 */
8213 pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
8214 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
8215 vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
8216 if (pkt != NULL) {
8217 new_vpkt = TGTPKT2VHCIPKT(pkt);
8218
8219 pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
8220 pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
8221 pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
8222 pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;
8223
8224 pkt->pkt_resid = 0;
8225 pkt->pkt_statistics = 0;
8226 pkt->pkt_reason = 0;
8227
8228 bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
8229 pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);
8230
8231 /*
8232 * Save a pointer to the original vhci_pkt
8233 */
8234 new_vpkt->vpkt_org_vpkt = vpkt;
8235 }
8236
8237 return (pkt);
8238 }
8239
8240 /*
8241 * Copy the successful completion information from the hba packet into
8242 * the original target pkt from the upper layer. Returns the original
8243 * vpkt and destroys the new vpkt from the internal retry.
8244 */
8245 static struct vhci_pkt *
8246 vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
8247 {
8248 struct vhci_pkt *ret_vpkt = NULL;
8249 struct scsi_pkt *tpkt = NULL;
8250 struct scsi_pkt *hba_pkt = NULL;
8251 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
8252 mdi_pi_get_vhci_private(vpkt->vpkt_path);
8253
8254 ASSERT(vpkt->vpkt_org_vpkt != NULL);
8255 VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
8256 "completed successfully!\n"));
8257
8258 ret_vpkt = vpkt->vpkt_org_vpkt;
8259 tpkt = ret_vpkt->vpkt_tgt_pkt;
8260 hba_pkt = vpkt->vpkt_hba_pkt;
8261
8262 /*
8263 * Copy the good status into the target driver's packet
8264 */
8265 *(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
8266 tpkt->pkt_resid = hba_pkt->pkt_resid;
8267 tpkt->pkt_state = hba_pkt->pkt_state;
8268 tpkt->pkt_statistics = hba_pkt->pkt_statistics;
8269 tpkt->pkt_reason = hba_pkt->pkt_reason;
8270
8271 /*
8272 * Destroy the internally created vpkt for the retry
8273 */
8274 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
8275 vpkt->vpkt_tgt_pkt);
8276
8277 return (ret_vpkt);
8278 }
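
/*
 * Taken together, vhci_create_retry_pkt() and vhci_sync_retry_pkt()
 * bracket an internal retry: clone the packet, resend it, then fold
 * the good status back into the original. A rough flow sketch (error
 * handling elided; my_internal_retry() is a hypothetical name, and in
 * the driver the sync step actually happens in the completion path):
 */
#if 0	/* sketch, never compiled */
static void
my_internal_retry(struct vhci_pkt *vpkt)
{
	struct scsi_pkt *pkt;
	struct vhci_pkt *new_vpkt;

	if ((pkt = vhci_create_retry_pkt(vpkt)) == NULL)
		return;
	new_vpkt = TGTPKT2VHCIPKT(pkt);
	/* ... transport new_vpkt; on successful completion ... */
	vpkt = vhci_sync_retry_pkt(new_vpkt);	/* original handed back */
	scsi_hba_pkt_comp(vpkt->vpkt_tgt_pkt);
}
#endif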
8279
8280 /* restart the request sense request */
8281 static void
8282 vhci_uscsi_restart_sense(void *arg)
8283 {
8284 struct buf *rqbp;
8285 struct buf *bp;
8286 struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
8287 mp_uscsi_cmd_t *mp_uscmdp;
8288
8289 VHCI_DEBUG(4, (CE_WARN, NULL,
8290 "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));
8291
8292 if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
8293 /* if it fails - need to wakeup the original command */
8294 		mp_uscmdp = rqpkt->pkt_private;
8295 		ASSERT(mp_uscmdp != NULL);
8296 		bp = mp_uscmdp->cmdbp;
8297 		rqbp = mp_uscmdp->rqbp;
8298 scsi_free_consistent_buf(rqbp);
8299 scsi_destroy_pkt(rqpkt);
8300 bp->b_resid = bp->b_bcount;
8301 bioerror(bp, EIO);
8302 biodone(bp);
8303 }
8304 }
8305
8306 /*
8307 * auto-rqsense is not enabled so we have to retrieve the request sense
8308 * manually.
8309 */
8310 static int
8311 vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
8312 {
8313 struct buf *rqbp, *cmdbp;
8314 struct scsi_pkt *rqpkt;
8315 int rval = 0;
8316
8317 cmdbp = mp_uscmdp->cmdbp;
8318 ASSERT(cmdbp != NULL);
8319
8320 VHCI_DEBUG(4, (CE_WARN, NULL,
8321 "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
8322 (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
8323 /* set up the packet information and cdb */
8324 if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
8325 SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
8326 return (-1);
8327 }
8328
8329 if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
8330 CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
8331 scsi_free_consistent_buf(rqbp);
8332 return (-1);
8333 }
8334
8335 (void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
8336 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
8337
8338 mp_uscmdp->rqbp = rqbp;
8339 rqbp->b_private = mp_uscmdp;
8340 rqpkt->pkt_flags |= FLAG_SENSING;
8341 rqpkt->pkt_time = 60;
8342 rqpkt->pkt_comp = vhci_uscsi_iodone;
8343 rqpkt->pkt_private = mp_uscmdp;
8344
8345 /*
8346 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8347 * selection is not based on path_instance.
8348 */
8349 if (scsi_pkt_allocated_correctly(rqpkt))
8350 rqpkt->pkt_path_instance = 0;
8351
8352 	/* issue the request sense */
8353 switch (scsi_transport(rqpkt)) {
8354 case TRAN_ACCEPT:
8355 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8356 "transport accepted."));
8357 break;
8358 case TRAN_BUSY:
8359 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8360 "transport busy, setting timeout."));
8361 vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
8362 (drv_usectohz(5 * 1000000)));
8363 break;
8364 default:
8365 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8366 "transport failed"));
8367 scsi_free_consistent_buf(rqbp);
8368 scsi_destroy_pkt(rqpkt);
8369 rval = -1;
8370 }
8371
8372 return (rval);
8373 }
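
/*
 * Building a manual REQUEST SENSE as above takes a consistent buffer,
 * a GROUP0 CDB, and FLAG_SENSING so the completion is recognized as a
 * sense fetch. A condensed sketch (my_mk_rqsense() is a hypothetical
 * name):
 */
#if 0	/* sketch, never compiled */
static struct scsi_pkt *
my_mk_rqsense(struct scsi_address *ap, void (*comp)(struct scsi_pkt *),
    void *priv, struct buf **rqbpp)
{
	struct buf *rqbp;
	struct scsi_pkt *rqpkt;

	if ((rqbp = scsi_alloc_consistent_buf(ap, NULL, SENSE_LENGTH,
	    B_READ, NULL, NULL)) == NULL)
		return (NULL);
	if ((rqpkt = scsi_init_pkt(ap, NULL, rqbp, CDB_GROUP0, 1, 0,
	    PKT_CONSISTENT, NULL, NULL)) == NULL) {
		scsi_free_consistent_buf(rqbp);
		return (NULL);
	}
	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
	rqpkt->pkt_flags |= FLAG_SENSING;	/* mark as a sense fetch */
	rqpkt->pkt_comp = comp;
	rqpkt->pkt_private = priv;
	*rqbpp = rqbp;
	return (rqpkt);
}
#endif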
8374
8375 /*
8376  * done routine for the mpapi uscsi command - this behaves as though
8377  * FLAG_DIAGNOSE is set, meaning there are no retries except for a
8378  * manual request sense.
8379 */
8380 void
8381 vhci_uscsi_iodone(struct scsi_pkt *pkt)
8382 {
8383 struct buf *bp;
8384 mp_uscsi_cmd_t *mp_uscmdp;
8385 struct uscsi_cmd *uscmdp;
8386 struct scsi_arq_status *arqstat;
8387 int err;
8388
8389 mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
8390 uscmdp = mp_uscmdp->uscmdp;
8391 bp = mp_uscmdp->cmdbp;
8392 ASSERT(bp != NULL);
8393 VHCI_DEBUG(4, (CE_WARN, NULL,
8394 "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
8395 (void *)bp, (void *)pkt, (void *)mp_uscmdp));
8396 /* Save the status and the residual into the uscsi_cmd struct */
8397 uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
8398 uscmdp->uscsi_resid = bp->b_resid;
8399
8400 	/* return immediately on a fully successful command */
8401 if (pkt->pkt_reason == CMD_CMPLT &&
8402 SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
8403 pkt->pkt_resid == 0) {
8404 mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8405 scsi_destroy_pkt(pkt);
8406 biodone(bp);
8407 return;
8408 }
8409 VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
8410 " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
8411 pkt->pkt_reason, pkt->pkt_resid,
8412 pkt->pkt_state, bp->b_bcount, bp->b_resid));
8413
8414 err = EIO;
8415
8416 arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
8417 if (pkt->pkt_reason != CMD_CMPLT) {
8418 /*
8419 * The command did not complete.
8420 */
8421 VHCI_DEBUG(4, (CE_NOTE, NULL,
8422 "vhci_uscsi_iodone: command did not complete."
8423 " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
8424 if (pkt->pkt_flags & FLAG_SENSING) {
8425 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8426 } else if (pkt->pkt_reason == CMD_TIMEOUT) {
8427 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
8428 err = ETIMEDOUT;
8429 }
8430 } else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
8431 /*
8432 * The auto-rqsense happened, and the packet has a filled-in
8433 * scsi_arq_status structure, pointed to by pkt_scbp.
8434 */
8435 VHCI_DEBUG(4, (CE_NOTE, NULL,
8436 "vhci_uscsi_iodone: received auto-requested sense"));
8437 if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8438 /* get the amount of data to copy into rqbuf */
8439 int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
8440 rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8441 uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8442 uscmdp->uscsi_rqstatus =
8443 *((char *)&arqstat->sts_rqpkt_status);
8444 if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
8445 rqlen != 0) {
8446 bcopy(&(arqstat->sts_sensedata),
8447 uscmdp->uscsi_rqbuf, rqlen);
8448 }
8449 mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8450 VHCI_DEBUG(4, (CE_NOTE, NULL,
8451 "vhci_uscsi_iodone: ARQ "
8452 "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
8453 "xfer: %d rqpkt_resid: %d\n",
8454 uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
8455 uscmdp->uscsi_rqlen, rqlen,
8456 arqstat->sts_rqpkt_resid));
8457 }
8458 } else if (pkt->pkt_flags & FLAG_SENSING) {
8459 struct buf *rqbp;
8460 struct scsi_status *rqstatus;
8461
8462 rqstatus = (struct scsi_status *)pkt->pkt_scbp;
8463 /* a manual request sense was done - get the information */
8464 if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8465 int rqlen = SENSE_LENGTH - pkt->pkt_resid;
8466
8467 rqbp = mp_uscmdp->rqbp;
8468 /* get the amount of data to copy into rqbuf */
8469 rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8470 uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8471 uscmdp->uscsi_rqstatus = *((char *)rqstatus);
8472 if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
8473 bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
8474 rqlen);
8475 }
8476 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8477 scsi_free_consistent_buf(rqbp);
8478 }
8479 		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING "
8480 "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
8481 uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
8482 } else {
8483 struct scsi_status *status =
8484 (struct scsi_status *)pkt->pkt_scbp;
8485 /*
8486 * Command completed and we're not getting sense. Check for
8487 * errors and decide what to do next.
8488 */
8489 VHCI_DEBUG(4, (CE_NOTE, NULL,
8490 "vhci_uscsi_iodone: command appears complete: reason: %x",
8491 pkt->pkt_reason));
8492 if (status->sts_chk) {
8493 /* need to manually get the request sense */
8494 if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
8495 scsi_destroy_pkt(pkt);
8496 return;
8497 }
8498 } else {
8499 VHCI_DEBUG(4, (CE_NOTE, NULL,
8500 			    "vhci_uscsi_iodone: appears complete"));
8501 err = 0;
8502 mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8503 if (pkt->pkt_resid) {
8504 bp->b_resid += pkt->pkt_resid;
8505 }
8506 }
8507 }
8508
8509 if (err) {
8510 if (bp->b_resid == 0)
8511 bp->b_resid = bp->b_bcount;
8512 bioerror(bp, err);
8513 bp->b_flags |= B_ERROR;
8514 }
8515
8516 scsi_destroy_pkt(pkt);
8517 biodone(bp);
8518
8519 VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
8520 }
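
/*
 * The sense copy-out above clamps twice: first to what the target
 * actually returned (SENSE_LENGTH minus the sense packet's residual),
 * then to the caller's buffer (uscsi_rqlen). For example, a residual
 * of 2 with a 16-byte user buffer copies min(16, SENSE_LENGTH - 2)
 * bytes. A sketch of the computation (my_rqlen() is hypothetical):
 */
#if 0	/* sketch, never compiled */
static int
my_rqlen(int sense_resid, int user_rqlen)
{
	int rqlen = SENSE_LENGTH - sense_resid;	/* bytes received */

	/* never copy more than the caller's buffer can hold */
	return (min(user_rqlen, rqlen));
}
#endif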
8521
8522 /*
8523 * start routine for the mpapi uscsi command
8524 */
8525 int
8526 vhci_uscsi_iostart(struct buf *bp)
8527 {
8528 struct scsi_pkt *pkt;
8529 struct uscsi_cmd *uscmdp;
8530 mp_uscsi_cmd_t *mp_uscmdp;
8531 int stat_size, rval;
8532 int retry = 0;
8533
8534 ASSERT(bp->b_private != NULL);
8535
8536 mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
8537 uscmdp = mp_uscmdp->uscmdp;
8538 if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8539 stat_size = SENSE_LENGTH;
8540 } else {
8541 stat_size = 1;
8542 }
8543
8544 pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
8545 stat_size, 0, 0, SLEEP_FUNC, NULL);
8546 if (pkt == NULL) {
8547 VHCI_DEBUG(4, (CE_NOTE, NULL,
8548 "vhci_uscsi_iostart: rval: EINVAL"));
8549 bp->b_resid = bp->b_bcount;
8550 uscmdp->uscsi_resid = bp->b_bcount;
8551 bioerror(bp, EINVAL);
8552 biodone(bp);
8553 return (EINVAL);
8554 }
8555
8556 pkt->pkt_time = uscmdp->uscsi_timeout;
8557 bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
8558 pkt->pkt_comp = vhci_uscsi_iodone;
8559 pkt->pkt_private = mp_uscmdp;
8560 if (uscmdp->uscsi_flags & USCSI_SILENT)
8561 pkt->pkt_flags |= FLAG_SILENT;
8562 if (uscmdp->uscsi_flags & USCSI_ISOLATE)
8563 pkt->pkt_flags |= FLAG_ISOLATE;
8564 if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
8565 pkt->pkt_flags |= FLAG_DIAGNOSE;
8566 if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
8567 pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
8568 }
8569 VHCI_DEBUG(4, (CE_WARN, NULL,
8570 "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
8571 " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
8572 " stat_size: %d",
8573 (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
8574 (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
8575 (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));
8576
8577 /*
8578 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8579 * selection is not based on path_instance.
8580 */
8581 if (scsi_pkt_allocated_correctly(pkt))
8582 pkt->pkt_path_instance = 0;
8583
8584 while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
8585 retry < vhci_uscsi_retry_count) {
8586 delay(drv_usectohz(vhci_uscsi_delay));
8587 retry++;
8588 }
8589 if (retry >= vhci_uscsi_retry_count) {
8590 VHCI_DEBUG(4, (CE_NOTE, NULL,
8591 "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
8592 }
8593 switch (rval) {
8594 case TRAN_ACCEPT:
8595 rval = 0;
8596 break;
8597
8598 default:
8599 VHCI_DEBUG(4, (CE_NOTE, NULL,
8600 "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
8601 rval, bp->b_bcount, bp->b_resid));
8602 bp->b_resid = bp->b_bcount;
8603 uscmdp->uscsi_resid = bp->b_bcount;
8604 bioerror(bp, EIO);
8605 scsi_destroy_pkt(pkt);
8606 biodone(bp);
8607 rval = EIO;
8608 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8609 break;
8610 }
8611 VHCI_DEBUG(4, (CE_NOTE, NULL,
8612 "vhci_uscsi_iostart: exit: rval: %d", rval));
8613 return (rval);
8614 }
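
/*
 * The uscsi flag translation above maps user-visible USCSI_* bits onto
 * pkt_flags one by one. The same mapping, table-driven, could look
 * like this (my_uscsi_to_pkt_flags() is a hypothetical name):
 */
#if 0	/* sketch, never compiled */
static int
my_uscsi_to_pkt_flags(int uflags)
{
	static const struct {
		int	uf;
		int	pf;
	} map[] = {
		{ USCSI_SILENT,		FLAG_SILENT },
		{ USCSI_ISOLATE,	FLAG_ISOLATE },
		{ USCSI_DIAGNOSE,	FLAG_DIAGNOSE },
		{ USCSI_RENEGOT,	FLAG_RENEGOTIATE_WIDE_SYNC },
	};
	int i, pflags = 0;

	for (i = 0; i < (int)(sizeof (map) / sizeof (map[0])); i++)
		if (uflags & map[i].uf)
			pflags |= map[i].pf;
	return (pflags);
}
#endif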
8615
8616 /* ARGSUSED */
8617 static struct scsi_failover_ops *
8618 vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd,
8619 void **ctprivp, char **fo_namep)
8620 {
8621 struct scsi_failover_ops *sfo;
8622 char *sfo_name;
8623 char *override;
8624 struct scsi_failover *sf;
8625
8626 ASSERT(psd && psd->sd_inq);
8627 if ((psd == NULL) || (psd->sd_inq == NULL)) {
8628 VHCI_DEBUG(1, (CE_NOTE, NULL,
8629 "!vhci_dev_fo:return NULL no scsi_device or inquiry"));
8630 return (NULL);
8631 }
8632
8633 /*
8634 * Determine if device is supported under scsi_vhci, and select
8635 * failover module.
8636 *
8637 	 * See if there is a scsi_vhci.conf file override for this device's
8638 * VID/PID. The following values can be returned:
8639 *
8640 * NULL If the NULL is returned then there is no scsi_vhci.conf
8641 * override. For NULL, we determine the failover_ops for
8642 * this device by checking the sfo_device_probe entry
8643 * point for each 'fops' module, in order.
8644 *
8645 * NOTE: Correct operation may depend on module ordering
8646 * of 'specific' (failover modules that are completely
8647 * VID/PID table based) to 'generic' (failover modules
8648 	 *		that are based on T10 standards like TPGS).  Currently,
8649 * the value of 'ddi-forceload' in scsi_vhci.conf is used
8650 * to establish the module list and probe order.
8651 *
8652 * "NONE" If value "NONE" is returned then there is a
8653 * scsi_vhci.conf VID/PID override to indicate the device
8654 * should not be supported under scsi_vhci (even if there
8655 * is an 'fops' module supporting the device).
8656 *
8657 * "<other>" If another value is returned then that value is the
8658 * name of the 'fops' module that should be used.
8659 */
8660 sfo = NULL; /* "NONE" */
8661 override = scsi_get_device_type_string(
8662 "scsi-vhci-failover-override", vdip, psd);
8663 if (override == NULL) {
8664 /* NULL: default: select based on sfo_device_probe results */
8665 for (sf = scsi_failover_table; sf->sf_mod; sf++) {
8666 if ((sf->sf_sfo == NULL) ||
8667 sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
8668 ctprivp) == SFO_DEVICE_PROBE_PHCI)
8669 continue;
8670
8671 /* found failover module, supported under scsi_vhci */
8672 sfo = sf->sf_sfo;
8673 if (fo_namep && (*fo_namep == NULL)) {
8674 sfo_name = i_ddi_strdup(sfo->sfo_name,
8675 KM_SLEEP);
8676 *fo_namep = sfo_name;
8677 }
8678 break;
8679 }
8680 } else if (strcasecmp(override, "NONE")) {
8681 /* !"NONE": select based on driver.conf specified name */
8682 for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
8683 if ((sf->sf_sfo == NULL) ||
8684 (sf->sf_sfo->sfo_name == NULL) ||
8685 strcmp(override, sf->sf_sfo->sfo_name))
8686 continue;
8687
8688 /*
8689 * NOTE: If sfo_device_probe() has side-effects,
8690 * including setting *ctprivp, these are not going
8691 * to occur with override config.
8692 */
8693
8694 /* found failover module, supported under scsi_vhci */
8695 sfo = sf->sf_sfo;
8696 if (fo_namep && (*fo_namep == NULL)) {
8697 sfo_name = kmem_alloc(strlen("conf ") +
8698 strlen(sfo->sfo_name) + 1, KM_SLEEP);
8699 (void) sprintf(sfo_name, "conf %s",
8700 sfo->sfo_name);
8701 *fo_namep = sfo_name;
8702 }
8703 break;
8704 }
8705 }
8706 if (override)
8707 kmem_free(override, strlen(override) + 1);
8708 return (sfo);
8709 }
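
/*
 * The three-way override contract documented above (NULL / "NONE" /
 * module name) reduces to a small decision routine. A condensed sketch
 * (my_pick_fops() is a hypothetical name; probe side effects are
 * ignored here, just as they are for the override case above):
 */
#if 0	/* sketch, never compiled */
static struct scsi_failover_ops *
my_pick_fops(char *override, struct scsi_device *psd, void **ctprivp)
{
	struct scsi_failover *sf;

	if ((override != NULL) && (strcasecmp(override, "NONE") == 0))
		return (NULL);		/* explicitly unsupported */

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (override != NULL) {
			/* match by configured module name */
			if ((sf->sf_sfo->sfo_name != NULL) &&
			    (strcmp(override, sf->sf_sfo->sfo_name) == 0))
				return (sf->sf_sfo);
		} else if (sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
		    ctprivp) != SFO_DEVICE_PROBE_PHCI) {
			return (sf->sf_sfo);	/* first module that claims */
		}
	}
	return (NULL);
}
#endif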
8710
8711 /*
8712  * Determine whether the device described by cinfo should be enumerated
8713  * under the vHCI or the pHCI - if there is a failover ops, the device
8714  * is supported under the vHCI.  By agreement with SCSA, cinfo points
8715  * to a scsi_device structure associated with a decorated pHCI probe node.
8716 */
8717 /* ARGSUSED */
8718 int
8719 vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo)
8720 {
8721 struct scsi_device *psd = (struct scsi_device *)cinfo;
8722
8723 return (vhci_dev_fo(vdip, psd, NULL, NULL) ? MDI_SUCCESS : MDI_FAILURE);
8724 }
8725
8726
8727 #ifdef DEBUG
8728 extern struct scsi_key_strings scsi_cmds[];
8729
8730 static char *
8731 vhci_print_scsi_cmd(char cmd)
8732 {
8733 char tmp[64];
8734 char *cpnt;
8735
8736 cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
8737 	/* tmp is stack storage; returning it would hand the caller garbage */
8738 if (cpnt == tmp) {
8739 cpnt = "Unknown Command";
8740 }
8741 return (cpnt);
8742 }
8743
8744 extern uchar_t scsi_cdb_size[];
8745
8746 static void
8747 vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
8748 {
8749 int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
8750 char buf[256];
8751
8752 if (level == CE_NOTE) {
8753 vhci_log(level, dip, "path cmd %s\n",
8754 vhci_print_scsi_cmd(*cdb));
8755 return;
8756 }
8757
8758 (void) sprintf(buf, "%s for cmd(%s)", title, vhci_print_scsi_cmd(*cdb));
8759 vhci_clean_print(dip, level, buf, cdb, len);
8760 }
8761
8762 static void
8763 vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
8764 int len)
8765 {
8766 int i;
8767 int c;
8768 char *format;
8769 char buf[256];
8770 uchar_t byte;
8771
8772 (void) sprintf(buf, "%s:\n", title);
8773 vhci_log(level, dev, "%s", buf);
8774 level = CE_CONT;
8775 for (i = 0; i < len; ) {
8776 buf[0] = 0;
8777 for (c = 0; c < 8 && i < len; c++, i++) {
8778 byte = (uchar_t)data[i];
8779 if (byte < 0x10)
8780 format = "0x0%x ";
8781 else
8782 format = "0x%x ";
8783 (void) sprintf(&buf[(int)strlen(buf)], format, byte);
8784 }
8785 (void) sprintf(&buf[(int)strlen(buf)], "\n");
8786
8787 vhci_log(level, dev, "%s\n", buf);
8788 }
8789 }
8790 #endif
8791 static void
8792 vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun)
8793 {
8794 char *svl_wwn;
8795 mpapi_item_list_t *ilist;
8796 mpapi_lu_data_t *ld;
8797
8798 	if (vlun == NULL) {
8799 		return;
8800 	}
8801 
8802 	svl_wwn = vlun->svl_lun_wwn;
8803
8804 ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;
8805
8806 while (ilist != NULL) {
8807 ld = (mpapi_lu_data_t *)(ilist->item->idata);
8808 if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn,
8809 strlen(svl_wwn)) == 0)) {
8810 ld->valid = 0;
8811 VHCI_DEBUG(6, (CE_WARN, NULL,
8812 "vhci_invalidate_mpapi_lu: "
8813 "Invalidated LU(%s)", svl_wwn));
8814 return;
8815 }
8816 ilist = ilist->next;
8817 }
8818 VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: "
8819 "Could not find LU(%s) to invalidate.", svl_wwn));
8820 }
8821