/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Multiplexed I/O SCSI vHCI implementation
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/sunmdi.h>
#include <sys/mdi_impldefs.h>
#include <sys/scsi/adapters/scsi_vhci.h>
#include <sys/disp.h>
#include <sys/byteorder.h>

extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;

#ifdef	DEBUG
int	vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
#endif

/* retry limit for vhci_do_prout when a NOT READY status is returned */
int vhci_prout_not_ready_retry = 180;

/*
 * These values are defined to support the internal retry of
 * SCSI packets for better sense code handling.
 */
#define	VHCI_CMD_CMPLT	0
#define	VHCI_CMD_RETRY	1
#define	VHCI_CMD_ERROR	-1

#define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
#define	VHCI_SCSI_PERR		0x47
#define	VHCI_PGR_ILLEGALOP	-2
#define	VHCI_NUM_UPDATE_TASKQ	8
/* changed to 132 to accommodate HDS */
#define	VHCI_STD_INQ_SIZE	132

/*
 * Version Macros
 */
#define	VHCI_NAME_VERSION	"SCSI VHCI Driver %I%"
char		vhci_version_name[] = VHCI_NAME_VERSION;

int		vhci_first_time = 0;
clock_t		vhci_to_ticks = 0;
int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
kcondvar_t	vhci_cv;
kmutex_t	vhci_global_mutex;
void		*vhci_softstate = NULL; /* for soft state */

/*
 * Flag to delay the retry of the reserve command
 */
int		vhci_reserve_delay = 100000;
static int	vhci_path_quiesce_timeout = 60;
static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];

/* uscsi delay for a TRAN_BUSY */
static int vhci_uscsi_delay = 100000;
static int vhci_uscsi_retry_count = 180;
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static timeout_id_t vhci_restart_timeid = 0;

/*
 * Bidirectional map of 'target-port' to port id <pid> for support of
 * iostat(1M) '-Xx' and '-Yx' output.
 */
static kmutex_t		vhci_targetmap_mutex;
static uint_t		vhci_targetmap_pid = 1;
static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */
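/*
 * Illustrative mapping (hypothetical values, not taken from a live system):
 * when a 'target-port' string such as "w500604844a0c5e80" is first seen it
 * is assigned the next <pid> from vhci_targetmap_pid, yielding the pair
 *	vhci_targetmap_byport:	"w500604844a0c5e80" -> 1
 *	vhci_targetmap_bypid:	1 -> "w500604844a0c5e80"
 * so that iostat(1M) can print the compact <pid> and later resolve it back
 * to the full target-port name.
 */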
/*
 * functions exported by scsi_vhci struct cb_ops
 */
static int vhci_open(dev_t *, int, int, cred_t *);
static int vhci_close(dev_t, int, int, cred_t *);
static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * functions exported by scsi_vhci struct dev_ops
 */
static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * functions exported by scsi_vhci scsi_hba_tran_t transport table
 */
static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
    struct scsi_device *);
static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_target(struct scsi_address *, int level,
    uint8_t select_path);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip);
static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
    void *, void *);
static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);

/*
 * functions registered with the mpxio framework via mdi_vhci_ops_t
 */
static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
    mdi_pathinfo_state_t, uint32_t, int);
static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_failover(dev_info_t *, dev_info_t *, int);
static void vhci_client_attached(dev_info_t *);
static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
    int, caddr_t);
static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
    uint_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
    sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
    struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
    scsi_vhci_lun_t *, char *, char *);

static char *vhci_devnm_to_guid(char *);
static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
    int, int (*func)(caddr_t));
static void vhci_intr(struct scsi_pkt *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt **);
static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
static void vhci_lun_free(dev_info_t *);
static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
    uint8_t, uint8_t);
void vhci_update_pathstates(void *);

#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);

/*
 * MP-API related functions
 */
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
    scsi_vhci_lun_t *, mdi_pathinfo_t *);
extern void *vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
    uint8_t, void *);
extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
    scsi_vhci_lun_t *);

/* Special export to MP-API of tpgs non-'fops' entry point */
int (*tpgs_set_target_groups)(struct scsi_address *, int, int);

#define	VHCI_DMA_MAX_XFER_CAP	0xffffffffULL
#define	VHCI_MAX_PGR_RETRIES	3

/*
 * Macros for the device-type mpxio options
 */
#define	LOAD_BALANCE_OPTIONS		"load-balance-options"
#define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
#define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
#define	DEVICE_TYPE_STR			"device-type"
#define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')

static struct cb_ops vhci_cb_ops = {
	vhci_open,			/* open */
	vhci_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	vhci_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops vhci_ops = {
	DEVO_REV,
	0,
	vhci_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	vhci_attach,		/* attach and detach are mandatory */
	vhci_detach,
	nodev,			/* reset */
	&vhci_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	vhci_version_name,	/* module name */
	&vhci_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static mdi_vhci_ops_t vhci_opinfo = {
	MDI_VHCI_OPS_REV,
	vhci_pathinfo_init,		/* Pathinfo node init callback */
	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
	vhci_pathinfo_state_change,	/* Pathinfo node state change */
	vhci_failover,			/* failover callback */
	vhci_client_attached		/* client attached callback */
};

/*
 * The scsi_failover table defines an ordered set of 'fops' modules supported
 * by scsi_vhci. Currently, initialize this table from the 'ddi-forceload'
 * property specified in scsi_vhci.conf.
 */
struct scsi_failover {
	ddi_modhandle_t			sf_mod;
	struct scsi_failover_ops	*sf_sfo;
} *scsi_failover_table;
uint_t	scsi_nfailover;
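/*
 * For reference, a scsi_vhci.conf 'ddi-forceload' entry that seeds this
 * table typically looks like the following (illustrative; the exact module
 * list varies by release):
 *
 *	ddi-forceload =
 *		"misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *		"misc/scsi_vhci/scsi_vhci_f_sym",
 *		"misc/scsi_vhci/scsi_vhci_f_tpgs";
 */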
int
_init(void)
{
	int	rval;

	/*
	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
	 * before registering with the transport.
	 */
	if ((rval = ddi_soft_state_init(&vhci_softstate,
	    sizeof (struct scsi_vhci), 1)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init:soft state init failed\n"));
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: scsi hba init failed\n"));
		ddi_soft_state_fini(&vhci_softstate);
		return (rval);
	}

	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
	vhci_targetmap_byport = mod_hash_create_strhash(
	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
	vhci_targetmap_bypid = mod_hash_create_idhash(
	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);

	if ((rval = mod_install(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
		if (vhci_targetmap_bypid)
			mod_hash_destroy_idhash(vhci_targetmap_bypid);
		if (vhci_targetmap_byport)
			mod_hash_destroy_strhash(vhci_targetmap_byport);
		mutex_destroy(&vhci_targetmap_mutex);
		cv_destroy(&vhci_cv);
		mutex_destroy(&vhci_global_mutex);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&vhci_softstate);
	}
	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked  -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
		return (rval);
	}

	if (vhci_targetmap_bypid)
		mod_hash_destroy_idhash(vhci_targetmap_bypid);
	if (vhci_targetmap_byport)
		mod_hash_destroy_strhash(vhci_targetmap_byport);
	mutex_destroy(&vhci_targetmap_mutex);
	cv_destroy(&vhci_cv);
	mutex_destroy(&vhci_global_mutex);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&vhci_softstate);

	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Lookup scsi_failover by "short name" of failover module.
 */
struct scsi_failover_ops *
vhci_failover_ops_by_name(char *name)
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
			return (sf->sf_sfo);
	}
	return (NULL);
}
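/*
 * Note on iteration: vhci_failover_modopen() below allocates
 * scsi_nfailover + 1 zeroed table entries, so the table always ends with an
 * entry whose sf_mod is NULL; the loop above and those in the module
 * open/close paths rely on that NULL sentinel to terminate.
 */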
/*
 * Load all scsi_failover_ops 'fops' modules.
 */
static void
vhci_failover_modopen(struct scsi_vhci *vhci)
{
	char			**module;
	int			i;
	struct scsi_failover	*sf;
	char			**dt;
	int			e;

	if (scsi_failover_table)
		return;

	/* Get the list of modules from scsi_vhci.conf */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf is missing 'ddi-forceload'");
		return;
	}
	if (scsi_nfailover == 0) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf has empty 'ddi-forceload'");
		ddi_prop_free(module);
		return;
	}

	/* allocate failover table based on number of modules */
	scsi_failover_table = (struct scsi_failover *)
	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
	    KM_SLEEP);

	/* loop over modules specified in scsi_vhci.conf and open each module */
	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
		if (module[i] == NULL)
			continue;

		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
		if (sf->sf_mod == NULL) {
			/*
			 * A module returns EEXIST if other software is
			 * supporting the intended function: for example
			 * the scsi_vhci_f_sum_emc module returns EEXIST
			 * from _init if EMC powerpath software is installed.
			 */
			if (e != EEXIST)
				cmn_err(CE_WARN, "scsi_vhci: unable to open "
				    "module '%s', error %d", module[i], e);
			continue;
		}
		sf->sf_sfo = ddi_modsym(sf->sf_mod,
		    "scsi_vhci_failover_ops", &e);
		if (sf->sf_sfo == NULL) {
			cmn_err(CE_WARN, "scsi_vhci: "
			    "unable to import 'scsi_failover_ops' from '%s', "
			    "error %d", module[i], e);
			(void) ddi_modclose(sf->sf_mod);
			sf->sf_mod = NULL;
			continue;
		}

		/* register vid/pid of devices supported with mpapi */
		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
			vhci_mpapi_add_dev_prod(vhci, *dt);

		/*
		 * Special processing for SFO_NAME_TPGS module, which contains
		 * the `tpgs_set_target_groups` implementation needed by the
		 * MP-API code.
		 */
		if (strcmp(sf->sf_sfo->sfo_name, SFO_NAME_TPGS) == 0) {
			tpgs_set_target_groups =
			    (int (*)(struct scsi_address *, int, int))
			    ddi_modsym(sf->sf_mod, "std_set_target_groups", &e);
			if (tpgs_set_target_groups == NULL) {
				cmn_err(CE_WARN, "scsi_vhci: "
				    "unable to import 'std_set_target_groups' "
				    "from '%s', error %d", module[i], e);
			}
		}

		sf++;
	}

	/* verify that at least the "well-known" modules were there */
	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");
	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");

	/* call sfo_init for modules that need it */
	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
			(*sf->sf_sfo->sfo_init)();
	}

	ddi_prop_free(module);
}
/*
 * unload all loaded scsi_failover_ops modules
 */
static void
vhci_failover_modclose()
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
			continue;
		(void) ddi_modclose(sf->sf_mod);
		sf->sf_mod = NULL;
		sf->sf_sfo = NULL;
	}

	if (scsi_failover_table && scsi_nfailover)
		kmem_free(scsi_failover_table,
		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
	scsi_failover_table = NULL;
	scsi_nfailover = 0;
}

/* ARGSUSED */
static int
vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
		mutex_exit(&vhci->vhci_mutex);
		vhci_log(CE_NOTE, vhci->vhci_dip,
		    "!vhci%d: Already open\n", getminor(*devp));
		return (EBUSY);
	}

	vhci->vhci_state |= VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);
	return (0);
}


/* ARGSUSED */
static int
vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	vhci->vhci_state &= ~VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);

	return (0);
}

/* ARGSUSED */
static int
vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	if (IS_DEVCTL(cmd)) {
		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
	} else if (cmd == MP_CMD) {
		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
	} else {
		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
	}
}
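/*
 * Dispatch summary for the ioctl entry point above: DEVCTL requests
 * (IS_DEVCTL) are handled by vhci_devctl(), MP_CMD requests (presumably
 * issued on behalf of the MP-API library) by vhci_mpapi_ctl(), and
 * everything else, i.e. the sv_iocdata_t based interfaces, by vhci_ctl().
 */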
/*
 * attach the module
 */
static int
vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			rval = DDI_FAILURE;
	int			scsi_hba_attached = 0;
	int			vhci_attached = 0;
	int			mutex_initted = 0;
	int			instance;
	struct scsi_vhci	*vhci;
	scsi_hba_tran_t		*tran;
	char			cache_name_buf[64];
	char			*data;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
		    "implemented\n"));
		return (rval);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_attach: unknown ddi command\n"));
		return (rval);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "soft state alloc failed\n"));
		return (DDI_FAILURE);
	}

	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "bad soft state\n"));
		ddi_soft_state_free(vhci_softstate, instance);
		return (DDI_FAILURE);
	}

	/* Allocate packet cache */
	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
	    "vhci%d_cache", instance);

	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	ASSERT(tran != NULL);

	vhci->vhci_tran		= tran;
	vhci->vhci_dip		= dip;
	vhci->vhci_instance	= instance;

	tran->tran_hba_private	= vhci;
	tran->tran_tgt_private	= NULL;
	tran->tran_tgt_init	= vhci_scsi_tgt_init;
	tran->tran_tgt_probe	= NULL;
	tran->tran_tgt_free	= vhci_scsi_tgt_free;

	tran->tran_start	= vhci_scsi_start;
	tran->tran_abort	= vhci_scsi_abort;
	tran->tran_reset	= vhci_scsi_reset;
	tran->tran_getcap	= vhci_scsi_getcap;
	tran->tran_setcap	= vhci_scsi_setcap;
	tran->tran_init_pkt	= vhci_scsi_init_pkt;
	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
	tran->tran_dmafree	= vhci_scsi_dmafree;
	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
	tran->tran_reset_notify = vhci_scsi_reset_notify;

	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
	tran->tran_get_name	= vhci_scsi_get_name;
	tran->tran_bus_reset	= NULL;
	tran->tran_quiesce	= NULL;
	tran->tran_unquiesce	= NULL;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_post_event = NULL;

	tran->tran_bus_power = vhci_scsi_bus_power;

	tran->tran_bus_config = vhci_scsi_bus_config;

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
	    != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "mdi_vhci_register failed\n"));
		goto attach_fail;
	}
	vhci_attached++;

	/*
	 * Attach this instance of the hba.
	 *
	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
	 * driver, it has nothing to do with DMA. However, when calling
	 * scsi_hba_attach_setup() we need to pass something valid in the
	 * dma attributes parameter. So we just use scsi_alloc_attr.
	 * SCSA itself seems to care only for dma_attr_minxfer and
	 * dma_attr_burstsizes fields of the dma attributes structure.
	 * It expects those fields to be non-zero.
	 */
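	/*
	 * Illustrative sketch only (not this file's definition): any
	 * ddi_dma_attr_t with the two fields named above non-zero would
	 * satisfy that expectation, e.g.
	 *	attr.dma_attr_minxfer = 1;
	 *	attr.dma_attr_burstsizes = 1;
	 * scsi_alloc_attr, declared extern at the top of this file, is
	 * assumed to be initialized along these lines elsewhere.
	 */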
	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "hba attach failed\n"));
		goto attach_fail;
	}
	scsi_hba_attached++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    " ddi_create_minor_node failed\n"));
		goto attach_fail;
	}

	/*
	 * Set pm-want-child-notification property for
	 * power management of the phci and client
	 */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d fail to create pm-want-child-notification? prop",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto attach_fail;
	}

	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
	vhci->vhci_update_pathstates_taskq =
	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
	    MINCLSYSPRI, 1, 4, 0);
	ASSERT(vhci->vhci_taskq);
	ASSERT(vhci->vhci_update_pathstates_taskq);

	/*
	 * Set appropriate configuration flags based on options set in
	 * conf file.
	 */
	vhci->vhci_conf_flags = 0;
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
	    "auto-failback", &data) == DDI_SUCCESS) {
		if (strcmp(data, "enable") == 0)
			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
		ddi_prop_free(data);
	}

	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
		    "disabled through scsi_vhci.conf file.");

	/*
	 * Allocate an mpapi private structure
	 */
	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
	if (vhci_mpapi_init(vhci) != 0) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
		    "vhci_mpapi_init() failed"));
	}

	vhci_failover_modopen(vhci);		/* load failover modules */

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_attached)
		(void) mdi_vhci_unregister(dip, 0);

	if (scsi_hba_attached)
		(void) scsi_hba_detach(dip);

	if (vhci->vhci_tran)
		scsi_hba_tran_free(vhci->vhci_tran);

	if (mutex_initted) {
		mutex_destroy(&vhci->vhci_mutex);
	}

	ddi_soft_state_free(vhci_softstate, instance);
	return (DDI_FAILURE);
}
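/*
 * The scsi_vhci.conf fragment controlling the auto-failback flag read in
 * vhci_attach() above is typically (illustrative):
 *
 *	auto-failback="enable";
 *
 * Any value other than "enable" leaves the capability disabled.
 */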
/*ARGSUSED*/
static int
vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance = ddi_get_instance(dip);
	scsi_hba_tran_t		*tran;
	struct scsi_vhci	*vhci;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	vhci = TRAN2HBAPRIVATE(tran);
	if (!vhci) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm "
		    "not yet implemented\n"));
		return (DDI_FAILURE);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_detach: unknown ddi command\n"));
		return (DDI_FAILURE);
	}

	(void) mdi_vhci_unregister(dip, 0);
	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d unable to remove prop pm-want-child-notification?",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	if (vhci_restart_timeid != 0) {
		(void) untimeout(vhci_restart_timeid);
	}
	vhci_restart_timeid = 0;

	mutex_destroy(&vhci->vhci_mutex);
	vhci->vhci_dip = NULL;
	vhci->vhci_tran = NULL;
	taskq_destroy(vhci->vhci_taskq);
	taskq_destroy(vhci->vhci_update_pathstates_taskq);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(vhci_softstate, instance);

	vhci_failover_modclose();		/* unload failover modules */
	return (DDI_SUCCESS);
}

/*
 * vhci_getinfo()
 * Given the device number, return the devinfo pointer or the
 * instance number.
 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
 */

/*ARGSUSED*/
static int
vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct scsi_vhci	*vhcip;
	int			instance = MINOR2INST(getminor((dev_t)arg));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		vhcip = ddi_get_soft_state(vhci_softstate, instance);
		if (vhcip != NULL)
			*result = vhcip->vhci_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	char			*guid;
	scsi_vhci_lun_t		*vlun;
	struct scsi_vhci	*vhci;
	clock_t			from_ticks;
	mdi_pathinfo_t		*pip;
	int			rval;

	ASSERT(hba_dip != NULL);
	ASSERT(tgt_dip != NULL);

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
	ASSERT(vhci != NULL);

	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
	    "!tgt_init: called for %s (instance %d)\n",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		/*
		 * This must be the .conf node.  The ssd node under
		 * fp already inserts a delay, so we just return from here.
		 * We rely on this delay to have all dips be posted to
		 * the ndi hotplug thread's newdev list.  This is
		 * necessary for the deferred attach mechanism to work
		 * and opens() done soon after boot to succeed.
		 */
		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
		    "property failed"));
		return (DDI_NOT_WELL_FORMED);
	}

	vlun = vhci_lun_lookup(tgt_dip);

	mutex_enter(&vhci_global_mutex);

	from_ticks = ddi_get_lbolt();
	if (vhci_to_ticks == 0) {
		vhci_to_ticks = from_ticks +
		    drv_usectohz(vhci_init_wait_timeout);
	}

#if DEBUG
	if (vlun) {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
		    "from_ticks %lx to_ticks %lx",
		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
	} else {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : vlun not found "
		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
		    vhci_to_ticks));
	}
#endif

	rval = mdi_select_path(tgt_dip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_rele_path(pip);
	}

	/*
	 * Wait for the following conditions:
	 *	1. no vlun available yet
	 *	2. no path established
	 *	3. timer did not expire
	 */
	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
	    (rval != MDI_SUCCESS)) {
		if (vlun && vlun->svl_not_supported) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
			    "vlun 0x%p lun guid %s not supported!",
			    (void *)vlun, guid));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
			vhci_first_time = 1;
		}
		if (vhci_first_time == 1) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
			    "no wait for %s. from_tick %lx, to_tick %lx",
			    guid, from_ticks, vhci_to_ticks));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}

		if (cv_timedwait(&vhci_cv,
		    &vhci_global_mutex, vhci_to_ticks) == -1) {
			/* Timed out */
#ifdef DEBUG
			if (vlun == NULL) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: no vlun for %s!", guid));
			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path count is "
				    "zero for %s!", guid));
			} else {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path not "
				    "available yet for %s!", guid));
			}
#endif /* DEBUG */
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		vlun = vhci_lun_lookup(tgt_dip);
		rval = mdi_select_path(tgt_dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if (rval == MDI_SUCCESS) {
			mdi_rele_path(pip);
		}
		from_ticks = ddi_get_lbolt();
	}
	mutex_exit(&vhci_global_mutex);

	ASSERT(vlun != NULL);
	ddi_prop_free(guid);
	hba_tran->tran_tgt_private = vlun;

	return (DDI_SUCCESS);
}
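/*
 * Timing note for the wait loop above (worked example, assuming a default
 * timeout of 60 seconds expressed in microseconds): drv_usectohz() converts
 * vhci_init_wait_timeout to clock ticks once, e.g. 60,000,000us at hz=100
 * gives 6000 ticks added to the first caller's lbolt; all later callers
 * share that single vhci_to_ticks deadline.
 */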
/*ARGSUSED*/
static void
vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
}

/*
 * a PGR register command has started; copy the info we need
 */
int
vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
{
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	void			*addr;

	if (!vpkt->vpkt_tgt_init_bp)
		return (TRAN_BADPKT);

	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
	if (addr == NULL)
		return (TRAN_BUSY);

	mutex_enter(&vlun->svl_mutex);

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");

	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");

	vlun->svl_time = pkt->pkt_time;
	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
	vlun->svl_first_path = vpkt->vpkt_path;
	mutex_exit(&vlun->svl_mutex);
	return (0);
}
/*
 * Function name : vhci_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
 *					  or other fatal failure
 *					  preventing packet transportation
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to phci
 *					  (or is held in the waitQ)
 * Description	: Implements SCSA's tran_start() entry point for
 *		  packet transport
 *
 */
static int
vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int			rval = TRAN_ACCEPT;
	int			instance, held;
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	int			flags = 0;
	scsi_vhci_priv_t	*svp;
	dev_info_t		*cdip;
	client_lb_t		lbp;
	int			restore_lbp = 0;
	/* set if pkt is SCSI-II RESERVE cmd */
	int			pkt_reserve_cmd = 0;
	int			reserve_failed = 0;

	ASSERT(vhci != NULL);
	ASSERT(vpkt != NULL);
	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
	cdip = ADDR2DIP(ap);

	/*
	 * Block IOs if LUN is held or QUIESCED for IOs.
	 */
	if ((VHCI_LUN_IS_HELD(vlun)) ||
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		return (TRAN_BUSY);
	}

	/*
	 * vhci_lun needs to be quiesced before a SCSI-II RESERVE command
	 * can be issued.  This may require a cv_timedwait, which is
	 * dangerous to perform in an interrupt context.  So if this
	 * is a RESERVE command a taskq is dispatched to service it.
	 * This taskq shall again call vhci_scsi_start, but we shall be
	 * sure it is not in an interrupt context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
			if (taskq_dispatch(vhci->vhci_taskq,
			    vhci_dispatch_scsi_start, (void *)vpkt,
			    KM_NOSLEEP)) {
				return (TRAN_ACCEPT);
			} else {
				return (TRAN_BUSY);
			}
		}

		/*
		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
		 * get serviced for a lun.
		 */
		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
		if (!held) {
			return (TRAN_BUSY);
		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
		    VLUN_QUIESCED_FLG) {
			VHCI_RELEASE_LUN(vlun);
			return (TRAN_BUSY);
		}

		/*
		 * To ensure that no IOs occur for this LUN for the duration
		 * of this pkt, set the VLUN_QUIESCED_FLG.
		 * In case this routine needs to exit on error make sure that
		 * this flag is cleared.
		 */
		vlun->svl_flags |= VLUN_QUIESCED_FLG;
		pkt_reserve_cmd = 1;

		/*
		 * if this is a SCSI-II RESERVE command, set the load
		 * balancing policy to ALTERNATE PATH to ensure that all
		 * subsequent IOs are routed on the same path.  This is
		 * because if commands are routed across multiple paths then
		 * IOs on paths other than the one on which the RESERVE was
		 * executed will get a RESERVATION CONFLICT.
		 */
		lbp = mdi_get_lb_policy(cdip);
		if (lbp != LOAD_BALANCE_NONE) {
			if (vhci_quiesce_lun(vlun) != 1) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			vlun->svl_lb_policy_save = lbp;
			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
			    MDI_SUCCESS) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			restore_lbp = 1;
		}
		/*
		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
		 * To narrow this window where a reserve command may be sent
		 * down an inactive path the path states first need to be
		 * updated.  Before calling vhci_update_pathstates reset
		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
		 * for this lun.  This shall prevent an unnecessary reset
		 * from being sent out.
		 */
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		vhci_update_pathstates((void *)vlun);
	}
	instance = ddi_get_instance(vhci->vhci_dip);

	/*
	 * If the command is PRIN with action of zero, then the cmd
	 * is reading PR keys which requires filtering on completion.
	 * Data cache sync must be guaranteed.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) &&
	    (pkt->pkt_cdbp[1] == 0) &&
	    (vpkt->vpkt_org_vpkt == NULL)) {
		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
	}

	/*
	 * Do not defer bind for PKT_DMA_PARTIAL
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {

		/* This is a non pkt_dma_partial case */
		if ((rval = vhci_bind_transport(
		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
		    != TRAN_ACCEPT) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci%d %x: failed to bind transport: "
			    "vlun 0x%p pkt_reserved %x restore_lbp %x,"
			    "lbp %x", instance, rval, (void *)vlun,
			    pkt_reserve_cmd, restore_lbp, lbp));
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
			if (pkt_reserve_cmd)
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			return (rval);
		}
		VHCI_DEBUG(8, (CE_NOTE, NULL,
		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
	}
	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	ASSERT(vpkt->vpkt_path != NULL);

	/*
	 * This is the chance to adjust the pHCI's pkt and other information
	 * from target driver's pkt.
	 */
	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
	    (void *)vpkt));
	vhci_update_pHCI_pkt(vpkt, pkt);

	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
			    "pktpath 0x%p resrv path 0x%p differ. "
			    "lb_policy %x",
			    (void *)vlun, (void *)vpkt->vpkt_path,
			    (void *)vlun->svl_resrv_pip,
			    mdi_get_lb_policy(cdip)));
			reserve_failed = 1;
		}
	}

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(
	    vpkt->vpkt_path);
	if (svp == NULL || reserve_failed) {
		if (pkt_reserve_cmd) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind returned null svp vlun 0x%p",
			    (void *)vlun));
			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
		}
pkt_cleanup:
		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
			vpkt->vpkt_hba_pkt = NULL;
			if (vpkt->vpkt_path) {
				mdi_rele_path(vpkt->vpkt_path);
				vpkt->vpkt_path = NULL;
			}
		}
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) ==
		    VHCI_PROUT_R_AND_IGNORE))) {
			sema_v(&vlun->svl_pgr_sema);
		}
		return (TRAN_BUSY);
	}

	VHCI_INCR_PATH_CMDCOUNT(svp);

	/*
	 * Ensure that no other IOs raced ahead, while a RESERVE cmd was
	 * QUIESCING the same lun.
	 */
	if ((!pkt_reserve_cmd) &&
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		VHCI_DECR_PATH_CMDCOUNT(svp);
		goto pkt_cleanup;
	}

	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
		/*
		 * currently this thread only handles running PGR
		 * commands, so don't bother creating it unless
		 * something interesting is going to happen (like
		 * either a PGR out, or a PGR in with enough space
		 * to hold the keys that are getting returned)
		 */
		mutex_enter(&vlun->svl_mutex);
		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
			    1, MINCLSYSPRI, 1, 4, 0);
			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
		}
		mutex_exit(&vlun->svl_mutex);
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) ==
		    VHCI_PROUT_R_AND_IGNORE))) {
			if ((rval = vhci_pgr_register_start(vlun, pkt)) != 0) {
				/* an error */
				sema_v(&vlun->svl_pgr_sema);
				return (rval);
			}
		}
	}
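	/*
	 * CDB decoding note for the PROUT checks above (assuming the
	 * VHCI_PROUT_* macros track the SPC-3 service action codes):
	 * pkt_cdbp[0] carries the opcode and (pkt_cdbp[1] & 0x1f) the
	 * PERSISTENT RESERVE OUT service action, so e.g. a REGISTER AND
	 * IGNORE EXISTING KEY request arrives as pkt_cdbp[0] == SCMD_PROUT
	 * with (pkt_cdbp[1] & 0x1f) == 0x06 and matches
	 * VHCI_PROUT_R_AND_IGNORE.
	 */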
	/*
	 * SCSI-II RESERVE cmd is not expected in polled mode.
	 * If this changes it needs to be handled for the polled scenario.
	 */
	flags = vpkt->vpkt_hba_pkt->pkt_flags;

	/*
	 * Set the path_instance *before* sending the scsi_pkt down the path
	 * to mpxio's pHCI so that additional path abstractions at a pHCI
	 * level (like maybe iSCSI at some point in the future) can update
	 * the path_instance.
	 */
	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
		vpkt->vpkt_hba_pkt->pkt_path_instance =
		    mdi_pi_get_path_instance(vpkt->vpkt_path);

	rval = scsi_transport(vpkt->vpkt_hba_pkt);
	if (rval == TRAN_ACCEPT) {
		if (flags & FLAG_NOINTR) {
			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;

			ASSERT(tpkt != NULL);
			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
			tpkt->pkt_resid = pkt->pkt_resid;
			tpkt->pkt_state = pkt->pkt_state;
			tpkt->pkt_statistics = pkt->pkt_statistics;
			tpkt->pkt_reason = pkt->pkt_reason;

			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
				    vpkt->vpkt_tgt_init_scblen);
			}

			VHCI_DECR_PATH_CMDCOUNT(svp);
			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
				vpkt->vpkt_hba_pkt = NULL;
				if (vpkt->vpkt_path) {
					mdi_rele_path(vpkt->vpkt_path);
					vpkt->vpkt_path = NULL;
				}
			}
			/*
			 * This path will not automatically retry pkts
			 * internally, therefore, vpkt_org_vpkt should
			 * never be set.
			 */
			ASSERT(vpkt->vpkt_org_vpkt == NULL);
			if (tpkt->pkt_comp) {
				(*tpkt->pkt_comp)(tpkt);
			}
		}
		return (rval);
	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (pkt_reserve_cmd) {
		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
		    "!vhci_scsi_start: reserve failed vlun 0x%p",
		    (void *)vlun));
		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
		if (restore_lbp)
			(void) mdi_set_lb_policy(cdip, lbp);
	}

	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	VHCI_DECR_PATH_CMDCOUNT(svp);

	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
		vpkt->vpkt_hba_pkt = NULL;
		if (vpkt->vpkt_path) {
			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
			mdi_rele_path(vpkt->vpkt_path);
			vpkt->vpkt_path = NULL;
		}
	}
	return (TRAN_BUSY);
}

/*
 * Function name : vhci_scsi_reset()
 *
 * Return Values : 0 - reset failed
 *		   1 - reset succeeded
 */

/* ARGSUSED */
static int
vhci_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = 0;

	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
		return (vhci_scsi_reset_target(ap, level, TRUE));
	} else if (level == RESET_ALL) {
		return (vhci_scsi_reset_bus(ap));
	}

	return (rval);
}
/*
 * vhci_recovery_reset:
 *	Issues reset to the device
 * Input:
 *	vlun - vhci lun pointer of the device
 *	ap - address of the device
 *	select_path:
 *		If select_path is FALSE, then the address specified in ap is
 *		the path on which reset will be issued.
 *		If select_path is TRUE, then path is obtained by calling
 *		mdi_select_path.
 *
 *	recovery_depth:
 *		Caller can specify the level of reset.
 *		VHCI_DEPTH_LUN -
 *			Issues LUN RESET if device supports lun reset.
 *		VHCI_DEPTH_TARGET -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET
 *		VHCI_DEPTH_ALL -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET.
 *			If TARGET RESET does not succeed, issues Bus Reset.
 */

static int
vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
    uint8_t select_path, uint8_t recovery_depth)
{
	int	ret = 0;

	ASSERT(ap != NULL);

	if (vlun && vlun->svl_support_lun_reset == 1) {
		ret = vhci_scsi_reset_target(ap, RESET_LUN,
		    select_path);
	}

	recovery_depth--;

	if ((ret == 0) && recovery_depth) {
		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
		    select_path);
		recovery_depth--;
	}

	if ((ret == 0) && recovery_depth) {
		(void) scsi_reset(ap, RESET_ALL);
	}

	return (ret);
}
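/*
 * Illustrative call (hypothetical caller and packet): a full-depth recovery
 * that escalates LUN reset -> target reset -> bus reset as each step fails
 * would look like
 *
 *	(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
 *	    TRUE, VHCI_DEPTH_ALL);
 */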
/*
 * Note: The scsi_address passed to this routine could be the scsi_address
 * for the virtual device or the physical device. No assumptions should be
 * made in this routine about the ap structure and a_hba_tran->tran_tgt_private
 * field of ap can not be assumed to be the vhci structure.
 * Further note that the child dip would be the dip of the ssd node irrespective
 * of the scsi_address passed.
 */

static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
	dev_info_t		*vdip, *pdip, *cdip = ADDR2DIP(ap);
	mdi_pathinfo_t		*pip = NULL;
	mdi_pathinfo_t		*npip = NULL;
	int			rval = -1;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	struct scsi_vhci	*vhci = NULL;

	if (select_path != TRUE) {
		ASSERT(ap != NULL);
		if (level == RESET_LUN) {
			hba = ap->a_hba_tran;
			ASSERT(hba != NULL);
			return ((*hba->tran_reset)(ap, RESET_LUN));
		}
		return (scsi_reset(ap, level));
	}

	ASSERT(cdip != NULL);
	vdip = ddi_get_parent(cdip);
	ASSERT(vdip != NULL);
	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "Unable to get a path, dip 0x%p", (void *)cdip));
		return (0);
	}
again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (0);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (0);
	}

	pap = &svp->svp_psd->sd_address;
	hba = pap->a_hba_tran;

	ASSERT(pap != NULL);
	ASSERT(hba != NULL);

	if (hba->tran_reset != NULL) {
		if ((*hba->tran_reset)(pap, level) == 0) {
			pdip = mdi_pi_get_phci(pip);
			vhci_log(CE_WARN, vdip, "!(%s%d):"
			    " path (%s%d), reset %d failed",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    ddi_driver_name(pdip), ddi_get_instance(pdip),
			    level);

			/*
			 * Select next path and issue the reset, repeat
			 * until all paths are exhausted
			 */
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_ONLINE_PATH, pip, &npip);
			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
				mdi_rele_path(pip);
				return (0);
			}
			mdi_rele_path(pip);
			pip = npip;
			goto again;
		}
		mdi_rele_path(pip);
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
		    "reset %d sent down pip:%p for cdip:%p\n", level,
		    (void *)pip, (void *)cdip));
		return (1);
	}
	mdi_rele_path(pip);
	return (0);
}


/* ARGSUSED */
static int
vhci_scsi_reset_bus(struct scsi_address *ap)
{
	return (1);
}
/*
 * called by vhci_getcap and vhci_setcap to get and set (respectively)
 * SCSI capabilities
 */
/* ARGSUSED */
static int
vhci_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	int			cidx;
	int			rval = 0;

	if (cap == (char *)0) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: invalid arg"));
		return (rval);
	}

	if (vlun == NULL) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: vlun is null"));
		return (rval);
	}

	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		return (UNDEFINED);
	}

	/*
	 * Process setcap request.
	 */
	if (doset) {
		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:
			if (val == 0) {
				rval = 0;
			} else {
				rval = 1;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			if (tgtonly == 0) {
				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
				    "scsi_vhci_setcap: "
				    "Returning error since whom = 0"));
				rval = -1;
				break;
			}
			/*
			 * Set the capability accordingly.
			 */
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_support_lun_reset = val;
			rval = val;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_sector_size = val;
			vlun->svl_setcap_done = 1;
			mutex_exit(&vlun->svl_mutex);
			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);

			/* Always return success */
			rval = 1;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_setcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			rval = (int)VHCI_DMA_MAX_XFER_CAP;
			break;

		case SCSI_CAP_INITIATOR_ID:
			rval = 0x00;
			break;

		case SCSI_CAP_ARQ:
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = 1;
			break;

		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;

		case SCSI_CAP_INTERCONNECT_TYPE:
			rval = INTERCONNECT_FABRIC;
			break;

		case SCSI_CAP_LUN_RESET:
			/*
			 * scsi_vhci will always return success for LUN reset.
			 * When a request for LUN reset comes through the
			 * scsi_reset entry point, an attempt will be made
			 * to do the reset through all the possible paths.
			 */
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_support_lun_reset;
			mutex_exit(&vlun->svl_mutex);
			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
			    "scsi_vhci_getcap:"
			    "Getting the Lun reset capability %d", rval));
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_sector_size;
			mutex_exit(&vlun->svl_mutex);
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_getcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));
	}
	return (rval);
}


/*
 * Function name : vhci_scsi_getcap()
 *
 */
static int
vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	return (vhci_commoncap(ap, cap, 0, whom, 0));
}

static int
vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	return (vhci_commoncap(ap, cap, value, whom, 1));
}
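/*
 * Example (illustrative): a target driver reaches vhci_commoncap() through
 * the standard SCSA capability interfaces, e.g.
 *
 *	if (scsi_ifgetcap(ap, "lun-reset", 1) == 1)
 *		(void) scsi_ifsetcap(ap, "lun-reset", 1, 1);
 */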
1870 */ 1871 vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL; 1872 } 1873 1874 if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) { 1875 1876 /* 1877 * Re-initialize some of the target driver packet state 1878 * information. 1879 */ 1880 vpkt->vpkt_tgt_pkt->pkt_state = 0; 1881 vpkt->vpkt_tgt_pkt->pkt_statistics = 0; 1882 vpkt->vpkt_tgt_pkt->pkt_reason = 0; 1883 1884 /* 1885 * Binding a vpkt->vpkt_path for this IO at init_time. 1886 * If an IO error happens later, target driver will clear 1887 * this vpkt->vpkt_path binding before re-init IO again. 1888 */ 1889 VHCI_DEBUG(8, (CE_NOTE, NULL, 1890 "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n", 1891 (void *)vpkt, newpkt)); 1892 if (pkt && vpkt->vpkt_hba_pkt) { 1893 VHCI_DEBUG(4, (CE_NOTE, NULL, 1894 "v_s_i_p calling update_pHCI_pkt resid %ld\n", 1895 pkt->pkt_resid)); 1896 vhci_update_pHCI_pkt(vpkt, pkt); 1897 } 1898 if (callback == SLEEP_FUNC) { 1899 rval = vhci_bind_transport( 1900 ap, vpkt, flags, callback); 1901 } else { 1902 rval = vhci_bind_transport( 1903 ap, vpkt, flags, NULL_FUNC); 1904 } 1905 VHCI_DEBUG(8, (CE_NOTE, NULL, 1906 "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n", 1907 (void *)vpkt, rval)); 1908 if (bp) { 1909 if (rval == TRAN_FATAL_ERROR) { 1910 /* 1911 * No paths available. Could not bind 1912 * any pHCI. Setting EFAULT as a way 1913 * to indicate no DMA is mapped. 1914 */ 1915 bioerror(bp, EFAULT); 1916 } else { 1917 /* 1918 * Do not indicate any pHCI errors to 1919 * target driver otherwise. 1920 */ 1921 bioerror(bp, 0); 1922 } 1923 } 1924 if (rval != TRAN_ACCEPT) { 1925 VHCI_DEBUG(8, (CE_NOTE, NULL, 1926 "vhci_scsi_init_pkt: " 1927 "v_b_t failed 0x%p newpkt %x\n", 1928 (void *)vpkt, newpkt)); 1929 if (newpkt) { 1930 scsi_hba_pkt_free(ap, 1931 vpkt->vpkt_tgt_pkt); 1932 } 1933 return (NULL); 1934 } 1935 ASSERT(vpkt->vpkt_hba_pkt != NULL); 1936 ASSERT(vpkt->vpkt_path != NULL); 1937 1938 /* Update the resid for the target driver */ 1939 vpkt->vpkt_tgt_pkt->pkt_resid = 1940 vpkt->vpkt_hba_pkt->pkt_resid; 1941 } 1942 1943 return (vpkt->vpkt_tgt_pkt); 1944 } 1945 1946 /* 1947 * Function name : vhci_scsi_destroy_pkt 1948 * 1949 * Return Values : none 1950 */ 1951 static void 1952 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 1953 { 1954 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 1955 1956 VHCI_DEBUG(8, (CE_NOTE, NULL, 1957 "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt)); 1958 1959 vpkt->vpkt_tgt_init_pkt_flags = 0; 1960 if (vpkt->vpkt_hba_pkt) { 1961 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1962 vpkt->vpkt_hba_pkt = NULL; 1963 } 1964 if (vpkt->vpkt_path) { 1965 mdi_rele_path(vpkt->vpkt_path); 1966 vpkt->vpkt_path = NULL; 1967 } 1968 1969 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED); 1970 scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt); 1971 } 1972 1973 /* 1974 * Function name : vhci_scsi_dmafree() 1975 * 1976 * Return Values : none 1977 */ 1978 /*ARGSUSED*/ 1979 static void 1980 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 1981 { 1982 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 1983 1984 VHCI_DEBUG(6, (CE_NOTE, NULL, 1985 "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt)); 1986 1987 ASSERT(vpkt != NULL); 1988 if (vpkt->vpkt_hba_pkt) { 1989 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1990 vpkt->vpkt_hba_pkt = NULL; 1991 } 1992 if (vpkt->vpkt_path) { 1993 mdi_rele_path(vpkt->vpkt_path); 1994 vpkt->vpkt_path = NULL; 1995 } 1996 } 1997 1998 /* 1999 * Function name : vhci_scsi_sync_pkt() 2000 * 2001 * Return Values : none 2002 */ 2003 /*ARGSUSED*/ 2004 static void 2005 
vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 2006 { 2007 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 2008 2009 ASSERT(vpkt != NULL); 2010 if (vpkt->vpkt_hba_pkt) { 2011 scsi_sync_pkt(vpkt->vpkt_hba_pkt); 2012 } 2013 } 2014 2015 /* 2016 * routine for reset notification setup, to register or cancel. 2017 */ 2018 static int 2019 vhci_scsi_reset_notify(struct scsi_address *ap, int flag, 2020 void (*callback)(caddr_t), caddr_t arg) 2021 { 2022 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2023 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg, 2024 &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf)); 2025 } 2026 2027 static int 2028 vhci_scsi_get_name_bus_addr(struct scsi_device *sd, 2029 char *name, int len, int bus_addr) 2030 { 2031 dev_info_t *cdip; 2032 char *guid; 2033 scsi_vhci_lun_t *vlun; 2034 2035 ASSERT(sd != NULL); 2036 ASSERT(name != NULL); 2037 2038 cdip = sd->sd_dev; 2039 2040 ASSERT(cdip != NULL); 2041 2042 if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS) { 2043 name[0] = '\0'; 2044 return (1); 2045 } 2046 2047 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS, 2048 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 2049 name[0] = '\0'; 2050 return (1); 2051 } 2052 2053 vlun = ADDR2VLUN(&sd->sd_address); 2054 if (bus_addr && vlun && vlun->svl_fops_name) { 2055 /* report the guid and the name of the failover module */ 2056 (void) snprintf(name, len, "g%s %s", guid, vlun->svl_fops_name); 2057 } else { 2058 /* report the guid */ 2059 (void) snprintf(name, len, "g%s", guid); 2060 } 2061 2062 ddi_prop_free(guid); 2063 return (1); 2064 } 2065 2066 static int 2067 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len) 2068 { 2069 return (vhci_scsi_get_name_bus_addr(sd, name, len, 1)); 2070 } 2071 2072 static int 2073 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len) 2074 { 2075 return (vhci_scsi_get_name_bus_addr(sd, name, len, 0)); 2076 } 2077 2078 /* 2079 * Return a pointer to the guid part of the devnm. 2080 * devnm format is "nodename@busaddr", busaddr format is "gGUID". 
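 *
 * Illustration (hypothetical GUID value): for a devnm of
 * "ssd@g600a0b800011fe65", a pointer to "600a0b800011fe65" is returned;
 * if no "@g" sequence is found, NULL is returned.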
2081 */ 2082 static char * 2083 vhci_devnm_to_guid(char *devnm) 2084 { 2085 char *cp = devnm; 2086 2087 if (devnm == NULL) 2088 return (NULL); 2089 2090 while (*cp != '\0' && *cp != '@') 2091 cp++; 2092 if (*cp == '@' && *(cp + 1) == 'g') 2093 return (cp + 2); 2094 return (NULL); 2095 } 2096 2097 static int 2098 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags, 2099 int (*func)(caddr_t)) 2100 { 2101 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2102 dev_info_t *cdip = ADDR2DIP(ap); 2103 mdi_pathinfo_t *pip = NULL; 2104 mdi_pathinfo_t *npip = NULL; 2105 scsi_vhci_priv_t *svp = NULL; 2106 struct scsi_device *psd = NULL; 2107 struct scsi_address *address = NULL; 2108 struct scsi_pkt *pkt = NULL; 2109 int rval = -1; 2110 int pgr_sema_held = 0; 2111 int held; 2112 int mps_flag = MDI_SELECT_ONLINE_PATH; 2113 struct scsi_vhci_lun *vlun; 2114 time_t tnow; 2115 int path_instance; 2116 2117 vlun = ADDR2VLUN(ap); 2118 ASSERT(vlun != 0); 2119 2120 if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) && 2121 (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2122 VHCI_PROUT_REGISTER) || 2123 ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2124 VHCI_PROUT_R_AND_IGNORE))) { 2125 if (!sema_tryp(&vlun->svl_pgr_sema)) 2126 return (TRAN_BUSY); 2127 pgr_sema_held = 1; 2128 if (vlun->svl_first_path != NULL) { 2129 rval = mdi_select_path(cdip, NULL, 2130 MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH, 2131 NULL, &pip); 2132 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 2133 VHCI_DEBUG(4, (CE_NOTE, NULL, 2134 "vhci_bind_transport: path select fail\n")); 2135 } else { 2136 npip = pip; 2137 do { 2138 if (npip == vlun->svl_first_path) { 2139 VHCI_DEBUG(4, (CE_NOTE, NULL, 2140 "vhci_bind_transport: " 2141 "valid first path 0x%p\n", 2142 (void *) 2143 vlun->svl_first_path)); 2144 pip = vlun->svl_first_path; 2145 goto bind_path; 2146 } 2147 pip = npip; 2148 rval = mdi_select_path(cdip, NULL, 2149 MDI_SELECT_ONLINE_PATH | 2150 MDI_SELECT_STANDBY_PATH, 2151 pip, &npip); 2152 mdi_rele_path(pip); 2153 } while ((rval == MDI_SUCCESS) && 2154 (npip != NULL)); 2155 } 2156 } 2157 2158 if (vlun->svl_first_path) { 2159 VHCI_DEBUG(4, (CE_NOTE, NULL, 2160 "vhci_bind_transport: invalid first path 0x%p\n", 2161 (void *)vlun->svl_first_path)); 2162 vlun->svl_first_path = NULL; 2163 } 2164 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) { 2165 if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) { 2166 if (!sema_tryp(&vlun->svl_pgr_sema)) 2167 return (TRAN_BUSY); 2168 } 2169 pgr_sema_held = 1; 2170 } 2171 2172 /* 2173 * If the path is already bound for PKT_PARTIAL_DMA case, 2174 * try to use the same path. 2175 */ 2176 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) { 2177 VHCI_DEBUG(4, (CE_NOTE, NULL, 2178 "vhci_bind_transport: PKT_PARTIAL_DMA " 2179 "vpkt 0x%p, path 0x%p\n", 2180 (void *)vpkt, (void *)vpkt->vpkt_path)); 2181 pip = vpkt->vpkt_path; 2182 goto bind_path; 2183 } 2184 2185 /* 2186 * Get path_instance. Non-zero indicates that mdi_select_path should 2187 * be called to select a specific instance. 2188 * 2189 * NB: Condition pkt_path_instance reference on proper allocation. 2190 */ 2191 if (scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) 2192 path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance; 2193 else 2194 path_instance = 0; 2195 2196 /* 2197 * If reservation is active bind the transport directly to the pip 2198 * with the reservation. 
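	 * While VLUN_RESERVE_ACTIVE_FLG is set, every command must ride the
	 * path that carried the SCSI-II RESERVE (svl_resrv_pip); sending I/O
	 * down any other path would conflict with, or quietly bypass, the
	 * reservation. If that path is not ONLINE, the request is failed
	 * with TRAN_BUSY rather than being bound to some other path.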
2199 */ 2200 if (vpkt->vpkt_hba_pkt == NULL) { 2201 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 2202 if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) { 2203 pip = vlun->svl_resrv_pip; 2204 mdi_hold_path(pip); 2205 vlun->svl_waiting_for_activepath = 0; 2206 rval = MDI_SUCCESS; 2207 goto bind_path; 2208 } else { 2209 if (pgr_sema_held) { 2210 sema_v(&vlun->svl_pgr_sema); 2211 } 2212 return (TRAN_BUSY); 2213 } 2214 } 2215 try_again: 2216 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp, 2217 path_instance ? MDI_SELECT_PATH_INSTANCE : 0, 2218 (void *)(intptr_t)path_instance, &pip); 2219 if (rval == MDI_BUSY) { 2220 if (pgr_sema_held) { 2221 sema_v(&vlun->svl_pgr_sema); 2222 } 2223 return (TRAN_BUSY); 2224 } else if (rval == MDI_DEVI_ONLINING) { 2225 /* 2226 * if we are here then we are in the midst of 2227 * an attach/probe of the client device. 2228 * We attempt to bind to ONLINE path if available, 2229 * else it is OK to bind to a STANDBY path (instead 2230 * of triggering a failover) because IO associated 2231 * with attach/probe (eg. INQUIRY, block 0 read) 2232 * are completed by targets even on passive paths 2233 * If no ONLINE paths available, it is important 2234 * to set svl_waiting_for_activepath for two 2235 * reasons: (1) avoid sense analysis in the 2236 * "external failure detection" codepath in 2237 * vhci_intr(). Failure to do so will result in 2238 * infinite loop (unless an ONLINE path becomes 2239 * available at some point) (2) avoid 2240 * unnecessary failover (see "---Waiting For Active 2241 * Path---" comment below). 2242 */ 2243 VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining " 2244 "state\n", (void *)cdip)); 2245 pip = NULL; 2246 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp, 2247 mps_flag, NULL, &pip); 2248 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 2249 if (vlun->svl_waiting_for_activepath == 0) { 2250 vlun->svl_waiting_for_activepath = 1; 2251 vlun->svl_wfa_time = ddi_get_time(); 2252 } 2253 mps_flag |= MDI_SELECT_STANDBY_PATH; 2254 rval = mdi_select_path(cdip, 2255 vpkt->vpkt_tgt_init_bp, 2256 mps_flag, NULL, &pip); 2257 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 2258 if (pgr_sema_held) { 2259 sema_v(&vlun->svl_pgr_sema); 2260 } 2261 return (TRAN_FATAL_ERROR); 2262 } 2263 goto bind_path; 2264 } 2265 } else if (rval == MDI_FAILURE) { 2266 if (pgr_sema_held) { 2267 sema_v(&vlun->svl_pgr_sema); 2268 } 2269 return (TRAN_FATAL_ERROR); 2270 } 2271 2272 if ((pip == NULL) || (rval == MDI_NOPATH)) { 2273 while (vlun->svl_waiting_for_activepath) { 2274 /* 2275 * ---Waiting For Active Path--- 2276 * This device was discovered across a 2277 * passive path; lets wait for a little 2278 * bit, hopefully an active path will 2279 * show up obviating the need for a 2280 * failover 2281 */ 2282 tnow = ddi_get_time(); 2283 if (tnow - vlun->svl_wfa_time >= 60) { 2284 vlun->svl_waiting_for_activepath = 0; 2285 } else { 2286 drv_usecwait(1000); 2287 if (vlun->svl_waiting_for_activepath 2288 == 0) { 2289 /* 2290 * an active path has come 2291 * online! 
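						 * (The flag is cleared
						 * asynchronously by
						 * vhci_update_pathstates()
						 * when a path is marked
						 * ONLINE, which is what lets
						 * this poll exit early.)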
						 */
						goto try_again;
					}
				}
			}
			VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
			if (!held) {
				VHCI_DEBUG(4, (CE_NOTE, NULL,
				    "!Lun not held\n"));
				if (pgr_sema_held) {
					sema_v(&vlun->svl_pgr_sema);
				}
				return (TRAN_BUSY);
			}
			/*
			 * now that the LUN is stable, one last check
			 * to make sure no other changes sneaked in
			 * (like a path coming online or a
			 * failover initiated by another thread)
			 */
			pip = NULL;
			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
			    0, NULL, &pip);
			if (pip != NULL) {
				VHCI_RELEASE_LUN(vlun);
				vlun->svl_waiting_for_activepath = 0;
				goto bind_path;
			}

			/*
			 * Check if there is an ONLINE path OR a STANDBY path
			 * available. If none is available, do not attempt
			 * to do a failover, just return a fatal error at this
			 * point.
			 */
			npip = NULL;
			rval = mdi_select_path(cdip, NULL,
			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
			    NULL, &npip);
			if ((npip == NULL) || (rval != MDI_SUCCESS)) {
				/*
				 * No paths available, just return a FATAL
				 * error.
				 */
				VHCI_RELEASE_LUN(vlun);
				if (pgr_sema_held) {
					sema_v(&vlun->svl_pgr_sema);
				}
				return (TRAN_FATAL_ERROR);
			}
			mdi_rele_path(npip);
			VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
			    "mdi_failover\n"));
			rval = mdi_failover(vhci->vhci_dip, cdip,
			    MDI_FAILOVER_ASYNC);
			if (rval == MDI_FAILURE) {
				VHCI_RELEASE_LUN(vlun);
				if (pgr_sema_held) {
					sema_v(&vlun->svl_pgr_sema);
				}
				return (TRAN_FATAL_ERROR);
			} else if (rval == MDI_BUSY) {
				VHCI_RELEASE_LUN(vlun);
				if (pgr_sema_held) {
					sema_v(&vlun->svl_pgr_sema);
				}
				return (TRAN_BUSY);
			} else {
				if (pgr_sema_held) {
					sema_v(&vlun->svl_pgr_sema);
				}
				return (TRAN_BUSY);
			}
		}
		vlun->svl_waiting_for_activepath = 0;
bind_path:
		vpkt->vpkt_path = pip;
		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
		ASSERT(svp != NULL);

		psd = svp->svp_psd;
		ASSERT(psd != NULL);
		address = &psd->sd_address;
	} else {
		pkt = vpkt->vpkt_hba_pkt;
		address = &pkt->pkt_address;
	}

	/* Verify match of specified path_instance and selected path_instance */
	ASSERT((path_instance == 0) ||
	    (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));

	/*
	 * For the PKT_PARTIAL_DMA case, call the pHCI's scsi_init_pkt whenever
	 * the target driver calls vhci_scsi_init_pkt.
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
	    vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_bind_transport: PKT_PARTIAL_DMA "
		    "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
		    (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
		pkt = vpkt->vpkt_hba_pkt;
		address = &pkt->pkt_address;
	}

	if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
		pkt = scsi_init_pkt(address, pkt,
		    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
		    vpkt->vpkt_tgt_init_scblen,
		    0, flags, func, NULL);

		if (pkt == NULL) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "!bind transport: 0x%p 0x%p 0x%p\n",
			    (void *)vhci, (void *)psd, (void *)vpkt));
			if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
				MDI_PI_ERRSTAT(vpkt->vpkt_path,
				    MDI_PI_TRANSERR);
				mdi_rele_path(vpkt->vpkt_path);
				vpkt->vpkt_path = NULL;
			}
			if (pgr_sema_held) {
				sema_v(&vlun->svl_pgr_sema);
			}
			/*
			 * Looks like a fatal error; maybe the device
			 * disappeared underneath us. Return TRAN_BUSY to
			 * give the target driver another chance to retry
			 * and get another path.
			 */
			return (TRAN_BUSY);
		}
	}

	pkt->pkt_private = vpkt;
	vpkt->vpkt_hba_pkt = pkt;
	return (TRAN_ACCEPT);
}


/*PRINTFLIKE3*/
void
vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	scsi_log(dip, "scsi_vhci", level, buf);
}

/* do a PGR out with the information we've saved away */
static int
vhci_do_prout(scsi_vhci_priv_t *svp)
{

	struct scsi_pkt *new_pkt;
	struct buf *bp;
	scsi_vhci_lun_t *vlun;
	int rval, retry, nr_retry, ua_retry;
	struct scsi_extended_sense *sns;

	bp = getrbuf(KM_SLEEP);
	bp->b_flags = B_WRITE;
	bp->b_resid = 0;

	VHCI_INCR_PATH_CMDCOUNT(svp);
	vlun = svp->svp_svl;

	new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
	    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
	    SLEEP_FUNC, NULL);
	if (new_pkt == NULL) {
		VHCI_DECR_PATH_CMDCOUNT(svp);
		freerbuf(bp);
		cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
		return (0);
	}
	mutex_enter(&vlun->svl_mutex);
	bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
	bp->b_bcount = vlun->svl_bcount;
	bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
	    sizeof (vlun->svl_cdb));
	new_pkt->pkt_time = vlun->svl_time;
	mutex_exit(&vlun->svl_mutex);
	new_pkt->pkt_flags = FLAG_NOINTR;

	ua_retry = nr_retry = retry = 0;
again:
	rval = vhci_do_scsi_cmd(new_pkt);
	if (rval != 1) {
		if ((new_pkt->pkt_reason == CMD_CMPLT) &&
		    (SCBP_C(new_pkt) == STATUS_CHECK) &&
		    (new_pkt->pkt_state & STATE_ARQ_DONE)) {
			sns = &(((struct scsi_arq_status *)(uintptr_t)
			    (new_pkt->pkt_scbp))->sts_sensedata);
			if ((sns->es_key == KEY_UNIT_ATTENTION) ||
			    (sns->es_key == KEY_NOT_READY)) {
				int max_retry;
				struct scsi_failover_ops *fops;
				fops = vlun->svl_fops;
				rval = (*fops->sfo_analyze_sense)
				    (svp->svp_psd, sns,
				    vlun->svl_fops_ctpriv);
				if (rval == SCSI_SENSE_NOT_READY) {
					max_retry = vhci_prout_not_ready_retry;
					retry = nr_retry++;
					delay(1*drv_usectohz(1000000));
				} else {
					/* check for state change and update */
					if (rval == SCSI_SENSE_STATE_CHANGED) {
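						/*
						 * The sense data says the
						 * device's path/port state
						 * changed under us (e.g. an
						 * ALUA transition): hold the
						 * LUN and re-evaluate path
						 * states before the retry
						 * logic below resends the
						 * PROUT.
						 */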
int held; 2507 VHCI_HOLD_LUN(vlun, 2508 VH_NOSLEEP, held); 2509 if (!held) { 2510 rval = TRAN_BUSY; 2511 } else { 2512 /* chk for alua first */ 2513 vhci_update_pathstates( 2514 (void *)vlun); 2515 } 2516 } 2517 retry = ua_retry++; 2518 max_retry = VHCI_MAX_PGR_RETRIES; 2519 } 2520 if (retry < max_retry) { 2521 VHCI_DEBUG(4, (CE_WARN, NULL, 2522 "!vhci_do_prout retry 0x%x " 2523 "(0x%x 0x%x 0x%x)", 2524 SCBP_C(new_pkt), 2525 new_pkt->pkt_cdbp[0], 2526 new_pkt->pkt_cdbp[1], 2527 new_pkt->pkt_cdbp[2])); 2528 goto again; 2529 } 2530 rval = 0; 2531 VHCI_DEBUG(4, (CE_WARN, NULL, 2532 "!vhci_do_prout 0x%x " 2533 "(0x%x 0x%x 0x%x)", 2534 SCBP_C(new_pkt), 2535 new_pkt->pkt_cdbp[0], 2536 new_pkt->pkt_cdbp[1], 2537 new_pkt->pkt_cdbp[2])); 2538 } else if (sns->es_key == KEY_ILLEGAL_REQUEST) 2539 rval = VHCI_PGR_ILLEGALOP; 2540 } 2541 } else { 2542 rval = 1; 2543 } 2544 scsi_destroy_pkt(new_pkt); 2545 VHCI_DECR_PATH_CMDCOUNT(svp); 2546 freerbuf(bp); 2547 return (rval); 2548 } 2549 2550 static void 2551 vhci_run_cmd(void *arg) 2552 { 2553 struct scsi_pkt *pkt = (struct scsi_pkt *)arg; 2554 struct scsi_pkt *tpkt; 2555 scsi_vhci_priv_t *svp; 2556 mdi_pathinfo_t *pip, *npip; 2557 scsi_vhci_lun_t *vlun; 2558 dev_info_t *cdip; 2559 scsi_vhci_priv_t *nsvp; 2560 int fail = 0; 2561 int rval; 2562 struct vhci_pkt *vpkt; 2563 uchar_t cdb_1; 2564 vhci_prout_t *prout; 2565 2566 vpkt = (struct vhci_pkt *)pkt->pkt_private; 2567 tpkt = vpkt->vpkt_tgt_pkt; 2568 pip = vpkt->vpkt_path; 2569 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 2570 if (svp == NULL) { 2571 tpkt->pkt_reason = CMD_TRAN_ERR; 2572 tpkt->pkt_statistics = STAT_ABORTED; 2573 goto done; 2574 } 2575 vlun = svp->svp_svl; 2576 prout = &vlun->svl_prout; 2577 if (SCBP_C(pkt) != STATUS_GOOD) 2578 fail++; 2579 cdip = vlun->svl_dip; 2580 pip = npip = NULL; 2581 rval = mdi_select_path(cdip, NULL, 2582 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip); 2583 if ((rval != MDI_SUCCESS) || (npip == NULL)) { 2584 VHCI_DEBUG(4, (CE_NOTE, NULL, 2585 "vhci_run_cmd: no path! 0x%p\n", (void *)svp)); 2586 tpkt->pkt_reason = CMD_TRAN_ERR; 2587 tpkt->pkt_statistics = STAT_ABORTED; 2588 goto done; 2589 } 2590 2591 cdb_1 = vlun->svl_cdb[1]; 2592 vlun->svl_cdb[1] &= 0xe0; 2593 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 2594 2595 do { 2596 nsvp = (scsi_vhci_priv_t *) 2597 mdi_pi_get_vhci_private(npip); 2598 if (nsvp == NULL) { 2599 VHCI_DEBUG(4, (CE_NOTE, NULL, 2600 "vhci_run_cmd: no " 2601 "client priv! 
0x%p offlined?\n", 2602 (void *)npip)); 2603 goto next_path; 2604 } 2605 if (vlun->svl_first_path == npip) { 2606 goto next_path; 2607 } else { 2608 if (vhci_do_prout(nsvp) != 1) 2609 fail++; 2610 } 2611 next_path: 2612 pip = npip; 2613 rval = mdi_select_path(cdip, NULL, 2614 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 2615 pip, &npip); 2616 mdi_rele_path(pip); 2617 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 2618 2619 vlun->svl_cdb[1] = cdb_1; 2620 2621 if (fail) { 2622 VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, " 2623 "couldn't be replicated on all paths", 2624 ddi_driver_name(cdip), ddi_get_instance(cdip))); 2625 vhci_print_prout_keys(vlun, "vhci_run_cmd: "); 2626 2627 if (SCBP_C(pkt) != STATUS_GOOD) { 2628 tpkt->pkt_reason = CMD_TRAN_ERR; 2629 tpkt->pkt_statistics = STAT_ABORTED; 2630 } 2631 } else { 2632 vlun->svl_pgr_active = 1; 2633 vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:"); 2634 2635 bcopy((const void *)prout->service_key, 2636 (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE); 2637 bcopy((const void *)prout->res_key, 2638 (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE); 2639 2640 vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:"); 2641 } 2642 done: 2643 if (SCBP_C(pkt) == STATUS_GOOD) 2644 vlun->svl_first_path = NULL; 2645 2646 if (svp) 2647 VHCI_DECR_PATH_CMDCOUNT(svp); 2648 2649 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 2650 scsi_destroy_pkt(pkt); 2651 vpkt->vpkt_hba_pkt = NULL; 2652 if (vpkt->vpkt_path) { 2653 mdi_rele_path(vpkt->vpkt_path); 2654 vpkt->vpkt_path = NULL; 2655 } 2656 } 2657 2658 sema_v(&vlun->svl_pgr_sema); 2659 /* 2660 * The PROUT commands are not included in the automatic retry 2661 * mechanism, therefore, vpkt_org_vpkt should never be set here. 2662 */ 2663 ASSERT(vpkt->vpkt_org_vpkt == NULL); 2664 if (tpkt->pkt_comp) 2665 (*tpkt->pkt_comp)(tpkt); 2666 2667 } 2668 2669 /* 2670 * Get the keys registered with this target. Since we will have 2671 * registered the same key with multiple initiators, strip out 2672 * any duplicate keys. 2673 * 2674 * The pointers which will be used to filter the registered keys from 2675 * the device will be stored in filter_prin and filter_pkt. If the 2676 * allocation length of the buffer was sufficient for the number of 2677 * parameter data bytes available to be returned by the device then the 2678 * key filtering will use the keylist returned from the original 2679 * request. If the allocation length of the buffer was not sufficient, 2680 * then the filtering will use the keylist returned from the request 2681 * that is resent below. 2682 * 2683 * If the device returns an additional length field that is greater than 2684 * the allocation length of the buffer, then allocate a new buffer which 2685 * can accommodate the number of parameter data bytes available to be 2686 * returned. Resend the scsi PRIN command, filter out the duplicate 2687 * keys and return as many of the unique keys found that was originally 2688 * requested and set the additional length field equal to the data bytes 2689 * of unique reservation keys available to be returned. 2690 * 2691 * If the device returns an additional length field that is less than or 2692 * equal to the allocation length of the buffer, then all the available 2693 * keys registered were returned by the device. Filter out the 2694 * duplicate keys and return all of the unique keys found and set the 2695 * additional length field equal to the data bytes of the reservation 2696 * keys to be returned. 
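 *
 * For example, if this host registered one key through four initiator
 * ports, the device reports four identical key entries; after filtering,
 * a single MHIOC_RESV_KEY_SIZE-byte entry is returned and the additional
 * length field is rewritten to match.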
2697 */ 2698 static int 2699 vhci_do_prin(struct vhci_pkt **vpkt) 2700 { 2701 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *) 2702 mdi_pi_get_vhci_private((*vpkt)->vpkt_path); 2703 vhci_prin_readkeys_t *prin; 2704 scsi_vhci_lun_t *vlun = svp->svp_svl; 2705 struct scsi_vhci *vhci = 2706 ADDR2VHCI(&((*vpkt)->vpkt_tgt_pkt->pkt_address)); 2707 2708 struct buf *new_bp = NULL; 2709 struct scsi_pkt *new_pkt = NULL; 2710 struct vhci_pkt *new_vpkt = NULL; 2711 int hdr_len = 0; 2712 int rval = VHCI_CMD_CMPLT; 2713 uint32_t prin_length = 0; 2714 uint32_t svl_prin_length = 0; 2715 2716 prin = (vhci_prin_readkeys_t *) 2717 bp_mapin_common((*vpkt)->vpkt_tgt_init_bp, VM_NOSLEEP); 2718 2719 if (prin != NULL) { 2720 prin_length = BE_32(prin->length); 2721 } 2722 2723 if (prin == NULL) { 2724 VHCI_DEBUG(5, (CE_WARN, NULL, 2725 "vhci_do_prin: bp_mapin_common failed.")); 2726 rval = VHCI_CMD_ERROR; 2727 } else { 2728 /* 2729 * According to SPC-3r22, sec 4.3.4.6: "If the amount of 2730 * information to be transferred exceeds the maximum value 2731 * that the ALLOCATION LENGTH field is capable of specifying, 2732 * the device server shall...terminate the command with CHECK 2733 * CONDITION status". The ALLOCATION LENGTH field of the 2734 * PERSISTENT RESERVE IN command is 2 bytes. We should never 2735 * get here with an ADDITIONAL LENGTH greater than 0xFFFF 2736 * so if we do, then it is an error! 2737 */ 2738 2739 hdr_len = sizeof (prin->length) + sizeof (prin->generation); 2740 2741 if ((prin_length + hdr_len) > 0xFFFF) { 2742 VHCI_DEBUG(5, (CE_NOTE, NULL, 2743 "vhci_do_prin: Device returned invalid " 2744 "length 0x%x\n", prin_length)); 2745 rval = VHCI_CMD_ERROR; 2746 } 2747 } 2748 2749 /* 2750 * If prin->length is greater than the byte count allocated in the 2751 * original buffer, then resend the request with enough buffer 2752 * allocated to get all of the available registered keys. 2753 */ 2754 if (rval != VHCI_CMD_ERROR) { 2755 if (((*vpkt)->vpkt_tgt_init_bp->b_bcount - hdr_len) < 2756 prin_length) { 2757 if ((*vpkt)->vpkt_org_vpkt == NULL) { 2758 new_pkt = vhci_create_retry_pkt(*vpkt); 2759 if (new_pkt != NULL) { 2760 new_vpkt = TGTPKT2VHCIPKT(new_pkt); 2761 2762 /* 2763 * This is the buf with buffer pointer 2764 * where the prin readkeys will be 2765 * returned from the device 2766 */ 2767 new_bp = scsi_alloc_consistent_buf( 2768 &svp->svp_psd->sd_address, 2769 NULL, (prin_length + hdr_len), 2770 ((*vpkt)->vpkt_tgt_init_bp-> 2771 b_flags & (B_READ | B_WRITE)), 2772 NULL_FUNC, NULL); 2773 if (new_bp != NULL) { 2774 if (new_bp->b_un.b_addr != 2775 NULL) { 2776 2777 new_bp->b_bcount = 2778 prin_length + 2779 hdr_len; 2780 2781 new_pkt->pkt_cdbp[7] = 2782 (uchar_t)(new_bp-> 2783 b_bcount >> 8); 2784 new_pkt->pkt_cdbp[8] = 2785 (uchar_t)new_bp-> 2786 b_bcount; 2787 2788 rval = VHCI_CMD_RETRY; 2789 } else { 2790 rval = VHCI_CMD_ERROR; 2791 } 2792 } else { 2793 rval = VHCI_CMD_ERROR; 2794 } 2795 } else { 2796 rval = VHCI_CMD_ERROR; 2797 } 2798 } else { 2799 rval = VHCI_CMD_ERROR; 2800 } 2801 } 2802 } 2803 2804 if (rval == VHCI_CMD_RETRY) { 2805 new_vpkt->vpkt_tgt_init_bp = new_bp; 2806 2807 /* 2808 * Release the old path because it does not matter which path 2809 * this command is sent down. This allows the normal bind 2810 * transport mechanism to be used. 
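	 * (The retry is dispatched through the vhci taskq, so the rebind and
	 * the larger-buffer PRIN run in kernel thread context rather than in
	 * the completion context this routine may be called from.)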
2811 */ 2812 if ((*vpkt)->vpkt_path != NULL) { 2813 mdi_rele_path((*vpkt)->vpkt_path); 2814 (*vpkt)->vpkt_path = NULL; 2815 } 2816 2817 /* 2818 * Dispatch the retry command 2819 */ 2820 if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start, 2821 (void *) new_vpkt, KM_NOSLEEP) == NULL) { 2822 rval = VHCI_CMD_ERROR; 2823 } else { 2824 /* 2825 * If we return VHCI_CMD_RETRY, that means the caller 2826 * is going to bail and wait for the reissued command 2827 * to complete. In that case, we need to decrement 2828 * the path command count right now. In any other 2829 * case, it'll be decremented by the caller. 2830 */ 2831 VHCI_DECR_PATH_CMDCOUNT(svp); 2832 } 2833 } 2834 2835 if ((rval != VHCI_CMD_ERROR) && (rval != VHCI_CMD_RETRY)) { 2836 int new, old; 2837 int data_len = 0; 2838 2839 data_len = prin_length / MHIOC_RESV_KEY_SIZE; 2840 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n", 2841 data_len)); 2842 2843 #ifdef DEBUG 2844 VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n")); 2845 if (vhci_debug == 5) 2846 vhci_print_prin_keys(prin, data_len); 2847 VHCI_DEBUG(5, (CE_NOTE, NULL, 2848 "vhci_do_prin: MPxIO old keys:\n")); 2849 if (vhci_debug == 5) 2850 vhci_print_prin_keys(&vlun->svl_prin, data_len); 2851 #endif 2852 2853 /* 2854 * Filter out all duplicate keys returned from the device 2855 * We know that we use a different key for every host, so we 2856 * can simply strip out duplicates. Otherwise we would need to 2857 * do more bookkeeping to figure out which keys to strip out. 2858 */ 2859 2860 new = 0; 2861 2862 if (data_len > 0) { 2863 vlun->svl_prin.keylist[0] = prin->keylist[0]; 2864 new++; 2865 } 2866 2867 for (old = 1; old < data_len; old++) { 2868 int j; 2869 int match = 0; 2870 for (j = 0; j < new; j++) { 2871 if (bcmp(&prin->keylist[old], 2872 &vlun->svl_prin.keylist[j], 2873 sizeof (mhioc_resv_key_t)) == 0) { 2874 match = 1; 2875 break; 2876 } 2877 } 2878 if (!match) { 2879 vlun->svl_prin.keylist[new] = 2880 prin->keylist[old]; 2881 new++; 2882 } 2883 } 2884 2885 vlun->svl_prin.generation = prin->generation; 2886 svl_prin_length = new * MHIOC_RESV_KEY_SIZE; 2887 vlun->svl_prin.length = BE_32(svl_prin_length); 2888 2889 /* 2890 * If we arrived at this point after issuing a retry, make sure 2891 * that we put everything back the way it originally was so 2892 * that the target driver can complete the command correctly. 2893 */ 2894 if ((*vpkt)->vpkt_org_vpkt != NULL) { 2895 new_bp = (*vpkt)->vpkt_tgt_init_bp; 2896 2897 scsi_free_consistent_buf(new_bp); 2898 2899 *vpkt = vhci_sync_retry_pkt(*vpkt); 2900 2901 /* 2902 * Make sure the original buffer is mapped into kernel 2903 * space before we try to copy the filtered keys into 2904 * it. 2905 */ 2906 prin = (vhci_prin_readkeys_t *)bp_mapin_common( 2907 (*vpkt)->vpkt_tgt_init_bp, VM_NOSLEEP); 2908 } 2909 2910 /* 2911 * Now copy the desired number of prin keys into the original 2912 * target buffer. 2913 */ 2914 if (svl_prin_length <= 2915 ((*vpkt)->vpkt_tgt_init_bp->b_bcount - hdr_len)) { 2916 /* 2917 * It is safe to return all of the available unique 2918 * keys 2919 */ 2920 bcopy(&vlun->svl_prin, prin, svl_prin_length + hdr_len); 2921 } else { 2922 /* 2923 * Not all of the available keys were requested by the 2924 * original command. 
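			 * In that case only b_bcount bytes are copied, so the
			 * caller's buffer is never overrun; the length field
			 * still reports the full amount of key data available,
			 * as SPC permits.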
2925 */ 2926 bcopy(&vlun->svl_prin, prin, 2927 (*vpkt)->vpkt_tgt_init_bp->b_bcount); 2928 } 2929 #ifdef DEBUG 2930 VHCI_DEBUG(5, (CE_NOTE, NULL, 2931 "vhci_do_prin: To Application:\n")); 2932 if (vhci_debug == 5) 2933 vhci_print_prin_keys(prin, new); 2934 VHCI_DEBUG(5, (CE_NOTE, NULL, 2935 "vhci_do_prin: MPxIO new keys:\n")); 2936 if (vhci_debug == 5) 2937 vhci_print_prin_keys(&vlun->svl_prin, new); 2938 #endif 2939 } 2940 2941 if (rval == VHCI_CMD_ERROR) { 2942 /* 2943 * If we arrived at this point after issuing a 2944 * retry, make sure that we put everything back 2945 * the way it originally was so that ssd can 2946 * complete the command correctly. 2947 */ 2948 2949 if ((*vpkt)->vpkt_org_vpkt != NULL) { 2950 new_bp = (*vpkt)->vpkt_tgt_init_bp; 2951 if (new_bp != NULL) { 2952 scsi_free_consistent_buf(new_bp); 2953 } 2954 2955 new_vpkt = *vpkt; 2956 *vpkt = (*vpkt)->vpkt_org_vpkt; 2957 2958 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 2959 new_vpkt->vpkt_tgt_pkt); 2960 } 2961 2962 /* 2963 * Mark this command completion as having an error so that 2964 * ssd will retry the command. 2965 */ 2966 2967 (*vpkt)->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 2968 (*vpkt)->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 2969 2970 rval = VHCI_CMD_CMPLT; 2971 } 2972 2973 /* 2974 * Make sure that the semaphore is only released once. 2975 */ 2976 if (rval == VHCI_CMD_CMPLT) { 2977 sema_v(&vlun->svl_pgr_sema); 2978 } 2979 2980 return (rval); 2981 } 2982 2983 static void 2984 vhci_intr(struct scsi_pkt *pkt) 2985 { 2986 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 2987 struct scsi_pkt *tpkt; 2988 scsi_vhci_priv_t *svp; 2989 scsi_vhci_lun_t *vlun; 2990 int rval, held; 2991 struct scsi_failover_ops *fops; 2992 struct scsi_extended_sense *sns; 2993 mdi_pathinfo_t *lpath; 2994 static char *timeout_err = "Command Timeout"; 2995 static char *parity_err = "Parity Error"; 2996 char *err_str = NULL; 2997 dev_info_t *vdip, *cdip, *pdip; 2998 char *cpath, *dpath; 2999 3000 ASSERT(vpkt != NULL); 3001 tpkt = vpkt->vpkt_tgt_pkt; 3002 ASSERT(tpkt != NULL); 3003 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3004 ASSERT(svp != NULL); 3005 vlun = svp->svp_svl; 3006 ASSERT(vlun != NULL); 3007 lpath = vpkt->vpkt_path; 3008 3009 /* 3010 * sync up the target driver's pkt with the pkt that 3011 * we actually used 3012 */ 3013 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp); 3014 tpkt->pkt_resid = pkt->pkt_resid; 3015 tpkt->pkt_state = pkt->pkt_state; 3016 tpkt->pkt_statistics = pkt->pkt_statistics; 3017 tpkt->pkt_reason = pkt->pkt_reason; 3018 3019 /* Return path_instance information back to the target driver. */ 3020 if (scsi_pkt_allocated_correctly(tpkt)) { 3021 if (scsi_pkt_allocated_correctly(pkt)) { 3022 /* 3023 * If both packets were correctly allocated, 3024 * return path returned by pHCI. 
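			 * A target driver that asked for a specific path via
			 * pkt_path_instance can use this to verify which path
			 * actually carried the command.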
		 */
			tpkt->pkt_path_instance = pkt->pkt_path_instance;
		} else {
			/* Otherwise return path of pHCI we used */
			tpkt->pkt_path_instance =
			    mdi_pi_get_path_instance(lpath);
		}
	}

	if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
		if ((SCBP_C(pkt) != STATUS_GOOD) ||
		    (pkt->pkt_reason != CMD_CMPLT)) {
			sema_v(&vlun->svl_pgr_sema);
		}
	} else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
		if (pkt->pkt_reason != CMD_CMPLT ||
		    (SCBP_C(pkt) != STATUS_GOOD)) {
			sema_v(&vlun->svl_pgr_sema);
		}
	}

	switch (pkt->pkt_reason) {
	case CMD_CMPLT:
		/*
		 * cmd completed successfully, check for scsi errors
		 */
		switch (*(pkt->pkt_scbp)) {
		case STATUS_CHECK:
			if (pkt->pkt_state & STATE_ARQ_DONE) {
				sns = &(((struct scsi_arq_status *)(uintptr_t)
				    (pkt->pkt_scbp))->sts_sensedata);
				fops = vlun->svl_fops;
				ASSERT(fops != NULL);
				VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
				    "Received sns key %x esc %x escq %x\n",
				    sns->es_key, sns->es_add_code,
				    sns->es_qual_code));

				if (vlun->svl_waiting_for_activepath == 1) {
					/*
					 * if we are here it means we are
					 * in the midst of a probe/attach
					 * through a passive path; this
					 * case is exempt from sense analysis
					 * for detection of ext. failover
					 * because that would unnecessarily
					 * increase attach time.
					 */
					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
					    vpkt->vpkt_tgt_init_scblen);
					break;
				}
				if (sns->es_add_code == VHCI_SCSI_PERR) {
					/*
					 * parity error
					 */
					err_str = parity_err;
					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
					    vpkt->vpkt_tgt_init_scblen);
					break;
				}
				rval = (*fops->sfo_analyze_sense)
				    (svp->svp_psd, sns, vlun->svl_fops_ctpriv);
				if ((rval == SCSI_SENSE_NOFAILOVER) ||
				    (rval == SCSI_SENSE_UNKNOWN) ||
				    (rval == SCSI_SENSE_NOT_READY)) {
					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
					    vpkt->vpkt_tgt_init_scblen);
					break;
				} else if (rval == SCSI_SENSE_STATE_CHANGED) {
					struct scsi_vhci *vhci;
					vhci = ADDR2VHCI(&tpkt->pkt_address);
					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
					if (!held) {
						/*
						 * looks like some other thread
						 * has already detected this
						 * condition
						 */
						tpkt->pkt_state &=
						    ~STATE_ARQ_DONE;
						*(tpkt->pkt_scbp) =
						    STATUS_BUSY;
						break;
					}
					(void) taskq_dispatch(
					    vhci->vhci_update_pathstates_taskq,
					    vhci_update_pathstates,
					    (void *)vlun, KM_SLEEP);
				} else {
					/*
					 * externally initiated failover
					 * has occurred or is in progress
					 */
					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
					if (!held) {
						/*
						 * looks like some other thread
						 * has already detected this
						 * condition
						 */
						tpkt->pkt_state &=
						    ~STATE_ARQ_DONE;
						*(tpkt->pkt_scbp) =
						    STATUS_BUSY;
						break;
					} else {
						rval = vhci_handle_ext_fo
						    (pkt, rval);
						if (rval == BUSY_RETURN) {
							tpkt->pkt_state &=
							    ~STATE_ARQ_DONE;
							*(tpkt->pkt_scbp) =
							    STATUS_BUSY;
							break;
						}
						bcopy(pkt->pkt_scbp,
						    tpkt->pkt_scbp,
						    vpkt->vpkt_tgt_init_scblen);
						break;
					}
				}
			}
			break;

		/*
		 * If this is a good SCSI-II RELEASE cmd completion then restore
		 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
		 * If this is a good SCSI-II RESERVE cmd completion then set
		 * VLUN_RESERVE_ACTIVE_FLG.
		 */
		case STATUS_GOOD:
			if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
			    (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
				(void) mdi_set_lb_policy(vlun->svl_dip,
				    vlun->svl_lb_policy_save);
				vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
				VHCI_DEBUG(1, (CE_WARN, NULL,
				    "!vhci_intr: vlun 0x%p release path 0x%p",
				    (void *)vlun, (void *)vpkt->vpkt_path));
			}

			if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
			    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
				vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
				vlun->svl_resrv_pip = vpkt->vpkt_path;
				VHCI_DEBUG(1, (CE_WARN, NULL,
				    "!vhci_intr: vlun 0x%p reserved path 0x%p",
				    (void *)vlun, (void *)vpkt->vpkt_path));
			}
			break;

		case STATUS_RESERVATION_CONFLICT:
			VHCI_DEBUG(1, (CE_WARN, NULL,
			    "!vhci_intr: vlun 0x%p "
			    "reserve conflict on path 0x%p",
			    (void *)vlun, (void *)vpkt->vpkt_path));
			/* FALLTHROUGH */
		default:
			break;
		}

		/*
		 * Update I/O completion statistics for the path
		 */
		mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);

		/*
		 * Command completed successfully, release the dma binding and
		 * destroy the transport side of the packet.
		 */
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) ==
		    VHCI_PROUT_R_AND_IGNORE))) {
			if (SCBP_C(pkt) == STATUS_GOOD) {
				ASSERT(vlun->svl_taskq);
				svp->svp_last_pkt_reason = pkt->pkt_reason;
				(void) taskq_dispatch(vlun->svl_taskq,
				    vhci_run_cmd, pkt, KM_SLEEP);
				return;
			}
		}
		if ((SCBP_C(pkt) == STATUS_GOOD) &&
		    (pkt->pkt_cdbp[0] == SCMD_PRIN) &&
		    vpkt->vpkt_tgt_init_bp) {
			/*
			 * If the action (value in byte 1 of the cdb) is zero,
			 * we're reading keys, and that's the only condition
			 * where we need to be concerned with filtering keys
			 * and potential retries. Otherwise, we simply signal
			 * the semaphore and move on.
			 */
			if (pkt->pkt_cdbp[1] == 0) {
				/*
				 * If this is the completion of an internal
				 * retry then we need to make sure that the
				 * pkt and tpkt pointers are readjusted so
				 * the calls to scsi_destroy_pkt and pkt_comp
				 * below work correctly.
				 */
				if (vpkt->vpkt_org_vpkt != NULL) {
					pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
					tpkt = vpkt->vpkt_org_vpkt->
					    vpkt_tgt_pkt;

					/*
					 * If this command was issued through
					 * the taskq then we need to clear
					 * this flag for proper processing in
					 * the case of a retry from the target
					 * driver.
					 */
					vpkt->vpkt_state &=
					    ~VHCI_PKT_THRU_TASKQ;
				}

				/*
				 * if vhci_do_prin returns VHCI_CMD_CMPLT then
				 * vpkt will contain the address of the
				 * original vpkt
				 */
				if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
					/*
					 * The command has been resent to get
					 * all the keys from the device. Don't
					 * complete the command with ssd until
					 * the retry completes.
					 */
					return;
				}
			} else {
				sema_v(&vlun->svl_pgr_sema);
			}
		}

		break;

	case CMD_TIMEOUT:
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {

			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "!scsi vhci timeout invoked\n"));

			(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
			    FALSE, VHCI_DEPTH_ALL);
		}
		MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
		tpkt->pkt_statistics |= STAT_ABORTED;
		err_str = timeout_err;
		break;

	case CMD_TRAN_ERR:
		/*
		 * This status is returned if the transport has sent the cmd
		 * down the link to the target and then some error occurs.
		 * In the case of a SCSI-II RESERVE cmd, we don't know if the
		 * reservation has been accepted by the target or not, so we
		 * need to clear the reservation.
		 */
		if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
		    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
			    " cmd_tran_err for scsi-2 reserve cmd\n"));
			if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
			    TRUE, VHCI_DEPTH_TARGET)) {
				VHCI_DEBUG(1, (CE_WARN, NULL,
				    "!vhci_intr cmd_tran_err reset failed!"));
			}
		}
		break;

	case CMD_DEV_GONE:
		tpkt->pkt_reason = CMD_CMPLT;
		tpkt->pkt_state = STATE_GOT_BUS |
		    STATE_GOT_TARGET | STATE_SENT_CMD |
		    STATE_GOT_STATUS;
		*(tpkt->pkt_scbp) = STATUS_BUSY;
		break;

	default:
		break;
	}

	/*
	 * If a SCSI-II RESERVE cmd has been serviced by the lower layers,
	 * clear the flag so the lun is not QUIESCED any longer.
	 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
	 * is retried, a taskq shall again be dispatched to service it. Else
	 * it may lead to a system hang if the retry is within interrupt
	 * context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
	}

	/*
	 * vpkt_org_vpkt should always be NULL here if the retry command
	 * has been successfully processed. If vpkt_org_vpkt != NULL at
	 * this point, it is an error so restore the original vpkt and
	 * return an error to the target driver so it can retry the
	 * command as appropriate.
	 */
	if (vpkt->vpkt_org_vpkt != NULL) {
		struct vhci_pkt *new_vpkt = vpkt;
		vpkt = vpkt->vpkt_org_vpkt;

		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
		    new_vpkt->vpkt_tgt_pkt);

		/*
		 * Mark this command completion as having an error so that
		 * ssd will retry the command.
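		 * (CMD_ABORTED together with STAT_ABORTED is treated as a
		 * retryable completion by the target driver, so the original
		 * request is re-driven from the top.)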
3343 */ 3344 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 3345 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 3346 3347 pkt = vpkt->vpkt_hba_pkt; 3348 tpkt = vpkt->vpkt_tgt_pkt; 3349 } 3350 3351 if ((err_str != NULL) && (pkt->pkt_reason != 3352 svp->svp_last_pkt_reason)) { 3353 cdip = vlun->svl_dip; 3354 pdip = mdi_pi_get_phci(vpkt->vpkt_path); 3355 vdip = ddi_get_parent(cdip); 3356 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3357 dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3358 vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s (%s%d)", 3359 ddi_pathname(cdip, cpath), ddi_driver_name(cdip), 3360 ddi_get_instance(cdip), err_str, 3361 ddi_pathname(pdip, dpath), ddi_driver_name(pdip), 3362 ddi_get_instance(pdip)); 3363 kmem_free(cpath, MAXPATHLEN); 3364 kmem_free(dpath, MAXPATHLEN); 3365 } 3366 svp->svp_last_pkt_reason = pkt->pkt_reason; 3367 VHCI_DECR_PATH_CMDCOUNT(svp); 3368 3369 /* 3370 * For PARTIAL_DMA, vhci should not free the path. 3371 * Target driver will call into vhci_scsi_dmafree or 3372 * destroy pkt to release this path. 3373 */ 3374 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 3375 scsi_destroy_pkt(pkt); 3376 vpkt->vpkt_hba_pkt = NULL; 3377 if (vpkt->vpkt_path) { 3378 mdi_rele_path(vpkt->vpkt_path); 3379 vpkt->vpkt_path = NULL; 3380 } 3381 } 3382 3383 if (tpkt->pkt_comp) { 3384 (*tpkt->pkt_comp)(tpkt); 3385 } 3386 } 3387 3388 /* 3389 * two possibilities: (1) failover has completed 3390 * or (2) is in progress; update our path states for 3391 * the former case; for the latter case, 3392 * initiate a scsi_watch request to 3393 * determine when failover completes - vlun is HELD 3394 * until failover completes; BUSY is returned to upper 3395 * layer in both the cases 3396 */ 3397 static int 3398 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat) 3399 { 3400 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 3401 struct scsi_pkt *tpkt; 3402 scsi_vhci_priv_t *svp; 3403 scsi_vhci_lun_t *vlun; 3404 struct scsi_vhci *vhci; 3405 scsi_vhci_swarg_t *swarg; 3406 char *path; 3407 3408 ASSERT(vpkt != NULL); 3409 tpkt = vpkt->vpkt_tgt_pkt; 3410 ASSERT(tpkt != NULL); 3411 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3412 ASSERT(svp != NULL); 3413 vlun = svp->svp_svl; 3414 ASSERT(vlun != NULL); 3415 ASSERT(VHCI_LUN_IS_HELD(vlun)); 3416 3417 vhci = ADDR2VHCI(&tpkt->pkt_address); 3418 3419 if (fostat == SCSI_SENSE_INACTIVE) { 3420 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover " 3421 "detected for %s; updating path states...\n", 3422 vlun->svl_lun_wwn)); 3423 /* 3424 * set the vlun flag to indicate to the task that the target 3425 * port group needs updating 3426 */ 3427 vlun->svl_flags |= VLUN_UPDATE_TPG; 3428 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq, 3429 vhci_update_pathstates, (void *)vlun, KM_SLEEP); 3430 } else { 3431 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3432 vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip), 3433 "!%s (%s%d): Waiting for externally initiated failover " 3434 "to complete", ddi_pathname(vlun->svl_dip, path), 3435 ddi_driver_name(vlun->svl_dip), 3436 ddi_get_instance(vlun->svl_dip)); 3437 kmem_free(path, MAXPATHLEN); 3438 swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP); 3439 if (swarg == NULL) { 3440 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: " 3441 "request packet allocation for %s failed....\n", 3442 vlun->svl_lun_wwn)); 3443 VHCI_RELEASE_LUN(vlun); 3444 return (PKT_RETURN); 3445 } 3446 swarg->svs_svp = svp; 3447 swarg->svs_tos = ddi_get_time(); 3448 swarg->svs_pi = vpkt->vpkt_path; 3449 swarg->svs_release_lun 
= 0;
		swarg->svs_done = 0;
		/*
		 * place a hold on the path...we don't want it to
		 * vanish while scsi_watch is in progress
		 */
		mdi_hold_path(vpkt->vpkt_path);
		svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
		    VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
		    (caddr_t)swarg);
	}
	return (BUSY_RETURN);
}

/*
 * vhci_efo_watch_cb:
 *	Callback from scsi_watch request to check the failover status.
 *	Completion is either due to successful failover or timeout.
 *	Upon successful completion, vhci_update_path_states is called.
 *	For the timeout condition, vhci_efo_done is called.
 *	Always returns 0 to scsi_watch to keep retrying until vhci_efo_done
 *	terminates this request properly in a separate thread.
 */

static int
vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct scsi_status *statusp = resultp->statusp;
	struct scsi_extended_sense *sensep = resultp->sensep;
	struct scsi_pkt *pkt = resultp->pkt;
	scsi_vhci_swarg_t *swarg;
	scsi_vhci_priv_t *svp;
	scsi_vhci_lun_t *vlun;
	struct scsi_vhci *vhci;
	dev_info_t *vdip;
	int rval, updt_paths;

	swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
	svp = swarg->svs_svp;
	if (swarg->svs_done) {
		/*
		 * Failover has already completed or timed out.
		 * Waiting for vhci_efo_done to terminate this scsi_watch.
		 */
		return (0);
	}

	ASSERT(svp != NULL);
	vlun = svp->svp_svl;
	ASSERT(vlun != NULL);
	ASSERT(VHCI_LUN_IS_HELD(vlun));
	vlun->svl_efo_update_path = 0;
	vdip = ddi_get_parent(vlun->svl_dip);
	vhci = ddi_get_soft_state(vhci_softstate,
	    ddi_get_instance(vdip));

	updt_paths = 0;

	if (pkt->pkt_reason != CMD_CMPLT) {
		if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
			swarg->svs_release_lun = 1;
			goto done;
		}
		return (0);
	}
	if (*((unsigned char *)statusp) == STATUS_CHECK) {
		rval = (*(vlun->svl_fops->sfo_analyze_sense))
		    (svp->svp_psd, sensep, vlun->svl_fops_ctpriv);
		switch (rval) {
		/*
		 * Only update path states in case the path is definitely
		 * inactive, or no failover occurred. For all other
		 * check conditions continue pinging. An unexpected
		 * check condition shouldn't cause pinging to complete
		 * prematurely.
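		 * Every return path below re-checks the time elapsed since
		 * svs_tos against VHCI_EXTFO_TIMEOUT, so a failover that never
		 * settles eventually releases the LUN via vhci_efo_done.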
		 */
		case SCSI_SENSE_INACTIVE:
		case SCSI_SENSE_NOFAILOVER:
			updt_paths = 1;
			break;
		default:
			if ((ddi_get_time() - swarg->svs_tos)
			    >= VHCI_EXTFO_TIMEOUT) {
				swarg->svs_release_lun = 1;
				goto done;
			}
			return (0);
		}
	} else if (*((unsigned char *)statusp) ==
	    STATUS_RESERVATION_CONFLICT) {
		updt_paths = 1;
	} else if ((*((unsigned char *)statusp)) &
	    (STATUS_BUSY | STATUS_QFULL)) {
		return (0);
	}
	if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
	    (updt_paths == 1)) {
		/*
		 * we got here because we had detected an
		 * externally initiated failover; things
		 * have settled down now, so let's
		 * start up a task to update the
		 * path states and target port group
		 */
		vlun->svl_efo_update_path = 1;
		swarg->svs_done = 1;
		vlun->svl_swarg = swarg;
		vlun->svl_flags |= VLUN_UPDATE_TPG;
		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
		    vhci_update_pathstates, (void *)vlun,
		    KM_SLEEP);
		return (0);
	}
	if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
		swarg->svs_release_lun = 1;
		goto done;
	}
	return (0);
done:
	swarg->svs_done = 1;
	(void) taskq_dispatch(vhci->vhci_taskq,
	    vhci_efo_done, (void *)swarg, KM_SLEEP);
	return (0);
}

/*
 * vhci_efo_done:
 *	Cleanly terminates scsi_watch and frees up resources.
 *	Called as a taskq function from vhci_efo_watch_cb on the EFO timeout
 *	condition, or by vhci_update_path_states on externally initiated
 *	failover completion.
 */
static void
vhci_efo_done(void *arg)
{
	scsi_vhci_lun_t *vlun;
	scsi_vhci_swarg_t *swarg = (scsi_vhci_swarg_t *)arg;
	scsi_vhci_priv_t *svp = swarg->svs_svp;
	ASSERT(svp);

	vlun = svp->svp_svl;
	ASSERT(vlun);

	/* Wait for clean termination of scsi_watch */
	(void) scsi_watch_request_terminate(svp->svp_sw_token,
	    SCSI_WATCH_TERMINATE_WAIT);
	svp->svp_sw_token = NULL;

	/* release the path and free up resources to mark failover complete */
	mdi_rele_path(swarg->svs_pi);
	if (swarg->svs_release_lun) {
		VHCI_RELEASE_LUN(vlun);
	}
	kmem_free((void *)swarg, sizeof (*swarg));
}

/*
 * Update the path states.
 * vlun should be HELD when this is invoked.
 * Calls vhci_efo_done to clean up resources allocated for EFO.
 */
void
vhci_update_pathstates(void *arg)
{
	mdi_pathinfo_t *pip, *npip;
	dev_info_t *dip, *pdip;
	struct scsi_failover_ops *fo;
	struct scsi_vhci_priv *svp;
	struct scsi_device *psd;
	struct scsi_path_opinfo opinfo;
	char *pclass, *tptr;
	struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
	int sps; /* mdi_select_path() status */
	char *cpath, *dpath;
	struct scsi_vhci *vhci;
	struct scsi_pkt *pkt;
	struct buf *bp;
	int reserve_conflict = 0;

	ASSERT(VHCI_LUN_IS_HELD(vlun));
	dip = vlun->svl_dip;
	pip = npip = NULL;

	vhci = ddi_get_soft_state(vhci_softstate,
	    ddi_get_instance(ddi_get_parent(dip)));

	sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
	    MDI_SELECT_STANDBY_PATH), NULL, &npip);
	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
		goto done;
	}

	fo = vlun->svl_fops;
	do {
		pip = npip;
		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
		psd = svp->svp_psd;
		if ((*fo->sfo_path_get_opinfo)(psd, &opinfo,
		    vlun->svl_fops_ctpriv) != 0) {
			sps = mdi_select_path(dip, NULL,
			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
			    pip, &npip);
			mdi_rele_path(pip);
			continue;
		}

		if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
		    MDI_SUCCESS) {
			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "!vhci_update_pathstates: prop lookup failed for "
			    "path 0x%p\n", (void *)pip));
			sps = mdi_select_path(dip, NULL,
			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
			    pip, &npip);
			mdi_rele_path(pip);
			continue;
		}

		/*
		 * Need to update the "path-class" property
		 * value in the device tree if different
		 * from the existing value.
		 */
		if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
			(void) mdi_prop_update_string(pip, "path-class",
			    opinfo.opinfo_path_attr);
		}

		/*
		 * Only change the state if needed, i.e. don't call
		 * mdi_pi_set_state to ONLINE a path if it's already
		 * ONLINE. Same for STANDBY paths.
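		 * The ONLINE transition below also issues a single-block READ
		 * on the newly activated path; a RESERVATION CONFLICT
		 * completion flags a stale SCSI-II reservation
		 * (reserve_conflict), which is cleared with a reset once the
		 * path walk finishes.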
3681 */ 3682 3683 if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE || 3684 opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) { 3685 if (!(MDI_PI_IS_ONLINE(pip))) { 3686 VHCI_DEBUG(1, (CE_NOTE, NULL, 3687 "!vhci_update_pathstates: marking path" 3688 " 0x%p as ONLINE\n", (void *)pip)); 3689 pdip = mdi_pi_get_phci(pip); 3690 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3691 dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3692 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s" 3693 " (%s%d): path %s (%s%d) target address %s" 3694 " is now ONLINE because of" 3695 " an externally initiated failover", 3696 ddi_pathname(dip, cpath), 3697 ddi_driver_name(dip), 3698 ddi_get_instance(dip), 3699 ddi_pathname(pdip, dpath), 3700 ddi_driver_name(pdip), 3701 ddi_get_instance(pdip), 3702 mdi_pi_get_addr(pip)); 3703 kmem_free(cpath, MAXPATHLEN); 3704 kmem_free(dpath, MAXPATHLEN); 3705 mdi_pi_set_state(pip, 3706 MDI_PATHINFO_STATE_ONLINE); 3707 mdi_pi_set_preferred(pip, 3708 opinfo.opinfo_preferred); 3709 tptr = kmem_alloc(strlen 3710 (opinfo.opinfo_path_attr)+1, KM_SLEEP); 3711 (void) strlcpy(tptr, opinfo.opinfo_path_attr, 3712 (strlen(opinfo.opinfo_path_attr)+1)); 3713 mutex_enter(&vlun->svl_mutex); 3714 if (vlun->svl_active_pclass != NULL) { 3715 kmem_free(vlun->svl_active_pclass, 3716 strlen(vlun->svl_active_pclass)+1); 3717 } 3718 vlun->svl_active_pclass = tptr; 3719 if (vlun->svl_waiting_for_activepath) { 3720 vlun->svl_waiting_for_activepath = 0; 3721 } 3722 mutex_exit(&vlun->svl_mutex); 3723 /* Check for Reservation Conflict */ 3724 bp = scsi_alloc_consistent_buf( 3725 &svp->svp_psd->sd_address, 3726 (struct buf *)NULL, DEV_BSIZE, B_READ, 3727 NULL, NULL); 3728 if (!bp) { 3729 VHCI_DEBUG(1, (CE_NOTE, NULL, 3730 "vhci_update_pathstates: " 3731 "!No resources (buf)\n")); 3732 mdi_rele_path(pip); 3733 goto done; 3734 } 3735 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, 3736 NULL, bp, CDB_GROUP1, 3737 sizeof (struct scsi_arq_status), 0, 3738 PKT_CONSISTENT, NULL, NULL); 3739 if (pkt) { 3740 (void) scsi_setup_cdb((union scsi_cdb *) 3741 (uintptr_t)pkt->pkt_cdbp, 3742 SCMD_READ, 1, 1, 0); 3743 pkt->pkt_time = 3*30; 3744 pkt->pkt_flags = FLAG_NOINTR; 3745 pkt->pkt_path_instance = 3746 mdi_pi_get_path_instance(pip); 3747 3748 if ((scsi_transport(pkt) == 3749 TRAN_ACCEPT) && (pkt->pkt_reason 3750 == CMD_CMPLT) && (SCBP_C(pkt) == 3751 STATUS_RESERVATION_CONFLICT)) { 3752 reserve_conflict = 1; 3753 } 3754 scsi_destroy_pkt(pkt); 3755 } 3756 scsi_free_consistent_buf(bp); 3757 } else if (MDI_PI_IS_ONLINE(pip)) { 3758 if (strcmp(pclass, opinfo.opinfo_path_attr) 3759 != 0) { 3760 mdi_pi_set_preferred(pip, 3761 opinfo.opinfo_preferred); 3762 mutex_enter(&vlun->svl_mutex); 3763 if (vlun->svl_active_pclass == NULL || 3764 strcmp(opinfo.opinfo_path_attr, 3765 vlun->svl_active_pclass) != 0) { 3766 mutex_exit(&vlun->svl_mutex); 3767 tptr = kmem_alloc(strlen 3768 (opinfo.opinfo_path_attr)+1, 3769 KM_SLEEP); 3770 (void) strlcpy(tptr, 3771 opinfo.opinfo_path_attr, 3772 (strlen 3773 (opinfo.opinfo_path_attr) 3774 +1)); 3775 mutex_enter(&vlun->svl_mutex); 3776 } else { 3777 /* 3778 * No need to update 3779 * svl_active_pclass 3780 */ 3781 tptr = NULL; 3782 mutex_exit(&vlun->svl_mutex); 3783 } 3784 if (tptr) { 3785 if (vlun->svl_active_pclass 3786 != NULL) { 3787 kmem_free(vlun-> 3788 svl_active_pclass, 3789 strlen(vlun-> 3790 svl_active_pclass) 3791 +1); 3792 } 3793 vlun->svl_active_pclass = tptr; 3794 mutex_exit(&vlun->svl_mutex); 3795 } 3796 } 3797 } 3798 } else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) && 3799 
!(MDI_PI_IS_STANDBY(pip))) { 3800 VHCI_DEBUG(1, (CE_NOTE, NULL, 3801 "!vhci_update_pathstates: marking path" 3802 " 0x%p as STANDBY\n", (void *)pip)); 3803 pdip = mdi_pi_get_phci(pip); 3804 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3805 dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3806 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s" 3807 " (%s%d): path %s (%s%d) target address %s" 3808 " is now STANDBY because of" 3809 " an externally initiated failover", 3810 ddi_pathname(dip, cpath), 3811 ddi_driver_name(dip), 3812 ddi_get_instance(dip), 3813 ddi_pathname(pdip, dpath), 3814 ddi_driver_name(pdip), 3815 ddi_get_instance(pdip), 3816 mdi_pi_get_addr(pip)); 3817 kmem_free(cpath, MAXPATHLEN); 3818 kmem_free(dpath, MAXPATHLEN); 3819 mdi_pi_set_state(pip, 3820 MDI_PATHINFO_STATE_STANDBY); 3821 mdi_pi_set_preferred(pip, 3822 opinfo.opinfo_preferred); 3823 mutex_enter(&vlun->svl_mutex); 3824 if (vlun->svl_active_pclass != NULL) { 3825 if (strcmp(vlun->svl_active_pclass, 3826 opinfo.opinfo_path_attr) == 0) { 3827 kmem_free(vlun-> 3828 svl_active_pclass, 3829 strlen(vlun-> 3830 svl_active_pclass)+1); 3831 vlun->svl_active_pclass = NULL; 3832 } 3833 } 3834 mutex_exit(&vlun->svl_mutex); 3835 } 3836 (void) mdi_prop_free(pclass); 3837 sps = mdi_select_path(dip, NULL, 3838 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), 3839 pip, &npip); 3840 mdi_rele_path(pip); 3841 3842 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 3843 3844 /* 3845 * Check to see if this vlun has an active SCSI-II RESERVE. If so, 3846 * clear the reservation by sending a reset, so the host doesn't 3847 * receive a reservation conflict. 3848 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd 3849 * of the reset, explicitly. 3850 */ 3851 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 3852 if (reserve_conflict && (vlun->svl_xlf_capable == 0)) { 3853 (void) vhci_recovery_reset(vlun, 3854 &svp->svp_psd->sd_address, FALSE, 3855 VHCI_DEPTH_TARGET); 3856 } 3857 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 3858 mutex_enter(&vhci->vhci_mutex); 3859 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 3860 &vhci->vhci_reset_notify_listf); 3861 mutex_exit(&vhci->vhci_mutex); 3862 } 3863 if (vlun->svl_flags & VLUN_UPDATE_TPG) { 3864 /* 3865 * Update the AccessState of related MP-API TPGs 3866 */ 3867 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 3868 vlun->svl_flags &= ~VLUN_UPDATE_TPG; 3869 } 3870 done: 3871 if (vlun->svl_efo_update_path) { 3872 vlun->svl_efo_update_path = 0; 3873 vhci_efo_done(vlun->svl_swarg); 3874 vlun->svl_swarg = 0; 3875 } 3876 VHCI_RELEASE_LUN(vlun); 3877 } 3878 3879 /* ARGSUSED */ 3880 static int 3881 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 3882 { 3883 scsi_hba_tran_t *hba = NULL; 3884 struct scsi_device *psd = NULL; 3885 scsi_vhci_lun_t *vlun = NULL; 3886 dev_info_t *pdip = NULL; 3887 dev_info_t *tgt_dip; 3888 struct scsi_vhci *vhci; 3889 char *guid; 3890 scsi_vhci_priv_t *svp = NULL; 3891 int rval = MDI_FAILURE; 3892 int vlun_alloced = 0; 3893 3894 ASSERT(vdip != NULL); 3895 ASSERT(pip != NULL); 3896 3897 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 3898 ASSERT(vhci != NULL); 3899 3900 pdip = mdi_pi_get_phci(pip); 3901 ASSERT(pdip != NULL); 3902 3903 hba = ddi_get_driver_private(pdip); 3904 ASSERT(hba != NULL); 3905 3906 tgt_dip = mdi_pi_get_client(pip); 3907 ASSERT(tgt_dip != NULL); 3908 3909 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS, 3910 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 3911 VHCI_DEBUG(1, (CE_WARN, NULL, 3912
"vhci_pathinfo_init: lun guid property failed")); 3913 goto failure; 3914 } 3915 3916 vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced); 3917 ddi_prop_free(guid); 3918 3919 vlun->svl_dip = tgt_dip; 3920 3921 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP); 3922 svp->svp_svl = vlun; 3923 3924 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip); 3925 mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL); 3926 cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL); 3927 3928 psd = kmem_zalloc(sizeof (*psd), KM_SLEEP); 3929 mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL); 3930 3931 /* 3932 * Clone transport structure if requested, so 3933 * Self enumerating HBAs always need to use cloning 3934 */ 3935 3936 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 3937 scsi_hba_tran_t *clone = 3938 kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP); 3939 bcopy(hba, clone, sizeof (scsi_hba_tran_t)); 3940 hba = clone; 3941 hba->tran_sd = psd; 3942 } else { 3943 ASSERT(hba->tran_sd == NULL); 3944 } 3945 psd->sd_dev = tgt_dip; 3946 psd->sd_address.a_hba_tran = hba; 3947 psd->sd_private = (caddr_t)pip; 3948 svp->svp_psd = psd; 3949 mdi_pi_set_vhci_private(pip, (caddr_t)svp); 3950 3951 /* 3952 * call hba's target init entry point if it exists 3953 */ 3954 if (hba->tran_tgt_init != NULL) { 3955 if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip, 3956 hba, psd)) != DDI_SUCCESS) { 3957 VHCI_DEBUG(1, (CE_WARN, pdip, 3958 "!vhci_pathinfo_init: tran_tgt_init failed for " 3959 "path=0x%p rval=%x", (void *)pip, rval)); 3960 goto failure; 3961 } 3962 } 3963 3964 svp->svp_new_path = 1; 3965 3966 psd->sd_inq = NULL; 3967 3968 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n", 3969 (void *)pip)); 3970 return (MDI_SUCCESS); 3971 3972 failure: 3973 if (psd) { 3974 mutex_destroy(&psd->sd_mutex); 3975 kmem_free(psd, sizeof (*psd)); 3976 } 3977 if (svp) { 3978 mdi_pi_set_vhci_private(pip, NULL); 3979 mutex_destroy(&svp->svp_mutex); 3980 cv_destroy(&svp->svp_cv); 3981 kmem_free(svp, sizeof (*svp)); 3982 } 3983 if (hba && hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) 3984 kmem_free(hba, sizeof (scsi_hba_tran_t)); 3985 3986 if (vlun_alloced) 3987 vhci_lun_free(tgt_dip); 3988 3989 return (rval); 3990 } 3991 3992 /* ARGSUSED */ 3993 static int 3994 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 3995 { 3996 scsi_hba_tran_t *hba = NULL; 3997 struct scsi_device *psd = NULL; 3998 dev_info_t *pdip = NULL; 3999 dev_info_t *cdip = NULL; 4000 scsi_vhci_priv_t *svp = NULL; 4001 4002 ASSERT(vdip != NULL); 4003 ASSERT(pip != NULL); 4004 4005 pdip = mdi_pi_get_phci(pip); 4006 ASSERT(pdip != NULL); 4007 4008 cdip = mdi_pi_get_client(pip); 4009 ASSERT(cdip != NULL); 4010 4011 hba = ddi_get_driver_private(pdip); 4012 ASSERT(hba != NULL); 4013 4014 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED); 4015 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4016 if (svp == NULL) { 4017 /* path already freed. Nothing to do. 
*/ 4018 return (MDI_SUCCESS); 4019 } 4020 4021 psd = svp->svp_psd; 4022 ASSERT(psd != NULL); 4023 4024 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4025 hba = psd->sd_address.a_hba_tran; 4026 ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE); 4027 ASSERT(hba->tran_sd == psd); 4028 } else { 4029 ASSERT(hba->tran_sd == NULL); 4030 } 4031 4032 if (hba->tran_tgt_free != NULL) { 4033 (*hba->tran_tgt_free) (pdip, cdip, hba, psd); 4034 } 4035 mutex_destroy(&psd->sd_mutex); 4036 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4037 kmem_free(hba, sizeof (*hba)); 4038 } 4039 4040 mdi_pi_set_vhci_private(pip, NULL); 4041 kmem_free((caddr_t)psd, sizeof (*psd)); 4042 4043 mutex_destroy(&svp->svp_mutex); 4044 cv_destroy(&svp->svp_cv); 4045 kmem_free((caddr_t)svp, sizeof (*svp)); 4046 4047 /* 4048 * If this is the last path to the client, 4049 * then free up the vlun as well. 4050 */ 4051 if (mdi_client_get_path_count(cdip) == 1) { 4052 vhci_lun_free(cdip); 4053 } 4054 4055 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n", 4056 (void *)pip)); 4057 return (MDI_SUCCESS); 4058 } 4059 4060 /* ARGSUSED */ 4061 static int 4062 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip, 4063 mdi_pathinfo_state_t state, uint32_t ext_state, int flags) 4064 { 4065 int rval = MDI_SUCCESS; 4066 scsi_vhci_priv_t *svp; 4067 scsi_vhci_lun_t *vlun; 4068 int held; 4069 int op = (flags & 0xf00) >> 8; 4070 struct scsi_vhci *vhci; 4071 4072 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4073 4074 if (flags & MDI_EXT_STATE_CHANGE) { 4075 /* 4076 * We do not want to issue any commands down the path in case 4077 * sync flag is set. Lower layers might not be ready to accept 4078 * any I/O commands. 4079 */ 4080 if (op == DRIVER_DISABLE) 4081 return (MDI_SUCCESS); 4082 4083 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4084 if (svp == NULL) { 4085 return (MDI_FAILURE); 4086 } 4087 vlun = svp->svp_svl; 4088 4089 if (flags & MDI_BEFORE_STATE_CHANGE) { 4090 /* 4091 * Hold the LUN. 4092 */ 4093 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 4094 if (flags & MDI_DISABLE_OP) { 4095 /* 4096 * Issue scsi reset if it happens to be 4097 * reserved path. 
4098 */ 4099 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 4100 /* 4101 * if a reservation is pending on 4102 * this path, don't mark the 4103 * path busy 4104 */ 4105 if (op == DRIVER_DISABLE_TRANSIENT) { 4106 VHCI_DEBUG(1, (CE_NOTE, NULL, 4107 "!vhci_pathinfo" 4108 "_state_change (pip:%p): " 4109 " reservation: fail busy\n", 4110 (void *)pip)); 4111 return (MDI_FAILURE); 4112 } 4113 if (pip == vlun->svl_resrv_pip) { 4114 if (vhci_recovery_reset( 4115 svp->svp_svl, 4116 &svp->svp_psd->sd_address, 4117 TRUE, 4118 VHCI_DEPTH_TARGET) == 0) { 4119 VHCI_DEBUG(1, 4120 (CE_NOTE, NULL, 4121 "!vhci_pathinfo" 4122 "_state_change " 4123 " (pip:%p): " 4124 "reset failed, " 4125 "give up!\n", 4126 (void *)pip)); 4127 } 4128 vlun->svl_flags &= 4129 ~VLUN_RESERVE_ACTIVE_FLG; 4130 } 4131 } 4132 } else if (flags & MDI_ENABLE_OP) { 4133 if (((vhci->vhci_conf_flags & 4134 VHCI_CONF_FLAGS_AUTO_FAILBACK) == 4135 VHCI_CONF_FLAGS_AUTO_FAILBACK) && 4136 MDI_PI_IS_USER_DISABLE(pip) && 4137 MDI_PI_IS_STANDBY(pip)) { 4138 struct scsi_failover_ops *fo; 4139 char *best_pclass, *pclass = NULL; 4140 int best_class, rv; 4141 /* 4142 * Failback if enabling a standby path 4143 * and it is the primary class or 4144 * preferred class 4145 */ 4146 best_class = mdi_pi_get_preferred(pip); 4147 if (best_class == 0) { 4148 /* 4149 * if not preferred - compare 4150 * path-class with class 4151 */ 4152 fo = vlun->svl_fops; 4153 (*fo->sfo_pathclass_next)(NULL, 4154 &best_pclass, 4155 vlun->svl_fops_ctpriv); 4156 pclass = NULL; 4157 rv = mdi_prop_lookup_string(pip, 4158 "path-class", &pclass); 4159 if (rv != MDI_SUCCESS || 4160 pclass == NULL) { 4161 vhci_log(CE_NOTE, vdip, 4162 "!path-class " 4163 " lookup " 4164 "failed. rv: %d" 4165 " class: %p", rv, 4166 (void *)pclass); 4167 } else if (strncmp(pclass, 4168 best_pclass, 4169 strlen(best_pclass)) == 0) { 4170 best_class = 1; 4171 } 4172 if (rv == MDI_SUCCESS && 4173 pclass != NULL) { 4174 rv = mdi_prop_free( 4175 pclass); 4176 if (rv != 4177 DDI_PROP_SUCCESS) { 4178 vhci_log( 4179 CE_NOTE, 4180 vdip, 4181 "!path-" 4182 "class" 4183 " free" 4184 " failed" 4185 " rv: %d" 4186 " class: " 4187 "%p", 4188 rv, 4189 (void *) 4190 pclass); 4191 } 4192 } 4193 } 4194 if (best_class == 1) { 4195 VHCI_DEBUG(1, (CE_NOTE, NULL, 4196 "preferred path: %p " 4197 "USER_DISABLE->USER_ENABLE " 4198 "transition for lun %s\n", 4199 (void *)pip, 4200 vlun->svl_lun_wwn)); 4201 (void) taskq_dispatch( 4202 vhci->vhci_taskq, 4203 vhci_initiate_auto_failback, 4204 (void *) vlun, KM_SLEEP); 4205 } 4206 } 4207 /* 4208 * if PGR is active, revalidate key and 4209 * register on this path also, if key is 4210 * still valid 4211 */ 4212 sema_p(&vlun->svl_pgr_sema); 4213 if (vlun->svl_pgr_active) 4214 (void) 4215 vhci_pgr_validate_and_register(svp); 4216 sema_v(&vlun->svl_pgr_sema); 4217 /* 4218 * Inform target driver about any 4219 * reservations to be reinstated if target 4220 * has dropped reservation during the busy 4221 * period.
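 *
 * Editorial illustration (not from this file): a target driver
 * typically registers for this notification with the DDI routine
 * scsi_reset_notify(9F), along the lines of
 *
 *	(void) scsi_reset_notify(&sd->sd_address, SCSI_RESET_NOTIFY,
 *	    my_reset_cb, (caddr_t)my_state);
 *
 * where my_reset_cb and my_state are hypothetical names; each
 * registered callback is invoked when
 * scsi_hba_reset_notify_callback() walks the listener list below.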
4222 */ 4223 mutex_enter(&vhci->vhci_mutex); 4224 scsi_hba_reset_notify_callback( 4225 &vhci->vhci_mutex, 4226 &vhci->vhci_reset_notify_listf); 4227 mutex_exit(&vhci->vhci_mutex); 4228 } 4229 } 4230 if (flags & MDI_AFTER_STATE_CHANGE) { 4231 if (flags & MDI_ENABLE_OP) { 4232 mutex_enter(&vhci_global_mutex); 4233 cv_broadcast(&vhci_cv); 4234 mutex_exit(&vhci_global_mutex); 4235 } 4236 if (vlun->svl_setcap_done) { 4237 (void) vhci_pHCI_cap(&svp->svp_psd->sd_address, 4238 "sector-size", vlun->svl_sector_size, 4239 1, pip); 4240 } 4241 4242 /* 4243 * Release the LUN 4244 */ 4245 VHCI_RELEASE_LUN(vlun); 4246 4247 /* 4248 * Path transition is complete. 4249 * Run callback to indicate target driver to 4250 * retry to prevent IO starvation. 4251 */ 4252 if (scsi_callback_id != 0) { 4253 ddi_run_callback(&scsi_callback_id); 4254 } 4255 } 4256 } else { 4257 switch (state) { 4258 case MDI_PATHINFO_STATE_ONLINE: 4259 rval = vhci_pathinfo_online(vdip, pip, flags); 4260 break; 4261 4262 case MDI_PATHINFO_STATE_OFFLINE: 4263 rval = vhci_pathinfo_offline(vdip, pip, flags); 4264 break; 4265 4266 default: 4267 break; 4268 } 4269 /* 4270 * Path transition is complete. 4271 * Run callback to indicate target driver to 4272 * retry to prevent IO starvation. 4273 */ 4274 if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) { 4275 ddi_run_callback(&scsi_callback_id); 4276 } 4277 return (rval); 4278 } 4279 4280 return (MDI_SUCCESS); 4281 } 4282 4283 /* 4284 * Parse the mpxio load balancing options. The datanameptr 4285 * will point to a string containing the load-balance-options value. 4286 * The load-balance-options value will be a property that 4287 * defines the load-balance algorithm and any arguments to that 4288 * algorithm. 4289 * For example: 4290 * device-type-mpxio-options-list= 4291 * "device-type=SUN SENA", "load-balance-options=logical-block-options" 4292 * "device-type=SUN SE6920", "round-robin-options"; 4293 * logical-block-options="load-balance=logical-block", "region-size=15"; 4294 * round-robin-options="load-balance=round-robin"; 4295 * 4296 * If the load-balance is not defined the load balance algorithm will 4297 * default to the global setting. There will be default values assigned 4298 * to the arguments (region-size=18) and if an argument is one 4299 * that is not known, it will be ignored. 
4300 */ 4301 static void 4302 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip, 4303 caddr_t datanameptr) 4304 { 4305 char *dataptr, *next_entry; 4306 caddr_t config_list = NULL; 4307 int config_list_len = 0, list_len = 0; 4308 int region_size = -1; 4309 client_lb_t load_balance = LOAD_BALANCE_NONE; /* until specified */ 4310 4311 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr, 4312 (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) { 4313 return; 4314 } 4315 4316 list_len = config_list_len; 4317 next_entry = config_list; 4318 while (config_list_len > 0) { 4319 dataptr = next_entry; 4320 4321 if (strncmp(mdi_load_balance, dataptr, 4322 strlen(mdi_load_balance)) == 0) { 4323 /* get the load-balance scheme */ 4324 dataptr += strlen(mdi_load_balance) + 1; 4325 if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) { 4326 (void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR); 4327 load_balance = LOAD_BALANCE_RR; 4328 } else if (strcmp(dataptr, 4329 LOAD_BALANCE_PROP_LBA) == 0) { 4330 (void) mdi_set_lb_policy(cdip, 4331 LOAD_BALANCE_LBA); 4332 load_balance = LOAD_BALANCE_LBA; 4333 } else if (strcmp(dataptr, 4334 LOAD_BALANCE_PROP_NONE) == 0) { 4335 (void) mdi_set_lb_policy(cdip, 4336 LOAD_BALANCE_NONE); 4337 load_balance = LOAD_BALANCE_NONE; 4338 } 4339 } else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE, 4340 strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) { 4341 int i = 0; 4342 char *ptr; 4343 char *tmp; 4344 4345 tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1); 4346 /* check for numeric value */ 4347 for (ptr = tmp; i < strlen(tmp); i++, ptr++) { 4348 if (!isdigit(*ptr)) { 4349 cmn_err(CE_WARN, 4350 "Illegal region size: %s." 4351 " Setting to default value: %d", 4352 tmp, 4353 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4354 region_size = 4355 LOAD_BALANCE_DEFAULT_REGION_SIZE; 4356 break; 4357 } 4358 } 4359 if (i >= strlen(tmp)) { 4360 region_size = stoi(&tmp); 4361 } 4362 (void) mdi_set_lb_region_size(cdip, region_size); 4363 } 4364 config_list_len -= (strlen(next_entry) + 1); 4365 next_entry += strlen(next_entry) + 1; 4366 } 4367 #ifdef DEBUG 4368 if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) { 4369 VHCI_DEBUG(1, (CE_NOTE, dip, 4370 "!vhci_parse_mpxio_lb_options: region-size: %d " 4371 "only valid for load-balance=logical-block\n", 4372 region_size)); 4373 } 4374 #endif 4375 if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) { 4376 VHCI_DEBUG(1, (CE_NOTE, dip, 4377 "!vhci_parse_mpxio_lb_options: No region-size" 4378 " defined for load-balance=logical-block." 4379 " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE)); 4380 (void) mdi_set_lb_region_size(cdip, 4381 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4382 } 4383 if (list_len > 0) { 4384 kmem_free(config_list, list_len); 4385 } 4386 } 4387 4388 /* 4389 * Parse the device-type-mpxio-options-list looking for the key of 4390 * "load-balance-options". If found, parse the load balancing options. 4391 * Check the comment of the vhci_get_device_type_mpxio_options() 4392 * for the device-type-mpxio-options-list.
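 *
 * Editorial note on the layout this routine assumes: datanameptr
 * points at a packed sequence of NUL-terminated "name=value"
 * strings, walked below in strlen()+1 strides, and the value of the
 * "load-balance-options" key (e.g. "logical-block-options") names
 * the property that vhci_parse_mpxio_lb_options() then fetches with
 * ddi_getlongprop().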
4393 */ 4394 static void 4395 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip, 4396 caddr_t datanameptr, int list_len) 4397 { 4398 char *dataptr; 4399 int len; 4400 4401 /* 4402 * get the data list 4403 */ 4404 dataptr = datanameptr; 4405 len = 0; 4406 while (len < list_len && 4407 strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR)) 4408 != 0) { 4409 if (strncmp(dataptr, LOAD_BALANCE_OPTIONS, 4410 strlen(LOAD_BALANCE_OPTIONS)) == 0) { 4411 len += strlen(LOAD_BALANCE_OPTIONS) + 1; 4412 dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1; 4413 vhci_parse_mpxio_lb_options(dip, cdip, dataptr); 4414 } 4415 len += strlen(dataptr) + 1; 4416 dataptr += strlen(dataptr) + 1; 4417 } 4418 } 4419 4420 /* 4421 * Check the inquiry string returned from the device with the device-type. 4422 * Check for the existence of the device-type-mpxio-options-list and, 4423 * if found, parse the list checking for a match with the device-type 4424 * value and the inquiry string returned from the device. If a match 4425 * is found, parse the mpxio options list. The format of the 4426 * device-type-mpxio-options-list is: 4427 * device-type-mpxio-options-list= 4428 * "device-type=SUN SENA", "load-balance-options=logical-block-options" 4429 * "device-type=SUN SE6920", "round-robin-options"; 4430 * logical-block-options="load-balance=logical-block", "region-size=15"; 4431 * round-robin-options="load-balance=round-robin"; 4432 */ 4433 void 4434 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip, 4435 struct scsi_device *devp) 4436 { 4437 4438 caddr_t config_list = NULL; 4439 caddr_t vidptr, datanameptr; 4440 int vidlen, dupletlen = 0; 4441 int config_list_len = 0, len; 4442 struct scsi_inquiry *inq = devp->sd_inq; 4443 4444 /* 4445 * Look up the device-type-mpxio-options-list and walk through 4446 * the list, comparing the vendor id returned by the earlier inquiry 4447 * command with the vids in the list; if there is a match, look up 4448 * the mpxio-options value. 4449 */ 4450 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 4451 MPXIO_OPTIONS_LIST, 4452 (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) { 4453 4454 /* 4455 * Compare vids in each duplet - if it matches, 4456 * parse the mpxio options list.
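 *
 * Editorial sketch: with a duplet "device-type=<vid>", the bcmp()
 * below compares strlen(<vid>) bytes against the raw INQUIRY data
 * starting at inq->inq_vid, so a <vid> longer than the 8-byte
 * vendor field simply continues the compare into the adjacent
 * inq_pid bytes; on a match, the remainder of the duplet is handed
 * to vhci_parse_mpxio_options().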
4457 */ 4458 for (len = config_list_len, vidptr = config_list; len > 0; 4459 len -= dupletlen) { 4460 4461 dupletlen = 0; 4462 4463 if (strlen(vidptr) != 0 && 4464 strncmp(vidptr, DEVICE_TYPE_STR, 4465 strlen(DEVICE_TYPE_STR)) == 0) { 4466 /* point to next duplet */ 4467 datanameptr = vidptr + strlen(vidptr) + 1; 4468 /* add len of this duplet */ 4469 dupletlen += strlen(vidptr) + 1; 4470 /* get to device type */ 4471 vidptr += strlen(DEVICE_TYPE_STR) + 1; 4472 vidlen = strlen(vidptr); 4473 if ((vidlen != 0) && 4474 bcmp(inq->inq_vid, vidptr, vidlen) == 0) { 4475 vhci_parse_mpxio_options(dip, cdip, 4476 datanameptr, len - dupletlen); 4477 break; 4478 } 4479 /* get to next duplet */ 4480 vidptr += strlen(vidptr) + 1; 4481 } 4482 /* get to the next device-type */ 4483 while (len - dupletlen > 0 && 4484 strlen(vidptr) != 0 && 4485 strncmp(vidptr, DEVICE_TYPE_STR, 4486 strlen(DEVICE_TYPE_STR)) != 0) { 4487 dupletlen += strlen(vidptr) + 1; 4488 vidptr += strlen(vidptr) + 1; 4489 } 4490 } 4491 if (config_list_len > 0) { 4492 kmem_free(config_list, config_list_len); 4493 } 4494 } 4495 } 4496 4497 static int 4498 vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip, 4499 struct scsi_failover_ops *fo, 4500 scsi_vhci_lun_t *vlun, 4501 struct scsi_vhci *vhci) 4502 { 4503 struct scsi_path_opinfo opinfo; 4504 char *pclass, *best_pclass; 4505 4506 if ((*fo->sfo_path_get_opinfo)(psd, &opinfo, 4507 vlun->svl_fops_ctpriv) != 0) { 4508 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: " 4509 "Failed to get operation info for path:%p\n", (void *)pip)); 4510 return (MDI_FAILURE); 4511 } 4512 /* set the xlf capable flag in the vlun for future use */ 4513 vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable; 4514 (void) mdi_prop_update_string(pip, "path-class", 4515 opinfo.opinfo_path_attr); 4516 4517 pclass = opinfo.opinfo_path_attr; 4518 if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) { 4519 mutex_enter(&vlun->svl_mutex); 4520 if (vlun->svl_active_pclass != NULL) { 4521 if (strcmp(vlun->svl_active_pclass, pclass) != 0) { 4522 mutex_exit(&vlun->svl_mutex); 4523 /* 4524 * Externally initiated failover has happened; 4525 * force the path state to be STANDBY/ONLINE, 4526 * next IO will trigger failover and thus 4527 * sync-up the pathstates. Reason we don't 4528 * sync-up immediately by invoking 4529 * vhci_update_pathstates() is because it 4530 * needs a VHCI_HOLD_LUN() and we don't 4531 * want to block here. 4532 * 4533 * Further, if the device is an ALUA device, 4534 * then failure to exactly match 'pclass' and 4535 * 'svl_active_pclass' (as is the case here) 4536 * indicates that the currently active path 4537 * is a 'non-optimized' path - which means 4538 * that 'svl_active_pclass' needs to be 4539 * replaced with opinfo.opinfo_path_state 4540 * value. 4541 */ 4542 4543 if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) { 4544 char *tptr; 4545 4546 /* 4547 * The device is ALUA compliant. The 4548 * state needs to be changed to online 4549 * rather than standby, which is what 4550 * is typically done for an asymmetric 4551 * device that is not ALUA compliant.
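 *
 * Summary sketch of the two branches below: for a TPGS/ALUA
 * device the path stays MDI_PATHINFO_STATE_ONLINE and only
 * svl_active_pclass is rewritten to the new path-class; for a
 * non-ALUA asymmetric device the path is parked in
 * MDI_PATHINFO_STATE_STANDBY until the next IO triggers failover.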
4552 */ 4553 mdi_pi_set_state(pip, 4554 MDI_PATHINFO_STATE_ONLINE); 4555 tptr = kmem_alloc(strlen 4556 (opinfo.opinfo_path_attr)+1, 4557 KM_SLEEP); 4558 (void) strlcpy(tptr, 4559 opinfo.opinfo_path_attr, 4560 (strlen(opinfo.opinfo_path_attr) 4561 +1)); 4562 mutex_enter(&vlun->svl_mutex); 4563 kmem_free(vlun->svl_active_pclass, 4564 strlen(vlun->svl_active_pclass)+1); 4565 vlun->svl_active_pclass = tptr; 4566 mutex_exit(&vlun->svl_mutex); 4567 } else { 4568 /* 4569 * Non ALUA device case. 4570 */ 4571 mdi_pi_set_state(pip, 4572 MDI_PATHINFO_STATE_STANDBY); 4573 } 4574 vlun->svl_fo_support = opinfo.opinfo_mode; 4575 mdi_pi_set_preferred(pip, 4576 opinfo.opinfo_preferred); 4577 return (MDI_SUCCESS); 4578 } 4579 } else { 4580 char *tptr; 4581 4582 /* 4583 * let's release the mutex before we try to 4584 * allocate, since the allocation might 4585 * sleep. 4586 */ 4587 mutex_exit(&vlun->svl_mutex); 4588 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP); 4589 (void) strlcpy(tptr, pclass, (strlen(pclass)+1)); 4590 mutex_enter(&vlun->svl_mutex); 4591 vlun->svl_active_pclass = tptr; 4592 } 4593 mutex_exit(&vlun->svl_mutex); 4594 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE); 4595 vlun->svl_waiting_for_activepath = 0; 4596 } else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) { 4597 mutex_enter(&vlun->svl_mutex); 4598 if (vlun->svl_active_pclass == NULL) { 4599 char *tptr; 4600 4601 mutex_exit(&vlun->svl_mutex); 4602 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP); 4603 (void) strlcpy(tptr, pclass, (strlen(pclass)+1)); 4604 mutex_enter(&vlun->svl_mutex); 4605 vlun->svl_active_pclass = tptr; 4606 } 4607 mutex_exit(&vlun->svl_mutex); 4608 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE); 4609 vlun->svl_waiting_for_activepath = 0; 4610 } else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) { 4611 mutex_enter(&vlun->svl_mutex); 4612 if (vlun->svl_active_pclass != NULL) { 4613 if (strcmp(vlun->svl_active_pclass, pclass) == 0) { 4614 mutex_exit(&vlun->svl_mutex); 4615 /* 4616 * externally initiated failover has happened; 4617 * force state to ONLINE (see comment above) 4618 */ 4619 mdi_pi_set_state(pip, 4620 MDI_PATHINFO_STATE_ONLINE); 4621 vlun->svl_fo_support = opinfo.opinfo_mode; 4622 mdi_pi_set_preferred(pip, 4623 opinfo.opinfo_preferred); 4624 return (MDI_SUCCESS); 4625 } 4626 } 4627 mutex_exit(&vlun->svl_mutex); 4628 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY); 4629 4630 /* 4631 * Initiate auto-failback, if enabled, for path if path-state 4632 * is transitioning from OFFLINE->STANDBY and pathclass is the 4633 * preferred pathclass for this storage. 4634 * NOTE: In the case where opinfo_path_state is SCSI_PATH_ACTIVE 4635 * (above), where the pi state is set to STANDBY, we don't 4636 * initiate auto-failback, as the next IO shall take care of 4637 * this. See comment above.
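 *
 * (Editorial note: VHCI_CONF_FLAGS_AUTO_FAILBACK reflects the
 * "auto-failback" setting in scsi_vhci.conf, e.g.
 *	auto-failback="enable";
 * so the taskq dispatch below only happens when that setting is
 * enabled.)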
4638 */ 4639 (*fo->sfo_pathclass_next)(NULL, &best_pclass, 4640 vlun->svl_fops_ctpriv); 4641 if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) == 4642 VHCI_CONF_FLAGS_AUTO_FAILBACK) && 4643 ((strcmp(pclass, best_pclass) == 0) || 4644 mdi_pi_get_preferred(pip) == 1) && 4645 ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)|| 4646 (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) { 4647 VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p" 4648 " OFFLINE->STANDBY transition for lun %s\n", 4649 best_pclass, (void *)pip, vlun->svl_lun_wwn)); 4650 (void) taskq_dispatch(vhci->vhci_taskq, 4651 vhci_initiate_auto_failback, (void *) vlun, 4652 KM_SLEEP); 4653 } 4654 } 4655 vlun->svl_fo_support = opinfo.opinfo_mode; 4656 mdi_pi_set_preferred(pip, opinfo.opinfo_preferred); 4657 4658 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x," 4659 " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n", 4660 opinfo.opinfo_rev, opinfo.opinfo_path_state, 4661 opinfo.opinfo_preferred, opinfo.opinfo_mode)); 4662 4663 return (MDI_SUCCESS); 4664 } 4665 4666 /* 4667 * Form the kstat name and call mdi_pi_kstat_create() 4668 */ 4669 void 4670 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip) 4671 { 4672 dev_info_t *tgt_dip; 4673 dev_info_t *pdip; 4674 char *guid; 4675 char *target_port, *target_port_dup; 4676 char ks_name[KSTAT_STRLEN]; 4677 uint_t pid; 4678 int by_id; 4679 mod_hash_val_t hv; 4680 4681 4682 /* return if we have already allocated kstats */ 4683 if (mdi_pi_kstat_exists(pip)) 4684 return; 4685 4686 /* 4687 * We need instance numbers to create a kstat name, return if we don't 4688 * have instance numbers assigned yet. 4689 */ 4690 tgt_dip = mdi_pi_get_client(pip); 4691 pdip = mdi_pi_get_phci(pip); 4692 if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1)) 4693 return; 4694 4695 /* 4696 * A path oriented kstat has a ks_name of the form: 4697 * 4698 * <client-driver><instance>.t<pid>.<pHCI-driver><instance> 4699 * 4700 * We maintain a bidirectional 'target-port' to <pid> map, 4701 * called targetmap. All pathinfo nodes with the same 4702 * 'target-port' map to the same <pid>. The iostat(1M) code, 4703 * when parsing a path oriented kstat name, uses the <pid> as 4704 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order 4705 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats, 4706 * this ioctl needs to translate a <pid> to a 'target-port' 4707 * even after all pathinfo nodes associated with the 4708 * 'target-port' have been destroyed. This is needed to support 4709 * consistent first-iteration activity-since-boot iostat(1M) 4710 * output. Because of this requirement, the mapping can't be 4711 * based on pathinfo information in a devinfo snapshot. 4712 */ 4713 4714 /* determine 'target-port' */ 4715 if (mdi_prop_lookup_string(pip, 4716 "target-port", &target_port) == MDI_SUCCESS) { 4717 target_port_dup = i_ddi_strdup(target_port, KM_SLEEP); 4718 (void) mdi_prop_free(target_port); 4719 by_id = 1; 4720 } else { 4721 /* 4722 * If the pHCI did not set up 'target-port' on this 4723 * pathinfo node, assume that our client is the only 4724 * one with paths to the device by using the guid 4725 * value as the 'target-port'. Since no other client 4726 * will have the same guid, no other client will use 4727 * the same <pid>. NOTE: a client with an instance 4728 * number always has a guid.
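 *
 * Worked example (instance numbers hypothetical): client ssd2
 * reached through pHCI fp0, with <pid> 5 assigned from the
 * targetmap, yields the kstat name "ssd2.t5.fp0" built by the
 * snprintf() below.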
4729 */ 4730 (void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, 4731 PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid); 4732 target_port_dup = i_ddi_strdup(guid, KM_SLEEP); 4733 ddi_prop_free(guid); 4734 4735 /* 4736 * For this type of mapping we don't want the 4737 * <pid> -> 'target-port' mapping to be made. This 4738 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl 4739 * to fail, and the iostat(1M) long '-n' output will 4740 * still use the <pid>. We do this because we just 4741 * made up the 'target-port' using the guid, and we 4742 * don't want to expose that fact in iostat output. 4743 */ 4744 by_id = 0; 4745 } 4746 4747 /* find/establish <pid> given 'target-port' */ 4748 mutex_enter(&vhci_targetmap_mutex); 4749 if (mod_hash_find(vhci_targetmap_byport, 4750 (mod_hash_key_t)target_port_dup, &hv) == 0) { 4751 pid = (int)(intptr_t)hv; /* mapping exists */ 4752 } else { 4753 pid = vhci_targetmap_pid++; /* new mapping */ 4754 4755 (void) mod_hash_insert(vhci_targetmap_byport, 4756 (mod_hash_key_t)target_port_dup, 4757 (mod_hash_val_t)(intptr_t)pid); 4758 if (by_id) { 4759 (void) mod_hash_insert(vhci_targetmap_bypid, 4760 (mod_hash_key_t)(uintptr_t)pid, 4761 (mod_hash_val_t)(uintptr_t)target_port_dup); 4762 } 4763 target_port_dup = NULL; /* owned by hash */ 4764 } 4765 mutex_exit(&vhci_targetmap_mutex); 4766 4767 /* form kstat name */ 4768 (void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d", 4769 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip), 4770 pid, ddi_driver_name(pdip), ddi_get_instance(pdip)); 4771 4772 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p " 4773 "kstat %s: pid %x <-> port %s\n", (void *)pip, 4774 ks_name, pid, target_port_dup)); 4775 if (target_port_dup) 4776 kmem_free(target_port_dup, strlen(target_port_dup) + 1); 4777 4778 /* call mdi to create kstats with the name we built */ 4779 (void) mdi_pi_kstat_create(pip, ks_name); 4780 } 4781 4782 /* ARGSUSED */ 4783 static int 4784 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 4785 { 4786 scsi_hba_tran_t *hba = NULL; 4787 struct scsi_device *psd = NULL; 4788 scsi_vhci_lun_t *vlun = NULL; 4789 dev_info_t *pdip = NULL; 4790 dev_info_t *tgt_dip; 4791 struct scsi_vhci *vhci; 4792 char *guid; 4793 struct scsi_failover *sf; 4794 struct scsi_failover_ops *sfo; 4795 char *override; 4796 scsi_vhci_priv_t *svp = NULL; 4797 struct buf *bp; 4798 struct scsi_address *ap; 4799 struct scsi_pkt *pkt; 4800 int rval = MDI_FAILURE; 4801 uint_t inq_size = VHCI_STD_INQ_SIZE; 4802 mpapi_item_list_t *list_ptr; 4803 mpapi_lu_data_t *ld; 4804 4805 ASSERT(vdip != NULL); 4806 ASSERT(pip != NULL); 4807 4808 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4809 ASSERT(vhci != NULL); 4810 4811 pdip = mdi_pi_get_phci(pip); 4812 hba = ddi_get_driver_private(pdip); 4813 ASSERT(hba != NULL); 4814 4815 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4816 ASSERT(svp != NULL); 4817 4818 tgt_dip = mdi_pi_get_client(pip); 4819 ASSERT(tgt_dip != NULL); 4820 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS, 4821 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 4822 VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid " 4823 "property failed")); 4824 goto failure; 4825 } 4826 4827 vlun = vhci_lun_lookup(tgt_dip); 4828 ASSERT(vlun != NULL); 4829 4830 ddi_prop_free(guid); 4831 4832 vlun->svl_dip = mdi_pi_get_client(pip); 4833 ASSERT(vlun->svl_dip != NULL); 4834 4835 psd = svp->svp_psd; 4836 ASSERT(psd != NULL); 4837 4838 /* 4839 * For INQUIRY response buffer size, we use
VHCI_STD_INQ_SIZE (132 bytes) 4840 * instead of SUN_INQSIZE (48 bytes), which is used in the sd layer. This is 4841 * because we could get the Vendor specific parameters (present from the 97th 4842 * byte onwards) which are required to process Vendor specific data 4843 * based on array type. 4844 * This INQUIRY buffer is freed in vhci_pathinfo_offline but NEVER 4845 * in a different layer like sd/phci transport. In other words, vhci 4846 * maintains its own copy of scsi_device and scsi_inquiry data on a 4847 * per-path basis. 4848 */ 4849 if (psd->sd_inq == NULL) { 4850 psd->sd_inq = (struct scsi_inquiry *) 4851 kmem_zalloc(inq_size, KM_SLEEP); 4852 } 4853 4854 tgt_dip = psd->sd_dev; 4855 ASSERT(tgt_dip != NULL); 4856 4857 /* 4858 * do inquiry to pass into probe routine; this 4859 * will avoid each probe routine doing scsi inquiry 4860 */ 4861 bp = getrbuf(KM_SLEEP); 4862 bp->b_un.b_addr = (caddr_t)psd->sd_inq; 4863 bp->b_flags = B_READ; 4864 bp->b_bcount = inq_size; 4865 bp->b_resid = 0; 4866 4867 ap = &psd->sd_address; 4868 pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP0, 4869 sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL); 4870 if (pkt == NULL) { 4871 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: " 4872 "Inquiry init_pkt failed :%p\n", (void *)pip)); 4873 rval = MDI_FAILURE; 4874 goto failure; 4875 } 4876 pkt->pkt_cdbp[0] = SCMD_INQUIRY; 4877 pkt->pkt_cdbp[4] = (uchar_t)inq_size; 4878 pkt->pkt_time = 60; 4879 4880 rval = vhci_do_scsi_cmd(pkt); 4881 scsi_destroy_pkt(pkt); 4882 freerbuf(bp); 4883 if (rval == 0) { 4884 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: " 4885 "Failover Inquiry failed path:%p rval:%x\n", 4886 (void *)pip, rval)); 4887 rval = MDI_FAILURE; 4888 goto failure; 4889 } 4890 4891 /* 4892 * Determine if device is supported under scsi_vhci, and select 4893 * failover module. 4894 * 4895 * See if there is a scsi_vhci.conf file override for this device's 4896 * VID/PID. The following values can be returned: 4897 * 4898 * NULL If NULL is returned then there is no scsi_vhci.conf 4899 * override. For NULL, we determine the failover_ops for 4900 * this device by checking the sfo_device_probe entry 4901 * point for each 'fops' module, in order. 4902 * 4903 * NOTE: Correct operation may depend on module ordering 4904 * of 'specific' (failover modules that are completely 4905 * VID/PID table based) to 'generic' (failover modules 4906 * that are based on T10 standards like TPGS). Currently, 4907 * the value of 'ddi-forceload' in scsi_vhci.conf is used 4908 * to establish the module list and probe order. 4909 * 4910 * "NONE" If the value "NONE" is returned then there is a 4911 * scsi_vhci.conf VID/PID override to indicate the device 4912 * should not be supported under scsi_vhci (even if there 4913 * is an 'fops' module supporting the device). 4914 * 4915 * "<other>" If another value is returned then that value is the 4916 * name of the 'fops' module that should be used.
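 *
 * A hypothetical scsi_vhci.conf override entry (VID/PID strings and
 * module name invented purely for illustration) could look like:
 *
 *	scsi-vhci-failover-override =
 *	    "ACME    Array   ", "f_asym_acme",
 *	    "OTHR    NoMpxio ", "NONE";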
4917 */ 4918 sfo = NULL; /* "NONE" */ 4919 override = scsi_get_device_type_string( 4920 "scsi-vhci-failover-override", vdip, psd); 4921 4922 if (override == NULL) { 4923 /* NULL: default: select based on sfo_device_probe results */ 4924 for (sf = scsi_failover_table; sf->sf_mod; sf++) { 4925 if ((sf->sf_sfo == NULL) || 4926 ((*sf->sf_sfo->sfo_device_probe) (psd, 4927 psd->sd_inq, &vlun->svl_fops_ctpriv) == 4928 SFO_DEVICE_PROBE_PHCI)) 4929 continue; 4930 4931 /* found failover module, supported under scsi_vhci */ 4932 sfo = sf->sf_sfo; 4933 vlun->svl_fops_name = 4934 i_ddi_strdup(sfo->sfo_name, KM_SLEEP); 4935 break; 4936 } 4937 } else if (strcmp(override, "NONE") && strcmp(override, "none")) { 4938 /* !"NONE": select based on driver.conf specified name */ 4939 for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) { 4940 if ((sf->sf_sfo == NULL) || 4941 (sf->sf_sfo->sfo_name == NULL) || 4942 strcmp(override, sf->sf_sfo->sfo_name)) 4943 continue; 4944 4945 /* found failover module, supported under scsi_vhci */ 4946 sfo = sf->sf_sfo; 4947 vlun->svl_fops_name = kmem_alloc(strlen("conf ") + 4948 strlen(sfo->sfo_name) + 1, KM_SLEEP); 4949 (void) sprintf(vlun->svl_fops_name, "conf %s", 4950 sfo->sfo_name); 4951 break; 4952 } 4953 } 4954 if (override) 4955 kmem_free(override, strlen(override) + 1); 4956 4957 if (sfo == NULL) { 4958 /* no failover module - device not supported */ 4959 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, 4960 "!vhci_pathinfo_online: dev (path 0x%p) not " 4961 "supported\n", (void *)pip)); 4962 vlun->svl_not_supported = 1; 4963 rval = MDI_NOT_SUPPORTED; 4964 goto done; 4965 } 4966 4967 /* failover supported for device - save failover_ops in vlun */ 4968 vlun->svl_fops = sfo; 4969 4970 /* 4971 * Obtain the device-type based mpxio options as specified in 4972 * scsi_vhci.conf file. 4973 * 4974 * NOTE: currently, the end result is a call to 4975 * mdi_set_lb_region_size(). 4976 */ 4977 vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd); 4978 4979 /* 4980 * if PGR is active, revalidate key and register on this path also, 4981 * if key is still valid 4982 */ 4983 sema_p(&vlun->svl_pgr_sema); 4984 if (vlun->svl_pgr_active) { 4985 rval = vhci_pgr_validate_and_register(svp); 4986 if (rval != 1) { 4987 rval = MDI_FAILURE; 4988 sema_v(&vlun->svl_pgr_sema); 4989 goto failure; 4990 } 4991 } 4992 sema_v(&vlun->svl_pgr_sema); 4993 4994 if (svp->svp_new_path) { 4995 /* 4996 * Last chance to perform any cleanup operations on this 4997 * new path before making this path completely online. 4998 */ 4999 svp->svp_new_path = 0; 5000 5001 /* 5002 * If scsi_vhci knows the lun is already RESERVE'd, 5003 * then skip the issue of RELEASE on the new path. 5004 */ 5005 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) { 5006 /* 5007 * Issue SCSI-2 RELEASE only for the first time on 5008 * a new path just in case the host rebooted and 5009 * a reservation is still pending on this path. 5010 * IBM Shark storage does not clear RESERVE upon 5011 * host reboot.
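 *
 * (Editorial sketch: the packet built below carries a 6-byte
 * RELEASE CDB, opcode SCMD_RELEASE (0x17), i.e.
 *	{ 0x17, 0x00, 0x00, 0x00, 0x00, 0x00 }
 * and its completion status is deliberately ignored.)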
5012 */ 5013 ap = &psd->sd_address; 5014 pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0, 5015 sizeof (struct scsi_arq_status), 0, 0, 5016 SLEEP_FUNC, NULL); 5017 if (pkt == NULL) { 5018 VHCI_DEBUG(1, (CE_NOTE, NULL, 5019 "!vhci_pathinfo_online: " 5020 "Release init_pkt failed :%p\n", 5021 (void *)pip)); 5022 rval = MDI_FAILURE; 5023 goto failure; 5024 } 5025 pkt->pkt_cdbp[0] = SCMD_RELEASE; 5026 pkt->pkt_time = 60; 5027 5028 VHCI_DEBUG(1, (CE_NOTE, NULL, 5029 "!vhci_path_online: path:%p " 5030 "Issued SCSI-2 RELEASE\n", (void *)pip)); 5031 5032 /* Ignore the return value */ 5033 (void) vhci_do_scsi_cmd(pkt); 5034 scsi_destroy_pkt(pkt); 5035 } 5036 } 5037 5038 rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci); 5039 if (rval == MDI_FAILURE) { 5040 goto failure; 5041 } 5042 5043 /* Initialize MP-API data */ 5044 vhci_update_mpapi_data(vhci, vlun, pip); 5045 5046 /* 5047 * MP-API also needs the Inquiry data to be maintained in the 5048 * mp_vendor_prop_t structure, so find the lun and update its 5049 * structure with this data. 5050 */ 5051 list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL, 5052 MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun); 5053 ld = (mpapi_lu_data_t *)list_ptr->item->idata; 5054 if (ld != NULL) { 5055 bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8); 5056 bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16); 5057 bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4); 5058 } else { 5059 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: " 5060 "mpapi_lu_data_t is NULL")); 5061 } 5062 5063 /* create kstats for path */ 5064 vhci_kstat_create_pathinfo(pip); 5065 5066 done: 5067 mutex_enter(&vhci_global_mutex); 5068 cv_broadcast(&vhci_cv); 5069 mutex_exit(&vhci_global_mutex); 5070 5071 if (vlun->svl_setcap_done) { 5072 (void) vhci_pHCI_cap(ap, "sector-size", 5073 vlun->svl_sector_size, 1, pip); 5074 } 5075 5076 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n", 5077 (void *)pip)); 5078 5079 failure: 5080 if ((rval != MDI_SUCCESS) && psd->sd_inq) { 5081 kmem_free((caddr_t)psd->sd_inq, inq_size); 5082 psd->sd_inq = (struct scsi_inquiry *)NULL; 5083 } 5084 return (rval); 5085 } 5086 5087 /* 5088 * path offline handler. Release all bindings that will not be 5089 * released by the normal packet transport/completion code path. 5090 * Since we don't (presently) keep any bindings alive outside of 5091 * the in-transport packets (which will be released on completion) 5092 * there is not much to do here. 
5093 */ 5094 /* ARGSUSED */ 5095 static int 5096 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 5097 { 5098 scsi_hba_tran_t *hba = NULL; 5099 struct scsi_device *psd = NULL; 5100 dev_info_t *pdip = NULL; 5101 dev_info_t *cdip = NULL; 5102 scsi_vhci_priv_t *svp = NULL; 5103 uint_t inq_size = VHCI_STD_INQ_SIZE; 5104 5105 ASSERT(vdip != NULL); 5106 ASSERT(pip != NULL); 5107 5108 pdip = mdi_pi_get_phci(pip); 5109 ASSERT(pdip != NULL); 5110 if (pdip == NULL) { 5111 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5112 "phci dip", (void *)pip)); 5113 return (MDI_FAILURE); 5114 } 5115 5116 cdip = mdi_pi_get_client(pip); 5117 ASSERT(cdip != NULL); 5118 if (cdip == NULL) { 5119 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5120 "client dip", (void *)pip)); 5121 return (MDI_FAILURE); 5122 } 5123 5124 hba = ddi_get_driver_private(pdip); 5125 ASSERT(hba != NULL); 5126 5127 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 5128 if (svp == NULL) { 5129 /* 5130 * mdi_pathinfo node in INIT state can have vHCI private 5131 * information set to null 5132 */ 5133 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5134 "svp is NULL for pip 0x%p\n", (void *)pip)); 5135 return (MDI_SUCCESS); 5136 } 5137 5138 psd = svp->svp_psd; 5139 ASSERT(psd != NULL); 5140 5141 mutex_enter(&svp->svp_mutex); 5142 5143 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5144 "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip)); 5145 while (svp->svp_cmds != 0) { 5146 if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex, 5147 ddi_get_lbolt() + 5148 drv_usectohz(vhci_path_quiesce_timeout * 1000000)) == -1) { 5149 /* 5150 * The timeout time reached without the condition 5151 * being signaled. 5152 */ 5153 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5154 "Timeout reached on path 0x%p without the cond\n", 5155 (void *)pip)); 5156 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5157 "%d cmds still pending on path: 0x%p\n", 5158 svp->svp_cmds, (void *)pip)); 5159 break; 5160 } 5161 } 5162 mutex_exit(&svp->svp_mutex); 5163 5164 /* 5165 * Check to see if this vlun has an active SCSI-II RESERVE. And this 5166 * is the pip for the path that has been reserved. 5167 * If so clear the reservation by sending a reset, so the host will not 5168 * get a reservation conflict. Reset the flag VLUN_RESERVE_ACTIVE_FLG 5169 * for this lun. Also a reset notify is sent to the target driver 5170 * just in case the POR check condition is cleared by some other layer 5171 * in the stack. 
5172 */ 5173 if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 5174 if (pip == svp->svp_svl->svl_resrv_pip) { 5175 if (vhci_recovery_reset(svp->svp_svl, 5176 &svp->svp_psd->sd_address, TRUE, 5177 VHCI_DEPTH_TARGET) == 0) { 5178 VHCI_DEBUG(1, (CE_NOTE, NULL, 5179 "!vhci_pathinfo_offline (pip:%p):" 5180 "reset failed, retrying\n", (void *)pip)); 5181 delay(1*drv_usectohz(1000000)); 5182 if (vhci_recovery_reset(svp->svp_svl, 5183 &svp->svp_psd->sd_address, TRUE, 5184 VHCI_DEPTH_TARGET) == 0) { 5185 VHCI_DEBUG(1, (CE_NOTE, NULL, 5186 "!vhci_pathinfo_offline " 5187 "(pip:%p): reset failed, " 5188 "giving up!\n", (void *)pip)); 5189 } 5190 } 5191 svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 5192 } 5193 } 5194 5195 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE); 5196 if (psd->sd_inq) { 5197 kmem_free((caddr_t)psd->sd_inq, inq_size); 5198 psd->sd_inq = (struct scsi_inquiry *)NULL; 5199 } 5200 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED); 5201 5202 VHCI_DEBUG(1, (CE_NOTE, NULL, 5203 "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip)); 5204 return (MDI_SUCCESS); 5205 } 5206 5207 5208 /* 5209 * routine for SCSI VHCI IOCTL implementation. 5210 */ 5211 /* ARGSUSED */ 5212 static int 5213 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 5214 { 5215 struct scsi_vhci *vhci; 5216 dev_info_t *vdip; 5217 mdi_pathinfo_t *pip; 5218 int instance, held; 5219 int retval = 0; 5220 caddr_t phci_path = NULL, client_path = NULL; 5221 caddr_t paddr = NULL; 5222 sv_iocdata_t ioc; 5223 sv_iocdata_t *pioc = &ioc; 5224 sv_switch_to_cntlr_iocdata_t iocsc; 5225 sv_switch_to_cntlr_iocdata_t *piocsc = &iocsc; 5226 caddr_t s; 5227 scsi_vhci_lun_t *vlun; 5228 struct scsi_failover_ops *fo; 5229 char *pclass; 5230 5231 /* Check for validity of vhci structure */ 5232 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev))); 5233 if (vhci == NULL) { 5234 return (ENXIO); 5235 } 5236 5237 mutex_enter(&vhci->vhci_mutex); 5238 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5239 mutex_exit(&vhci->vhci_mutex); 5240 return (ENXIO); 5241 } 5242 mutex_exit(&vhci->vhci_mutex); 5243 5244 /* Get the vhci dip */ 5245 vdip = vhci->vhci_dip; 5246 ASSERT(vdip != NULL); 5247 instance = ddi_get_instance(vdip); 5248 5249 /* Allocate memory for getting parameters from userland */ 5250 phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5251 client_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5252 paddr = kmem_zalloc(MAXNAMELEN, KM_SLEEP); 5253 5254 /* 5255 * Set a local variable indicating the ioctl name. Used for 5256 * printing debug strings. 
5257 */ 5258 switch (cmd) { 5259 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5260 s = "GET_CLIENT_MULTIPATH_INFO"; 5261 break; 5262 5263 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5264 s = "GET_PHCI_MULTIPATH_INFO"; 5265 break; 5266 5267 case SCSI_VHCI_GET_CLIENT_NAME: 5268 s = "GET_CLIENT_NAME"; 5269 break; 5270 5271 case SCSI_VHCI_PATH_ONLINE: 5272 s = "PATH_ONLINE"; 5273 break; 5274 5275 case SCSI_VHCI_PATH_OFFLINE: 5276 s = "PATH_OFFLINE"; 5277 break; 5278 5279 case SCSI_VHCI_PATH_STANDBY: 5280 s = "PATH_STANDBY"; 5281 break; 5282 5283 case SCSI_VHCI_PATH_TEST: 5284 s = "PATH_TEST"; 5285 break; 5286 5287 case SCSI_VHCI_SWITCH_TO_CNTLR: 5288 s = "SWITCH_TO_CNTLR"; 5289 break; 5290 case SCSI_VHCI_PATH_DISABLE: 5291 s = "PATH_DISABLE"; 5292 break; 5293 case SCSI_VHCI_PATH_ENABLE: 5294 s = "PATH_ENABLE"; 5295 break; 5296 5297 case SCSI_VHCI_GET_TARGET_LONGNAME: 5298 s = "GET_TARGET_LONGNAME"; 5299 break; 5300 5301 #ifdef DEBUG 5302 case SCSI_VHCI_CONFIGURE_PHCI: 5303 s = "CONFIGURE_PHCI"; 5304 break; 5305 5306 case SCSI_VHCI_UNCONFIGURE_PHCI: 5307 s = "UNCONFIGURE_PHCI"; 5308 break; 5309 #endif 5310 5311 default: 5312 s = "Unknown"; 5313 vhci_log(CE_NOTE, vdip, 5314 "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd); 5315 retval = ENOTSUP; 5316 break; 5317 } 5318 if (retval != 0) { 5319 goto end; 5320 } 5321 5322 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s)); 5323 5324 /* 5325 * Get IOCTL parameters from userland 5326 */ 5327 switch (cmd) { 5328 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5329 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5330 case SCSI_VHCI_GET_CLIENT_NAME: 5331 case SCSI_VHCI_PATH_ONLINE: 5332 case SCSI_VHCI_PATH_OFFLINE: 5333 case SCSI_VHCI_PATH_STANDBY: 5334 case SCSI_VHCI_PATH_TEST: 5335 case SCSI_VHCI_PATH_DISABLE: 5336 case SCSI_VHCI_PATH_ENABLE: 5337 case SCSI_VHCI_GET_TARGET_LONGNAME: 5338 #ifdef DEBUG 5339 case SCSI_VHCI_CONFIGURE_PHCI: 5340 case SCSI_VHCI_UNCONFIGURE_PHCI: 5341 #endif 5342 retval = vhci_get_iocdata((const void *)data, pioc, mode, s); 5343 break; 5344 5345 case SCSI_VHCI_SWITCH_TO_CNTLR: 5346 retval = vhci_get_iocswitchdata((const void *)data, piocsc, 5347 mode, s); 5348 break; 5349 } 5350 if (retval != 0) { 5351 goto end; 5352 } 5353 5354 5355 /* 5356 * Process the IOCTL 5357 */ 5358 switch (cmd) { 5359 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5360 { 5361 uint_t num_paths; /* Num paths to client dev */ 5362 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5363 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5364 dev_info_t *cdip; /* Client device dip */ 5365 5366 if (pioc->ret_elem == NULL) { 5367 retval = EINVAL; 5368 break; 5369 } 5370 5371 /* Get client device path from user land */ 5372 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5373 retval = EFAULT; 5374 break; 5375 } 5376 5377 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5378 "client <%s>", s, client_path)); 5379 5380 /* Get number of paths to this client device */ 5381 if ((cdip = mdi_client_path2devinfo(vdip, client_path)) 5382 == NULL) { 5383 retval = ENXIO; 5384 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5385 "client dip doesn't exist. 
invalid path <%s>", 5386 s, client_path)); 5387 break; 5388 } 5389 num_paths = mdi_client_get_path_count(cdip); 5390 5391 if (ddi_copyout(&num_paths, pioc->ret_elem, 5392 sizeof (num_paths), mode)) { 5393 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5394 "num_paths copyout failed", s)); 5395 retval = EFAULT; 5396 break; 5397 } 5398 5399 /* If user just wanted num_paths, then return */ 5400 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5401 num_paths == 0) { 5402 break; 5403 } 5404 5405 /* Set num_paths to value as much as can be sent to userland */ 5406 if (num_paths > pioc->buf_elem) { 5407 num_paths = pioc->buf_elem; 5408 } 5409 5410 /* Allocate memory and get userland pointers */ 5411 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5412 pioc, mode, s) != 0) { 5413 retval = EFAULT; 5414 break; 5415 } 5416 ASSERT(upibuf != NULL); 5417 ASSERT(kpibuf != NULL); 5418 5419 /* 5420 * Get the path information and send it to userland. 5421 */ 5422 if (vhci_get_client_path_list(cdip, kpibuf, num_paths) 5423 != MDI_SUCCESS) { 5424 retval = ENXIO; 5425 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5426 break; 5427 } 5428 5429 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5430 pioc, mode, s)) { 5431 retval = EFAULT; 5432 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5433 break; 5434 } 5435 5436 /* Free the memory allocated for path information */ 5437 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5438 break; 5439 } 5440 5441 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5442 { 5443 uint_t num_paths; /* Num paths to client dev */ 5444 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5445 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5446 dev_info_t *pdip; /* PHCI device dip */ 5447 5448 if (pioc->ret_elem == NULL) { 5449 retval = EINVAL; 5450 break; 5451 } 5452 5453 /* Get PHCI device path from user land */ 5454 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5455 retval = EFAULT; 5456 break; 5457 } 5458 5459 VHCI_DEBUG(6, (CE_WARN, vdip, 5460 "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path)); 5461 5462 /* Get number of devices associated with this PHCI device */ 5463 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5464 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5465 "phci dip doesn't exist. invalid path <%s>", 5466 s, phci_path)); 5467 retval = ENXIO; 5468 break; 5469 } 5470 5471 num_paths = mdi_phci_get_path_count(pdip); 5472 5473 if (ddi_copyout(&num_paths, pioc->ret_elem, 5474 sizeof (num_paths), mode)) { 5475 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5476 "num_paths copyout failed", s)); 5477 retval = EFAULT; 5478 break; 5479 } 5480 5481 /* If user just wanted num_paths, then return */ 5482 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5483 num_paths == 0) { 5484 break; 5485 } 5486 5487 /* Set num_paths to value as much as can be sent to userland */ 5488 if (num_paths > pioc->buf_elem) { 5489 num_paths = pioc->buf_elem; 5490 } 5491 5492 /* Allocate memory and get userland pointers */ 5493 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5494 pioc, mode, s) != 0) { 5495 retval = EFAULT; 5496 break; 5497 } 5498 ASSERT(upibuf != NULL); 5499 ASSERT(kpibuf != NULL); 5500 5501 /* 5502 * Get the path information and send it to userland. 
5503 */ 5504 if (vhci_get_phci_path_list(pdip, kpibuf, num_paths) 5505 != MDI_SUCCESS) { 5506 retval = ENXIO; 5507 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5508 break; 5509 } 5510 5511 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5512 pioc, mode, s)) { 5513 retval = EFAULT; 5514 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5515 break; 5516 } 5517 5518 /* Free the memory allocated for path information */ 5519 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5520 break; 5521 } 5522 5523 case SCSI_VHCI_GET_CLIENT_NAME: 5524 { 5525 dev_info_t *cdip, *pdip; 5526 5527 /* Get PHCI path and device address from user land */ 5528 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5529 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5530 retval = EFAULT; 5531 break; 5532 } 5533 5534 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5535 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5536 5537 /* Get the PHCI dip */ 5538 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5539 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5540 "phci dip doesn't exist. invalid path <%s>", 5541 s, phci_path)); 5542 retval = ENXIO; 5543 break; 5544 } 5545 5546 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5547 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5548 "pathinfo doesn't exist. invalid device addr", s)); 5549 retval = ENXIO; 5550 break; 5551 } 5552 5553 /* Get the client device pathname and send to userland */ 5554 cdip = mdi_pi_get_client(pip); 5555 vhci_ioc_devi_to_path(cdip, client_path); 5556 5557 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5558 "client <%s>", s, client_path)); 5559 5560 if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) { 5561 retval = EFAULT; 5562 break; 5563 } 5564 break; 5565 } 5566 5567 case SCSI_VHCI_PATH_ONLINE: 5568 case SCSI_VHCI_PATH_OFFLINE: 5569 case SCSI_VHCI_PATH_STANDBY: 5570 case SCSI_VHCI_PATH_TEST: 5571 { 5572 dev_info_t *pdip; /* PHCI dip */ 5573 5574 /* Get PHCI path and device address from user land */ 5575 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5576 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5577 retval = EFAULT; 5578 break; 5579 } 5580 5581 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5582 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5583 5584 /* Get the PHCI dip */ 5585 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5586 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5587 "phci dip doesn't exist. invalid path <%s>", 5588 s, phci_path)); 5589 retval = ENXIO; 5590 break; 5591 } 5592 5593 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5594 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5595 "pathinfo doesn't exist. 
invalid device addr", s)); 5596 retval = ENXIO; 5597 break; 5598 } 5599 5600 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5601 "Calling MDI function to change device state", s)); 5602 5603 switch (cmd) { 5604 case SCSI_VHCI_PATH_ONLINE: 5605 retval = mdi_pi_online(pip, 0); 5606 break; 5607 5608 case SCSI_VHCI_PATH_OFFLINE: 5609 retval = mdi_pi_offline(pip, 0); 5610 break; 5611 5612 case SCSI_VHCI_PATH_STANDBY: 5613 retval = mdi_pi_standby(pip, 0); 5614 break; 5615 5616 case SCSI_VHCI_PATH_TEST: 5617 break; 5618 } 5619 break; 5620 } 5621 5622 case SCSI_VHCI_SWITCH_TO_CNTLR: 5623 { 5624 dev_info_t *cdip; 5625 struct scsi_device *devp; 5626 5627 /* Get the client device pathname */ 5628 if (ddi_copyin(piocsc->client, client_path, 5629 MAXPATHLEN, mode)) { 5630 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5631 "client_path copyin failed", s)); 5632 retval = EFAULT; 5633 break; 5634 } 5635 5636 /* Get the path class to which user wants to switch */ 5637 if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) { 5638 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5639 "controller_class copyin failed", s)); 5640 retval = EFAULT; 5641 break; 5642 } 5643 5644 /* Perform validity checks */ 5645 if ((cdip = mdi_client_path2devinfo(vdip, 5646 client_path)) == NULL) { 5647 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5648 "client dip doesn't exist. invalid path <%s>", 5649 s, client_path)); 5650 retval = ENXIO; 5651 break; 5652 } 5653 5654 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func " 5655 "to switch controller")); 5656 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> " 5657 "class <%s>", client_path, paddr)); 5658 5659 if (strcmp(paddr, PCLASS_PRIMARY) && 5660 strcmp(paddr, PCLASS_SECONDARY)) { 5661 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5662 "invalid path class <%s>", s, paddr)); 5663 retval = ENXIO; 5664 break; 5665 } 5666 5667 devp = ddi_get_driver_private(cdip); 5668 if (devp == NULL) { 5669 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5670 "invalid scsi device <%s>", s, client_path)); 5671 retval = ENXIO; 5672 break; 5673 } 5674 vlun = ADDR2VLUN(&devp->sd_address); 5675 ASSERT(vlun); 5676 5677 /* 5678 * Check to see if the device has only one pclass, PRIMARY. 5679 * If so, this device doesn't support failover. It is assumed 5680 * that a device with only one pclass has PRIMARY, as that's 5681 * the case today. If this is not true and in the future other 5682 * symmetric devices are supported with another pclass, this 5683 * IOCTL will have to be overhauled anyway, as now the only 5684 * arguments it accepts are PRIMARY and SECONDARY.
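 *
 * Sketch of the check below: (*fo->sfo_pathclass_next)(NULL, ...)
 * returns the first path class, and passing PCLASS_PRIMARY asks
 * for the class that follows "primary"; a nonzero return there
 * means "primary" is the only class, so the switch request is
 * rejected with ENOTSUP.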
5685 */ 5686 fo = vlun->svl_fops; 5687 if ((*fo->sfo_pathclass_next)(PCLASS_PRIMARY, &pclass, 5688 vlun->svl_fops_ctpriv)) { 5689 retval = ENOTSUP; 5690 break; 5691 } 5692 5693 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 5694 mutex_enter(&vlun->svl_mutex); 5695 if (vlun->svl_active_pclass != NULL) { 5696 if (strcmp(vlun->svl_active_pclass, paddr) == 0) { 5697 mutex_exit(&vlun->svl_mutex); 5698 retval = EALREADY; 5699 VHCI_RELEASE_LUN(vlun); 5700 break; 5701 } 5702 } 5703 mutex_exit(&vlun->svl_mutex); 5704 /* Call mdi function to cause a switch over */ 5705 retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC); 5706 if (retval == MDI_SUCCESS) { 5707 retval = 0; 5708 } else if (retval == MDI_BUSY) { 5709 retval = EBUSY; 5710 } else { 5711 retval = EIO; 5712 } 5713 VHCI_RELEASE_LUN(vlun); 5714 break; 5715 } 5716 5717 case SCSI_VHCI_PATH_ENABLE: 5718 case SCSI_VHCI_PATH_DISABLE: 5719 { 5720 dev_info_t *cdip, *pdip; 5721 5722 /* 5723 * Get client device path from user land 5724 */ 5725 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5726 retval = EFAULT; 5727 break; 5728 } 5729 5730 /* 5731 * Get Phci device path from user land 5732 */ 5733 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5734 retval = EFAULT; 5735 break; 5736 } 5737 5738 /* 5739 * Get the devinfo for the Phci. 5740 */ 5741 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5742 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5743 "phci dip doesn't exist. invalid path <%s>", 5744 s, phci_path)); 5745 retval = ENXIO; 5746 break; 5747 } 5748 5749 /* 5750 * If the client path is set to /scsi_vhci then we need 5751 * to do the operation on all the clients so set cdip to NULL. 5752 * Else, try to get the client dip. 5753 */ 5754 if (strcmp(client_path, "/scsi_vhci") == 0) { 5755 cdip = NULL; 5756 } else { 5757 if ((cdip = mdi_client_path2devinfo(vdip, 5758 client_path)) == NULL) { 5759 retval = ENXIO; 5760 VHCI_DEBUG(1, (CE_WARN, NULL, 5761 "!vhci_ioctl: ioctl <%s> client dip " 5762 "doesn't exist. invalid path <%s>", 5763 s, client_path)); 5764 break; 5765 } 5766 } 5767 5768 if (cmd == SCSI_VHCI_PATH_ENABLE) 5769 retval = mdi_pi_enable(cdip, pdip, USER_DISABLE); 5770 else 5771 retval = mdi_pi_disable(cdip, pdip, USER_DISABLE); 5772 5773 break; 5774 } 5775 5776 case SCSI_VHCI_GET_TARGET_LONGNAME: 5777 { 5778 uint_t pid = pioc->buf_elem; 5779 char *target_port; 5780 mod_hash_val_t hv; 5781 5782 /* targetmap lookup of 'target-port' by <pid> */ 5783 if (mod_hash_find(vhci_targetmap_bypid, 5784 (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) { 5785 /* 5786 * NOTE: failure to find the mapping is OK for guid 5787 * based 'target-port' values. 
5788 */ 5789 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5790 "targetport mapping doesn't exist: pid %d", 5791 s, pid)); 5792 retval = ENXIO; 5793 break; 5794 } 5795 5796 /* copyout 'target-port' result */ 5797 target_port = (char *)hv; 5798 if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) { 5799 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5800 "targetport copyout failed: len: %d", 5801 s, (int)strlen(target_port))); 5802 retval = EFAULT; 5803 } 5804 break; 5805 } 5806 5807 #ifdef DEBUG 5808 case SCSI_VHCI_CONFIGURE_PHCI: 5809 { 5810 dev_info_t *pdip; 5811 5812 /* Get PHCI path and device address from user land */ 5813 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5814 retval = EFAULT; 5815 break; 5816 } 5817 5818 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5819 "phci <%s>", s, phci_path)); 5820 5821 /* Get the PHCI dip */ 5822 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5823 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5824 "phci dip doesn't exist. invalid path <%s>", 5825 s, phci_path)); 5826 retval = ENXIO; 5827 break; 5828 } 5829 5830 if (ndi_devi_config(pdip, 5831 NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) { 5832 retval = EIO; 5833 } 5834 5835 ddi_release_devi(pdip); 5836 break; 5837 } 5838 5839 case SCSI_VHCI_UNCONFIGURE_PHCI: 5840 { 5841 dev_info_t *pdip; 5842 5843 /* Get PHCI path and device address from user land */ 5844 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5845 retval = EFAULT; 5846 break; 5847 } 5848 5849 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5850 "phci <%s>", s, phci_path)); 5851 5852 /* Get the PHCI dip */ 5853 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5854 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5855 "phci dip doesn't exist. 
invalid path <%s>", 5856 s, phci_path)); 5857 retval = ENXIO; 5858 break; 5859 } 5860 5861 if (ndi_devi_unconfig(pdip, 5862 NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) { 5863 retval = EBUSY; 5864 } 5865 5866 ddi_release_devi(pdip); 5867 break; 5868 } 5869 #endif 5870 } 5871 5872 end: 5873 /* Free the memory allocated above */ 5874 if (phci_path != NULL) { 5875 kmem_free(phci_path, MAXPATHLEN); 5876 } 5877 if (client_path != NULL) { 5878 kmem_free(client_path, MAXPATHLEN); 5879 } 5880 if (paddr != NULL) { 5881 kmem_free(paddr, MAXNAMELEN); 5882 } 5883 return (retval); 5884 } 5885 5886 /* 5887 * devctl IOCTL support for client device DR 5888 */ 5889 /* ARGSUSED */ 5890 int 5891 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 5892 int *rvalp) 5893 { 5894 dev_info_t *self; 5895 dev_info_t *child; 5896 scsi_hba_tran_t *hba; 5897 struct devctl_iocdata *dcp; 5898 struct scsi_vhci *vhci; 5899 int rv = 0; 5900 int retval = 0; 5901 scsi_vhci_priv_t *svp; 5902 mdi_pathinfo_t *pip; 5903 5904 if ((vhci = ddi_get_soft_state(vhci_softstate, 5905 MINOR2INST(getminor(dev)))) == NULL) 5906 return (ENXIO); 5907 5908 /* 5909 * check if :devctl minor device has been opened 5910 */ 5911 mutex_enter(&vhci->vhci_mutex); 5912 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5913 mutex_exit(&vhci->vhci_mutex); 5914 return (ENXIO); 5915 } 5916 mutex_exit(&vhci->vhci_mutex); 5917 5918 self = vhci->vhci_dip; 5919 hba = ddi_get_driver_private(self); 5920 if (hba == NULL) 5921 return (ENXIO); 5922 5923 /* 5924 * We can use the generic implementation for these ioctls 5925 */ 5926 switch (cmd) { 5927 case DEVCTL_DEVICE_GETSTATE: 5928 case DEVCTL_DEVICE_ONLINE: 5929 case DEVCTL_DEVICE_OFFLINE: 5930 case DEVCTL_DEVICE_REMOVE: 5931 case DEVCTL_BUS_GETSTATE: 5932 return (ndi_devctl_ioctl(self, cmd, arg, mode, 0)); 5933 } 5934 5935 /* 5936 * read devctl ioctl data 5937 */ 5938 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) 5939 return (EFAULT); 5940 5941 switch (cmd) { 5942 5943 case DEVCTL_DEVICE_RESET: 5944 /* 5945 * lookup and hold child device 5946 */ 5947 if ((child = ndi_devi_find(self, ndi_dc_getname(dcp), 5948 ndi_dc_getaddr(dcp))) == NULL) { 5949 rv = ENXIO; 5950 break; 5951 } 5952 retval = mdi_select_path(child, NULL, 5953 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), 5954 NULL, &pip); 5955 if ((retval != MDI_SUCCESS) || (pip == NULL)) { 5956 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:" 5957 "Unable to get a path, dip 0x%p", (void *)child)); 5958 rv = ENXIO; 5959 break; 5960 } 5961 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 5962 if (vhci_recovery_reset(svp->svp_svl, 5963 &svp->svp_psd->sd_address, TRUE, 5964 VHCI_DEPTH_TARGET) == 0) { 5965 VHCI_DEBUG(1, (CE_NOTE, NULL, 5966 "!vhci_ioctl(pip:%p): " 5967 "reset failed\n", (void *)pip)); 5968 rv = ENXIO; 5969 } 5970 mdi_rele_path(pip); 5971 break; 5972 5973 case DEVCTL_BUS_QUIESCE: 5974 case DEVCTL_BUS_UNQUIESCE: 5975 case DEVCTL_BUS_RESET: 5976 case DEVCTL_BUS_RESETALL: 5977 #ifdef DEBUG 5978 case DEVCTL_BUS_CONFIGURE: 5979 case DEVCTL_BUS_UNCONFIGURE: 5980 #endif 5981 rv = ENOTSUP; 5982 break; 5983 5984 default: 5985 rv = ENOTTY; 5986 } /* end of outer switch */ 5987 5988 ndi_dc_freehdl(dcp); 5989 return (rv); 5990 } 5991 5992 /* 5993 * Routine to get the PHCI pathname from ioctl structures in userland 5994 */ 5995 /* ARGSUSED */ 5996 static int 5997 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path, 5998 int mode, caddr_t s) 5999 { 6000 int retval = 0; 6001 6002 if (ddi_copyin(pioc->phci, phci_path, 
MAXPATHLEN, mode)) { 6003 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> " 6004 "phci_path copyin failed", s)); 6005 retval = EFAULT; 6006 } 6007 return (retval); 6008 6009 } 6010 6011 6012 /* 6013 * Routine to get the Client device pathname from ioctl structures in userland 6014 */ 6015 /* ARGSUSED */ 6016 static int 6017 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path, 6018 int mode, caddr_t s) 6019 { 6020 int retval = 0; 6021 6022 if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) { 6023 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: " 6024 "ioctl <%s> client_path copyin failed", s)); 6025 retval = EFAULT; 6026 } 6027 return (retval); 6028 } 6029 6030 6031 /* 6032 * Routine to get the physical device address from the ioctl structure in userland 6033 */ 6034 /* ARGSUSED */ 6035 static int 6036 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s) 6037 { 6038 int retval = 0; 6039 6040 if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) { 6041 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: " 6042 "ioctl <%s> device addr copyin failed", s)); 6043 retval = EFAULT; 6044 } 6045 return (retval); 6046 } 6047 6048 6049 /* 6050 * Routine to send the client device pathname to userland. 6051 */ 6052 /* ARGSUSED */ 6053 static int 6054 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc, 6055 int mode, caddr_t s) 6056 { 6057 int retval = 0; 6058 6059 if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) { 6060 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: " 6061 "ioctl <%s> client_path copyout failed", s)); 6062 retval = EFAULT; 6063 } 6064 return (retval); 6065 } 6066 6067 6068 /* 6069 * Routine to translate a dev_info pointer (dip) to a device pathname. 6070 */ 6071 static void 6072 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path) 6073 { 6074 (void) ddi_pathname(dip, path); 6075 } 6076 6077 6078 /* 6079 * vhci_get_phci_path_list: 6080 * get information about devices associated with a 6081 * given PHCI device. 6082 * 6083 * Return Values: 6084 * path information elements 6085 */ 6086 int 6087 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf, 6088 uint_t num_elems) 6089 { 6090 uint_t count, done; 6091 mdi_pathinfo_t *pip; 6092 sv_path_info_t *ret_pip; 6093 int status; 6094 size_t prop_size; 6095 int circular; 6096 6097 /* 6098 * Get the PHCI structure and retrieve the path information 6099 * from the GUID hash table.
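 * The loop below walks the mdi_get_next_client_path() list under
 * ndi_devi_enter(), filling at most num_elems entries of pibuf with
 * the pHCI pathname, the path address, the path state, and (when the
 * caller supplied a buffer) the packed path properties.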
6100 */ 6101 6102 ret_pip = pibuf; 6103 count = 0; 6104 6105 ndi_devi_enter(pdip, &circular); 6106 6107 done = (count >= num_elems); 6108 pip = mdi_get_next_client_path(pdip, NULL); 6109 while (pip && !done) { 6110 mdi_pi_lock(pip); 6111 (void) ddi_pathname(mdi_pi_get_phci(pip), 6112 ret_pip->device.ret_phci); 6113 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6114 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6115 &ret_pip->ret_ext_state); 6116 6117 status = mdi_prop_size(pip, &prop_size); 6118 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6119 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6120 } 6121 6122 #ifdef DEBUG 6123 if (status != MDI_SUCCESS) { 6124 VHCI_DEBUG(2, (CE_WARN, NULL, 6125 "!vhci_get_phci_path_list: " 6126 "phci <%s>, prop size failure 0x%x", 6127 ret_pip->device.ret_phci, status)); 6128 } 6129 #endif /* DEBUG */ 6130 6131 6132 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6133 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6134 status = mdi_prop_pack(pip, 6135 &ret_pip->ret_prop.buf, 6136 ret_pip->ret_prop.buf_size); 6137 6138 #ifdef DEBUG 6139 if (status != MDI_SUCCESS) { 6140 VHCI_DEBUG(2, (CE_WARN, NULL, 6141 "!vhci_get_phci_path_list: " 6142 "phci <%s>, prop pack failure 0x%x", 6143 ret_pip->device.ret_phci, status)); 6144 } 6145 #endif /* DEBUG */ 6146 } 6147 6148 mdi_pi_unlock(pip); 6149 pip = mdi_get_next_client_path(pdip, pip); 6150 ret_pip++; 6151 count++; 6152 done = (count >= num_elems); 6153 } 6154 6155 ndi_devi_exit(pdip, circular); 6156 6157 return (MDI_SUCCESS); 6158 } 6159 6160 6161 /* 6162 * vhci_get_client_path_list: 6163 * get information about various paths associated with a 6164 * given client device. 6165 * 6166 * Return Values: 6167 * path information elements 6168 */ 6169 int 6170 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf, 6171 uint_t num_elems) 6172 { 6173 uint_t count, done; 6174 mdi_pathinfo_t *pip; 6175 sv_path_info_t *ret_pip; 6176 int status; 6177 size_t prop_size; 6178 int circular; 6179 6180 ret_pip = pibuf; 6181 count = 0; 6182 6183 ndi_devi_enter(cdip, &circular); 6184 6185 done = (count >= num_elems); 6186 pip = mdi_get_next_phci_path(cdip, NULL); 6187 while (pip && !done) { 6188 mdi_pi_lock(pip); 6189 (void) ddi_pathname(mdi_pi_get_phci(pip), 6190 ret_pip->device.ret_phci); 6191 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6192 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6193 &ret_pip->ret_ext_state); 6194 6195 status = mdi_prop_size(pip, &prop_size); 6196 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6197 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6198 } 6199 6200 #ifdef DEBUG 6201 if (status != MDI_SUCCESS) { 6202 VHCI_DEBUG(2, (CE_WARN, NULL, 6203 "!vhci_get_client_path_list: " 6204 "phci <%s>, prop size failure 0x%x", 6205 ret_pip->device.ret_phci, status)); 6206 } 6207 #endif /* DEBUG */ 6208 6209 6210 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6211 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6212 status = mdi_prop_pack(pip, 6213 &ret_pip->ret_prop.buf, 6214 ret_pip->ret_prop.buf_size); 6215 6216 #ifdef DEBUG 6217 if (status != MDI_SUCCESS) { 6218 VHCI_DEBUG(2, (CE_WARN, NULL, 6219 "!vhci_get_client_path_list: " 6220 "phci <%s>, prop pack failure 0x%x", 6221 ret_pip->device.ret_phci, status)); 6222 } 6223 #endif /* DEBUG */ 6224 } 6225 6226 mdi_pi_unlock(pip); 6227 pip = mdi_get_next_phci_path(cdip, pip); 6228 ret_pip++; 6229 count++; 6230 done = (count >= num_elems); 6231 } 6232 
6233 ndi_devi_exit(cdip, circular); 6234 6235 return (MDI_SUCCESS); 6236 } 6237 6238 6239 /* 6240 * Routine to get the ioctl argument structure from userland. 6241 */ 6242 /* ARGSUSED */ 6243 static int 6244 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s) 6245 { 6246 int retval = 0; 6247 6248 #ifdef _MULTI_DATAMODEL 6249 switch (ddi_model_convert_from(mode & FMODELS)) { 6250 case DDI_MODEL_ILP32: 6251 { 6252 sv_iocdata32_t ioc32; 6253 6254 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6255 retval = EFAULT; 6256 break; 6257 } 6258 pioc->client = (caddr_t)(uintptr_t)ioc32.client; 6259 pioc->phci = (caddr_t)(uintptr_t)ioc32.phci; 6260 pioc->addr = (caddr_t)(uintptr_t)ioc32.addr; 6261 pioc->buf_elem = (uint_t)ioc32.buf_elem; 6262 pioc->ret_buf = (sv_path_info_t *)(uintptr_t)ioc32.ret_buf; 6263 pioc->ret_elem = (uint_t *)(uintptr_t)ioc32.ret_elem; 6264 break; 6265 } 6266 6267 case DDI_MODEL_NONE: 6268 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6269 retval = EFAULT; 6270 break; 6271 } 6272 break; 6273 } 6274 #else /* _MULTI_DATAMODEL */ 6275 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6276 retval = EFAULT; 6277 } 6278 #endif /* _MULTI_DATAMODEL */ 6279 6280 #ifdef DEBUG 6281 if (retval) { 6282 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6283 "iocdata copyin failed", s)); 6284 } 6285 #endif 6286 6287 return (retval); 6288 } 6289 6290 6291 /* 6292 * Routine to get the ioctl argument for the ioctl causing controller switchover. 6293 */ 6294 /* ARGSUSED */ 6295 static int 6296 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc, 6297 int mode, caddr_t s) 6298 { 6299 int retval = 0; 6300 6301 #ifdef _MULTI_DATAMODEL 6302 switch (ddi_model_convert_from(mode & FMODELS)) { 6303 case DDI_MODEL_ILP32: 6304 { 6305 sv_switch_to_cntlr_iocdata32_t ioc32; 6306 6307 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6308 retval = EFAULT; 6309 break; 6310 } 6311 piocsc->client = (caddr_t)(uintptr_t)ioc32.client; 6312 piocsc->class = (caddr_t)(uintptr_t)ioc32.class; 6313 break; 6314 } 6315 6316 case DDI_MODEL_NONE: 6317 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6318 retval = EFAULT; 6319 } 6320 break; 6321 } 6322 #else /* _MULTI_DATAMODEL */ 6323 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6324 retval = EFAULT; 6325 } 6326 #endif /* _MULTI_DATAMODEL */ 6327 6328 #ifdef DEBUG 6329 if (retval) { 6330 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6331 "switch_to_cntlr_iocdata copyin failed", s)); 6332 } 6333 #endif 6334 6335 return (retval); 6336 } 6337 6338 6339 /* 6340 * Routine to allocate memory for the path information structures. 6341 * It allocates two chunks of memory - one for keeping the userland 6342 * pointers/values for path information and path properties, and a 6343 * second for the kernel memory allocated for the path properties. 6344 * These path properties are finally copied out to userland.
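 *
 * For illustration (this describes the hypothetical userland caller,
 * not code in this file): the caller passes an array of
 * sv_path_info_t in pioc->ret_buf whose ret_prop.buf and
 * ret_prop.buf_size fields describe its per-path property buffers;
 * this routine snapshots those userland pointers in 'upibuf' and
 * allocates matching kernel-side buffers in 'kpibuf'.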
6345 */ 6346 /* ARGSUSED */ 6347 static int 6348 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf, 6349 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6350 { 6351 sv_path_info_t *pi; 6352 uint_t bufsize; 6353 int retval = 0; 6354 int index; 6355 6356 /* Allocate memory */ 6357 *upibuf = (sv_path_info_t *) 6358 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6359 ASSERT(*upibuf != NULL); 6360 *kpibuf = (sv_path_info_t *) 6361 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6362 ASSERT(*kpibuf != NULL); 6363 6364 /* 6365 * Get the path info structure from the user space. 6366 * We are interested in the following fields: 6367 * - user size of buffer for per path properties. 6368 * - user address of buffer for path info properties. 6369 * - user pointer for returning actual buffer size 6370 * Keep these fields in the 'upibuf' structures. 6371 * Allocate buffer for per path info properties in kernel 6372 * structure ('kpibuf'). 6373 * Size of these buffers will be equal to the size of buffers 6374 * in the user space. 6375 */ 6376 #ifdef _MULTI_DATAMODEL 6377 switch (ddi_model_convert_from(mode & FMODELS)) { 6378 case DDI_MODEL_ILP32: 6379 { 6380 sv_path_info32_t *src; 6381 sv_path_info32_t pi32; 6382 6383 src = (sv_path_info32_t *)pioc->ret_buf; 6384 pi = (sv_path_info_t *)*upibuf; 6385 for (index = 0; index < num_paths; index++, src++, pi++) { 6386 if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) { 6387 retval = EFAULT; 6388 break; 6389 } 6390 6391 pi->ret_prop.buf_size = 6392 (uint_t)pi32.ret_prop.buf_size; 6393 pi->ret_prop.ret_buf_size = 6394 (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size; 6395 pi->ret_prop.buf = 6396 (caddr_t)(uintptr_t)pi32.ret_prop.buf; 6397 } 6398 break; 6399 } 6400 6401 case DDI_MODEL_NONE: 6402 if (ddi_copyin(pioc->ret_buf, *upibuf, 6403 sizeof (sv_path_info_t) * num_paths, mode)) { 6404 retval = EFAULT; 6405 } 6406 break; 6407 } 6408 #else /* _MULTI_DATAMODEL */ 6409 if (ddi_copyin(pioc->ret_buf, *upibuf, 6410 sizeof (sv_path_info_t) * num_paths, mode)) { 6411 retval = EFAULT; 6412 } 6413 #endif /* _MULTI_DATAMODEL */ 6414 6415 if (retval != 0) { 6416 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: " 6417 "ioctl <%s> normal: path_info copyin failed", s)); 6418 kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths); 6419 kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths); 6420 *upibuf = NULL; 6421 *kpibuf = NULL; 6422 return (retval); 6423 } 6424 6425 /* 6426 * Allocate memory for per path properties. 6427 */ 6428 for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) { 6429 bufsize = (*upibuf)[index].ret_prop.buf_size; 6430 6431 if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) { 6432 pi->ret_prop.buf_size = bufsize; 6433 pi->ret_prop.buf = (caddr_t) 6434 kmem_zalloc(bufsize, KM_SLEEP); 6435 ASSERT(pi->ret_prop.buf != NULL); 6436 } else { 6437 pi->ret_prop.buf_size = 0; 6438 pi->ret_prop.buf = NULL; 6439 } 6440 6441 if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) { 6442 pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc( 6443 sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP); 6444 ASSERT(pi->ret_prop.ret_buf_size != NULL); 6445 } else { 6446 pi->ret_prop.ret_buf_size = NULL; 6447 } 6448 } 6449 6450 return (0); 6451 } 6452 6453 6454 /* 6455 * Routine to free memory for the path information structures. 6456 * This is the memory which was allocated earlier. 
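 * (That is, the memory allocated by vhci_ioc_alloc_pathinfo() above:
 * the per-path property buffers hanging off 'kpibuf' are freed first,
 * then the 'upibuf' and 'kpibuf' arrays themselves.)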
6457 */ 6458 /* ARGSUSED */ 6459 static void 6460 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6461 uint_t num_paths) 6462 { 6463 sv_path_info_t *pi; 6464 int index; 6465 6466 /* Free memory for per path properties */ 6467 for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) { 6468 if (pi->ret_prop.ret_buf_size != NULL) { 6469 kmem_free(pi->ret_prop.ret_buf_size, 6470 sizeof (*pi->ret_prop.ret_buf_size)); 6471 } 6472 6473 if (pi->ret_prop.buf != NULL) { 6474 kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size); 6475 } 6476 } 6477 6478 /* Free memory for path info structures */ 6479 kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths); 6480 kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths); 6481 } 6482 6483 6484 /* 6485 * Routine to copy path information and path properties to userland. 6486 */ 6487 /* ARGSUSED */ 6488 static int 6489 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6490 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6491 { 6492 int retval = 0, index; 6493 sv_path_info_t *upi_ptr; 6494 sv_path_info32_t *upi32_ptr; 6495 6496 #ifdef _MULTI_DATAMODEL 6497 switch (ddi_model_convert_from(mode & FMODELS)) { 6498 case DDI_MODEL_ILP32: 6499 goto copy_32bit; 6500 6501 case DDI_MODEL_NONE: 6502 goto copy_normal; 6503 } 6504 #else /* _MULTI_DATAMODEL */ 6505 6506 goto copy_normal; 6507 6508 #endif /* _MULTI_DATAMODEL */ 6509 6510 copy_normal: 6511 6512 /* 6513 * Copy path information and path properties to user land. 6514 * Pointer fields inside the path property structure were 6515 * saved in the 'upibuf' structure earlier. 6516 */ 6517 upi_ptr = pioc->ret_buf; 6518 for (index = 0; index < num_paths; index++) { 6519 if (ddi_copyout(kpibuf[index].device.ret_ct, 6520 upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6521 retval = EFAULT; 6522 break; 6523 } 6524 6525 if (ddi_copyout(kpibuf[index].ret_addr, 6526 upi_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6527 retval = EFAULT; 6528 break; 6529 } 6530 6531 if (ddi_copyout(&kpibuf[index].ret_state, 6532 &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state), 6533 mode)) { 6534 retval = EFAULT; 6535 break; 6536 } 6537 6538 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6539 &upi_ptr[index].ret_ext_state, 6540 sizeof (kpibuf[index].ret_ext_state), mode)) { 6541 retval = EFAULT; 6542 break; 6543 } 6544 6545 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6546 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6547 upibuf[index].ret_prop.ret_buf_size, 6548 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6549 retval = EFAULT; 6550 break; 6551 } 6552 6553 if ((kpibuf[index].ret_prop.buf != NULL) && 6554 ddi_copyout(kpibuf[index].ret_prop.buf, 6555 upibuf[index].ret_prop.buf, 6556 upibuf[index].ret_prop.buf_size, mode)) { 6557 retval = EFAULT; 6558 break; 6559 } 6560 } 6561 6562 #ifdef DEBUG 6563 if (retval) { 6564 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6565 "normal: path_info copyout failed", s)); 6566 } 6567 #endif 6568 6569 return (retval); 6570 6571 copy_32bit: 6572 /* 6573 * Copy path information and path properties to user land. 6574 * Pointer fields inside the path property structure were 6575 * saved in the 'upibuf' structure earlier. 
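 * This leg services ILP32 callers on an LP64 kernel: the user buffer
 * is laid out as sv_path_info32_t, so the copyout destinations are
 * computed from upi32_ptr rather than upi_ptr.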
6576 */ 6577 upi32_ptr = (sv_path_info32_t *)pioc->ret_buf; 6578 for (index = 0; index < num_paths; index++) { 6579 if (ddi_copyout(kpibuf[index].device.ret_ct, 6580 upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6581 retval = EFAULT; 6582 break; 6583 } 6584 6585 if (ddi_copyout(kpibuf[index].ret_addr, 6586 upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6587 retval = EFAULT; 6588 break; 6589 } 6590 6591 if (ddi_copyout(&kpibuf[index].ret_state, 6592 &upi32_ptr[index].ret_state, 6593 sizeof (kpibuf[index].ret_state), mode)) { 6594 retval = EFAULT; 6595 break; 6596 } 6597 6598 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6599 &upi32_ptr[index].ret_ext_state, 6600 sizeof (kpibuf[index].ret_ext_state), mode)) { 6601 retval = EFAULT; 6602 break; 6603 } 6604 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6605 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6606 upibuf[index].ret_prop.ret_buf_size, 6607 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6608 retval = EFAULT; 6609 break; 6610 } 6611 6612 if ((kpibuf[index].ret_prop.buf != NULL) && 6613 ddi_copyout(kpibuf[index].ret_prop.buf, 6614 upibuf[index].ret_prop.buf, 6615 upibuf[index].ret_prop.buf_size, mode)) { 6616 retval = EFAULT; 6617 break; 6618 } 6619 } 6620 6621 #ifdef DEBUG 6622 if (retval) { 6623 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6624 "32bit: path_info copyout failed", s)); 6625 } 6626 #endif 6627 6628 return (retval); 6629 } 6630 6631 6632 /* 6633 * vhci_failover() 6634 * This routine expects VHCI_HOLD_LUN before being invoked. It can be invoked 6635 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC. For asynchronous failovers 6636 * this routine shall VHCI_RELEASE_LUN on exiting. For synchronous failovers 6637 * it is the caller's responsibility to release the lun. 6638 */ 6639 6640 /* ARGSUSED */ 6641 static int 6642 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags) 6643 { 6644 char *guid; 6645 scsi_vhci_lun_t *vlun = NULL; 6646 struct scsi_vhci *vhci; 6647 mdi_pathinfo_t *pip, *npip; 6648 char *s_pclass, *pclass1, *pclass2, *pclass; 6649 char active_pclass_copy[255], *active_pclass_ptr; 6650 char *ptr1, *ptr2; 6651 mdi_pathinfo_state_t pi_state; 6652 uint32_t pi_ext_state; 6653 scsi_vhci_priv_t *svp; 6654 struct scsi_device *sd; 6655 struct scsi_failover_ops *sfo; 6656 int sps; /* mdi_select_path() status */ 6657 int activation_done = 0; 6658 int rval, retval = MDI_FAILURE; 6659 int reserve_pending, check_condition, UA_condition; 6660 struct scsi_pkt *pkt; 6661 struct buf *bp; 6662 6663 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 6664 sd = ddi_get_driver_private(cdip); 6665 vlun = ADDR2VLUN(&sd->sd_address); 6666 ASSERT(vlun != 0); 6667 ASSERT(VHCI_LUN_IS_HELD(vlun)); 6668 guid = vlun->svl_lun_wwn; 6669 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid)); 6670 vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s " 6671 "(GUID %s)", ddi_node_name(cdip), guid); 6672 6673 /* 6674 * Let's maintain a local copy of vlun->svl_active_pclass for the 6675 * rest of the processing. Accessing the field directly in the 6676 * loop below breaks the loop logic, especially when the field 6677 * gets updated by other threads updating path status etc., and 6678 * causes the 'paths are not currently available' condition to be 6679 * declared prematurely.
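 * The snapshot lives on this thread's stack (active_pclass_copy[])
 * and is taken under svl_mutex just below, so the pathclass
 * comparisons that follow run against a stable value.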
6680 */ 6681 mutex_enter(&vlun->svl_mutex); 6682 if (vlun->svl_active_pclass != NULL) { 6683 (void) strlcpy(active_pclass_copy, vlun->svl_active_pclass, 6684 sizeof (active_pclass_copy)); 6685 active_pclass_ptr = &active_pclass_copy[0]; 6686 mutex_exit(&vlun->svl_mutex); 6687 if (vhci_quiesce_paths(vdip, cdip, vlun, guid, 6688 active_pclass_ptr) != 0) { 6689 retval = MDI_FAILURE; 6690 } 6691 } else { 6692 /* 6693 * This can happen only when the available path discovered 6694 * to the device is a STANDBY path. 6695 */ 6696 mutex_exit(&vlun->svl_mutex); 6697 active_pclass_copy[0] = '\0'; 6698 active_pclass_ptr = NULL; 6699 } 6700 6701 sfo = vlun->svl_fops; 6702 ASSERT(sfo != NULL); 6703 pclass1 = s_pclass = active_pclass_ptr; 6704 VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid, 6705 (s_pclass == NULL ? "<none>" : s_pclass))); 6706 6707 next_pathclass: 6708 6709 rval = (*sfo->sfo_pathclass_next)(pclass1, &pclass2, 6710 vlun->svl_fops_ctpriv); 6711 if (rval == ENOENT) { 6712 if (s_pclass == NULL) { 6713 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): " 6714 "failed, no more pathclasses\n", guid)); 6715 goto done; 6716 } else { 6717 (*sfo->sfo_pathclass_next)(NULL, &pclass2, 6718 vlun->svl_fops_ctpriv); 6719 } 6720 } else if (rval == EINVAL) { 6721 vhci_log(CE_NOTE, vdip, "!Failover operation failed for " 6722 "device %s (GUID %s): Invalid path-class %s", 6723 ddi_node_name(cdip), guid, 6724 ((pclass1 == NULL) ? "<none>" : pclass1)); 6725 goto done; 6726 } 6727 if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) { 6728 /* 6729 * paths are not currently available 6730 */ 6731 vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable" 6732 " for device %s (GUID %s)", 6733 ddi_node_name(cdip), guid); 6734 goto done; 6735 } 6736 pip = npip = NULL; 6737 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering " 6738 "%s as failover destination\n", guid, pclass2)); 6739 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip); 6740 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 6741 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no " 6742 "STANDBY paths found (status:%x)!\n", guid, sps)); 6743 pclass1 = pclass2; 6744 goto next_pathclass; 6745 } 6746 do { 6747 pclass = NULL; 6748 if ((mdi_prop_lookup_string(npip, "path-class", 6749 &pclass) != MDI_SUCCESS) || (strcmp(pclass2, 6750 pclass) != 0)) { 6751 VHCI_DEBUG(1, (CE_NOTE, NULL, 6752 "!vhci_failover(5.5)(%s): skipping path " 6753 "%p(%s)...\n", guid, (void *)npip, pclass)); 6754 pip = npip; 6755 sps = mdi_select_path(cdip, NULL, 6756 MDI_SELECT_STANDBY_PATH, pip, &npip); 6757 mdi_rele_path(pip); 6758 (void) mdi_prop_free(pclass); 6759 continue; 6760 } 6761 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 6762 6763 /* 6764 * Issue a READ at a non-zero block on this STANDBY path. 6765 * Purple returns 6766 * 1. RESERVATION_CONFLICT if a reservation is pending 6767 * 2. a POR check condition if a reset happened. 6768 * 3. failover check conditions if one is already in progress.
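 * The reserve_pending/check_condition/UA_condition flags set up
 * below record which of these cases the READ provoked, so that the
 * matching recovery (reset, retry, or path activation) is chosen.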
6769 */ 6770 reserve_pending = 0; 6771 check_condition = 0; 6772 UA_condition = 0; 6773 6774 bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address, 6775 (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL); 6776 if (!bp) { 6777 VHCI_DEBUG(1, (CE_NOTE, NULL, 6778 "vhci_failover !No resources (buf)\n")); 6779 mdi_rele_path(npip); 6780 goto done; 6781 } 6782 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, 6783 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 6784 PKT_CONSISTENT, NULL, NULL); 6785 if (pkt) { 6786 (void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t) 6787 pkt->pkt_cdbp, SCMD_READ, 1, 1, 0); 6788 pkt->pkt_flags = FLAG_NOINTR; 6789 check_path_again: 6790 pkt->pkt_path_instance = mdi_pi_get_path_instance(npip); 6791 pkt->pkt_time = 3*30; 6792 6793 if (scsi_transport(pkt) == TRAN_ACCEPT) { 6794 switch (pkt->pkt_reason) { 6795 case CMD_CMPLT: 6796 switch (SCBP_C(pkt)) { 6797 case STATUS_GOOD: 6798 /* Already failed over */ 6799 activation_done = 1; 6800 break; 6801 case STATUS_RESERVATION_CONFLICT: 6802 reserve_pending = 1; 6803 break; 6804 case STATUS_CHECK: 6805 check_condition = 1; 6806 break; 6807 } 6808 } 6809 } 6810 if (check_condition && 6811 (pkt->pkt_state & STATE_ARQ_DONE)) { 6812 struct scsi_extended_sense *sns = 6813 &(((struct scsi_arq_status *)(uintptr_t) 6814 (pkt->pkt_scbp))->sts_sensedata); 6815 if (sns->es_key == KEY_UNIT_ATTENTION && 6816 sns->es_add_code == 0x29) { 6817 /* Already failed over */ 6818 VHCI_DEBUG(1, (CE_NOTE, NULL, 6819 "!vhci_failover(7)(%s): " 6820 "path 0x%p POR UA condition\n", 6821 guid, (void *)npip)); 6822 if (UA_condition == 0) { 6823 UA_condition = 1; 6824 goto check_path_again; 6825 } 6826 } else { 6827 activation_done = 0; 6828 VHCI_DEBUG(1, (CE_NOTE, NULL, 6829 "!vhci_failover(%s): path 0x%p " 6830 "unhandled chkcond %x %x %x\n", 6831 guid, (void *)npip, sns->es_key, 6832 sns->es_add_code, 6833 sns->es_qual_code)); 6834 } 6835 } 6836 scsi_destroy_pkt(pkt); 6837 } 6838 scsi_free_consistent_buf(bp); 6839 6840 if (activation_done) { 6841 mdi_rele_path(npip); 6842 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6843 "path 0x%p already failedover\n", guid, 6844 (void *)npip)); 6845 break; 6846 } 6847 if (reserve_pending && (vlun->svl_xlf_capable == 0)) { 6848 (void) vhci_recovery_reset(vlun, 6849 &svp->svp_psd->sd_address, 6850 FALSE, VHCI_DEPTH_ALL); 6851 } 6852 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): " 6853 "activating path 0x%p(psd:%p)\n", guid, (void *)npip, 6854 (void *)svp->svp_psd)); 6855 if ((*sfo->sfo_path_activate)(svp->svp_psd, pclass2, 6856 vlun->svl_fops_ctpriv) == 0) { 6857 activation_done = 1; 6858 mdi_rele_path(npip); 6859 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6860 "path 0x%p successfully activated\n", guid, 6861 (void *)npip)); 6862 break; 6863 } 6864 pip = npip; 6865 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, 6866 pip, &npip); 6867 mdi_rele_path(pip); 6868 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 6869 if (activation_done == 0) { 6870 pclass1 = pclass2; 6871 goto next_pathclass; 6872 } 6873 6874 /* 6875 * if we are here, we have succeeded in activating path npip of 6876 * pathclass pclass2; let us validate all paths of pclass2 by 6877 * "ping"-ing each one and mark the good ones ONLINE 6878 * Also, set the state of the paths belonging to the previously 6879 * active pathclass to STANDBY 6880 */ 6881 pip = npip = NULL; 6882 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 6883 MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH), 6884 NULL, 
&npip); 6885 if (npip == NULL || sps != MDI_SUCCESS) { 6886 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for " 6887 "device %s (GUID %s): paths may be busy\n", 6888 ddi_node_name(cdip), guid)); 6889 goto done; 6890 } 6891 do { 6892 (void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state); 6893 if (mdi_prop_lookup_string(npip, "path-class", &pclass) 6894 != MDI_SUCCESS) { 6895 pip = npip; 6896 sps = mdi_select_path(cdip, NULL, 6897 (MDI_SELECT_ONLINE_PATH | 6898 MDI_SELECT_STANDBY_PATH | 6899 MDI_SELECT_USER_DISABLE_PATH), 6900 pip, &npip); 6901 mdi_rele_path(pip); 6902 continue; 6903 } 6904 if (strcmp(pclass, pclass2) == 0) { 6905 if (pi_state == MDI_PATHINFO_STATE_STANDBY) { 6906 svp = (scsi_vhci_priv_t *) 6907 mdi_pi_get_vhci_private(npip); 6908 VHCI_DEBUG(1, (CE_NOTE, NULL, 6909 "!vhci_failover(8)(%s): " 6910 "pinging path 0x%p\n", 6911 guid, (void *)npip)); 6912 if ((*sfo->sfo_path_ping)(svp->svp_psd, 6913 vlun->svl_fops_ctpriv) == 1) { 6914 mdi_pi_set_state(npip, 6915 MDI_PATHINFO_STATE_ONLINE); 6916 VHCI_DEBUG(1, (CE_NOTE, NULL, 6917 "!vhci_failover(9)(%s): " 6918 "path 0x%p ping successful, " 6919 "marked online\n", guid, 6920 (void *)npip)); 6921 MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO); 6922 } 6923 } 6924 } else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass) 6925 == 0)) { 6926 if (pi_state == MDI_PATHINFO_STATE_ONLINE) { 6927 mdi_pi_set_state(npip, 6928 MDI_PATHINFO_STATE_STANDBY); 6929 VHCI_DEBUG(1, (CE_NOTE, NULL, 6930 "!vhci_failover(10)(%s): path 0x%p marked " 6931 "STANDBY\n", guid, (void *)npip)); 6932 MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM); 6933 } 6934 } 6935 (void) mdi_prop_free(pclass); 6936 pip = npip; 6937 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 6938 MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH), 6939 pip, &npip); 6940 mdi_rele_path(pip); 6941 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 6942 6943 /* 6944 * Update the AccessState of related MP-API TPGs 6945 */ 6946 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 6947 6948 vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully " 6949 "for device %s (GUID %s): failed over from %s to %s", 6950 ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" : 6951 s_pclass), pclass2); 6952 ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP); 6953 (void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1)); 6954 mutex_enter(&vlun->svl_mutex); 6955 ptr2 = vlun->svl_active_pclass; 6956 vlun->svl_active_pclass = ptr1; 6957 mutex_exit(&vlun->svl_mutex); 6958 if (ptr2) { 6959 kmem_free(ptr2, strlen(ptr2)+1); 6960 } 6961 mutex_enter(&vhci->vhci_mutex); 6962 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 6963 &vhci->vhci_reset_notify_listf); 6964 /* All reservations are cleared upon these resets. */ 6965 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 6966 mutex_exit(&vhci->vhci_mutex); 6967 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active " 6968 "pathclass for %s is now %s\n", guid, pclass2)); 6969 retval = MDI_SUCCESS; 6970 6971 done: 6972 if (flags == MDI_FAILOVER_ASYNC) { 6973 VHCI_RELEASE_LUN(vlun); 6974 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! " 6975 "releasing lun, as failover was ASYNC\n")); 6976 } else { 6977 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! " 6978 "NOT releasing lun, as failover was SYNC\n")); 6979 } 6980 return (retval); 6981 } 6982 6983 /* 6984 * vhci_client_attached is called after the successful attach of a 6985 * client devinfo node. 
6986 */ 6987 static void 6988 vhci_client_attached(dev_info_t *cdip) 6989 { 6990 mdi_pathinfo_t *pip; 6991 int circular; 6992 6993 /* 6994 * At this point the client has attached and its instance number is 6995 * valid, so we can set up kstats. We need to do this here because it 6996 * is possible for paths to go online prior to client attach, in which 6997 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online 6998 * was a noop. 6999 */ 7000 ndi_devi_enter(cdip, &circular); 7001 for (pip = mdi_get_next_phci_path(cdip, NULL); pip; 7002 pip = mdi_get_next_phci_path(cdip, pip)) 7003 vhci_kstat_create_pathinfo(pip); 7004 ndi_devi_exit(cdip, circular); 7005 } 7006 7007 /* 7008 * quiesce all of the online paths 7009 */ 7010 static int 7011 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun, 7012 char *guid, char *active_pclass_ptr) 7013 { 7014 scsi_vhci_priv_t *svp; 7015 char *s_pclass = NULL; 7016 mdi_pathinfo_t *npip, *pip; 7017 int sps; 7018 7019 /* quiesce currently active paths */ 7020 s_pclass = NULL; 7021 pip = npip = NULL; 7022 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip); 7023 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 7024 return (1); 7025 } 7026 do { 7027 if (mdi_prop_lookup_string(npip, "path-class", 7028 &s_pclass) != MDI_SUCCESS) { 7029 mdi_rele_path(npip); 7030 vhci_log(CE_NOTE, vdip, "!Failover operation failed " 7031 "for device %s (GUID %s) due to an internal " 7032 "error", ddi_node_name(cdip), guid); 7033 return (1); 7034 } 7035 if (strcmp(s_pclass, active_pclass_ptr) == 0) { 7036 /* 7037 * quiesce path. Free s_pclass since 7038 * we don't need it anymore 7039 */ 7040 VHCI_DEBUG(1, (CE_NOTE, NULL, 7041 "!vhci_failover(2)(%s): failing over " 7042 "from %s; quiescing path %p\n", 7043 guid, s_pclass, (void *)npip)); 7044 (void) mdi_prop_free(s_pclass); 7045 svp = (scsi_vhci_priv_t *) 7046 mdi_pi_get_vhci_private(npip); 7047 if (svp == NULL) { 7048 VHCI_DEBUG(1, (CE_NOTE, NULL, 7049 "!vhci_failover(2.5)(%s): no " 7050 "client priv!
%p offlined?\n", 7051 guid, (void *)npip)); 7052 pip = npip; 7053 sps = mdi_select_path(cdip, NULL, 7054 MDI_SELECT_ONLINE_PATH, pip, &npip); 7055 mdi_rele_path(pip); 7056 continue; 7057 } 7058 if (scsi_abort(&svp->svp_psd->sd_address, NULL) 7059 == 0) { 7060 (void) vhci_recovery_reset(vlun, 7061 &svp->svp_psd->sd_address, FALSE, 7062 VHCI_DEPTH_TARGET); 7063 } 7064 mutex_enter(&svp->svp_mutex); 7065 if (svp->svp_cmds == 0) { 7066 VHCI_DEBUG(1, (CE_NOTE, NULL, 7067 "!vhci_failover(3)(%s):" 7068 "quiesced path %p\n", guid, (void *)npip)); 7069 } else { 7070 while (svp->svp_cmds != 0) { 7071 cv_wait(&svp->svp_cv, &svp->svp_mutex); 7072 VHCI_DEBUG(1, (CE_NOTE, NULL, 7073 "!vhci_failover(3.cv)(%s):" 7074 "quiesced path %p\n", guid, 7075 (void *)npip)); 7076 } 7077 } 7078 mutex_exit(&svp->svp_mutex); 7079 } else { 7080 /* 7081 * make sure we free up the memory 7082 */ 7083 (void) mdi_prop_free(s_pclass); 7084 } 7085 pip = npip; 7086 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, 7087 pip, &npip); 7088 mdi_rele_path(pip); 7089 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 7090 return (0); 7091 } 7092 7093 static struct scsi_vhci_lun * 7094 vhci_lun_lookup(dev_info_t *tgt_dip) 7095 { 7096 return ((struct scsi_vhci_lun *) 7097 mdi_client_get_vhci_private(tgt_dip)); 7098 } 7099 7100 static struct scsi_vhci_lun * 7101 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc) 7102 { 7103 struct scsi_vhci_lun *svl; 7104 7105 if (svl = vhci_lun_lookup(tgt_dip)) { 7106 return (svl); 7107 } 7108 7109 svl = kmem_zalloc(sizeof (*svl), KM_SLEEP); 7110 svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP); 7111 (void) strcpy(svl->svl_lun_wwn, guid); 7112 mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL); 7113 cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL); 7114 sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL); 7115 svl->svl_waiting_for_activepath = 1; 7116 svl->svl_sector_size = 1; 7117 mdi_client_set_vhci_private(tgt_dip, svl); 7118 *didalloc = 1; 7119 VHCI_DEBUG(1, (CE_NOTE, NULL, 7120 "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n", 7121 guid, (void *)svl)); 7122 return (svl); 7123 } 7124 7125 static void 7126 vhci_lun_free(dev_info_t *tgt_dip) 7127 { 7128 struct scsi_vhci_lun *dvlp; 7129 char *guid; 7130 struct scsi_device *sd; 7131 7132 /* 7133 * The scsi_device was set to driver private during child node 7134 * initialization in the scsi_hba_bus_ctl().
7135 */ 7136 sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip); 7137 7138 dvlp = (struct scsi_vhci_lun *) 7139 mdi_client_get_vhci_private(tgt_dip); 7140 ASSERT(dvlp != NULL); 7141 7142 mdi_client_set_vhci_private(tgt_dip, NULL); 7143 7144 guid = dvlp->svl_lun_wwn; 7145 ASSERT(guid != NULL); 7146 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid)); 7147 7148 mutex_enter(&dvlp->svl_mutex); 7149 if (dvlp->svl_active_pclass != NULL) { 7150 kmem_free(dvlp->svl_active_pclass, 7151 strlen(dvlp->svl_active_pclass)+1); 7152 } 7153 dvlp->svl_active_pclass = NULL; 7154 mutex_exit(&dvlp->svl_mutex); 7155 7156 if (dvlp->svl_lun_wwn != NULL) { 7157 kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1); 7158 } 7159 dvlp->svl_lun_wwn = NULL; 7160 7161 if (dvlp->svl_fops_name) { 7162 kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1); 7163 } 7164 dvlp->svl_fops_name = NULL; 7165 7166 if (dvlp->svl_fops_ctpriv != NULL) { 7167 dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv); 7168 } 7169 7170 if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG) 7171 taskq_destroy(dvlp->svl_taskq); 7172 7173 mutex_destroy(&dvlp->svl_mutex); 7174 cv_destroy(&dvlp->svl_cv); 7175 sema_destroy(&dvlp->svl_pgr_sema); 7176 kmem_free(dvlp, sizeof (*dvlp)); 7177 /* 7178 * vhci_lun_free may be called before the tgt_dip 7179 * initialization so check if the sd is NULL. 7180 */ 7181 if (sd != NULL) 7182 sd->sd_address.a_hba_tran->tran_tgt_private = NULL; 7183 } 7184 7185 7186 int 7187 vhci_do_scsi_cmd(struct scsi_pkt *pkt) 7188 { 7189 int err = 0; 7190 int retry_cnt = 0; 7191 struct scsi_extended_sense *sns; 7192 7193 retry: 7194 err = scsi_poll(pkt); 7195 if (err) { 7196 if (pkt->pkt_cdbp[0] == SCMD_RELEASE) { 7197 if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) { 7198 VHCI_DEBUG(1, (CE_NOTE, NULL, 7199 "!v_s_do_s_c: RELEASE conflict\n")); 7200 return (0); 7201 } 7202 } 7203 if (retry_cnt++ < 3) { 7204 VHCI_DEBUG(1, (CE_WARN, NULL, 7205 "!v_s_do_s_c:retry packet 0x%p " 7206 "status 0x%x reason %s", 7207 (void *)pkt, SCBP_C(pkt), 7208 scsi_rname(pkt->pkt_reason))); 7209 if ((pkt->pkt_reason == CMD_CMPLT) && 7210 (SCBP_C(pkt) == STATUS_CHECK) && 7211 (pkt->pkt_state & STATE_ARQ_DONE)) { 7212 sns = &(((struct scsi_arq_status *)(uintptr_t) 7213 (pkt->pkt_scbp))->sts_sensedata); 7214 VHCI_DEBUG(1, (CE_WARN, NULL, 7215 "!v_s_do_s_c:retry " 7216 "packet 0x%p sense data %s", (void *)pkt, 7217 scsi_sname(sns->es_key))); 7218 } 7219 goto retry; 7220 } 7221 VHCI_DEBUG(1, (CE_WARN, NULL, 7222 "!v_s_do_s_c: failed transport 0x%p 0x%x", 7223 (void *)pkt, SCBP_C(pkt))); 7224 return (0); 7225 } 7226 7227 switch (pkt->pkt_reason) { 7228 case CMD_TIMEOUT: 7229 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed " 7230 "out (pkt 0x%p)", (void *)pkt)); 7231 return (0); 7232 case CMD_CMPLT: 7233 switch (SCBP_C(pkt)) { 7234 case STATUS_GOOD: 7235 break; 7236 case STATUS_CHECK: 7237 if (pkt->pkt_state & STATE_ARQ_DONE) { 7238 sns = &((( 7239 struct scsi_arq_status *) 7240 (uintptr_t) 7241 (pkt->pkt_scbp))-> 7242 sts_sensedata); 7243 if ((sns->es_key == 7244 KEY_UNIT_ATTENTION) || 7245 (sns->es_key == 7246 KEY_NOT_READY)) { 7247 /* 7248 * clear unit attn. 
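 * (or wait out a not-ready device) by simply retrying the
 * command; both conditions are treated as transient here.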
7249 */ 7250 7251 VHCI_DEBUG(1, 7252 (CE_WARN, NULL, 7253 "!v_s_do_s_c: " 7254 "retry " 7255 "packet 0x%p sense " 7256 "data %s", 7257 (void *)pkt, 7258 scsi_sname 7259 (sns->es_key))); 7260 goto retry; 7261 } 7262 VHCI_DEBUG(4, (CE_WARN, NULL, 7263 "!ARQ while " 7264 "transporting " 7265 "(pkt 0x%p)", 7266 (void *)pkt)); 7267 return (0); 7268 } 7269 return (0); 7270 default: 7271 VHCI_DEBUG(1, (CE_WARN, NULL, 7272 "!Bad status returned " 7273 "(pkt 0x%p, status %x)", 7274 (void *)pkt, SCBP_C(pkt))); 7275 return (0); 7276 } 7277 break; 7278 case CMD_INCOMPLETE: 7279 case CMD_RESET: 7280 case CMD_ABORTED: 7281 case CMD_TRAN_ERR: 7282 if (retry_cnt++ < 1) { 7283 VHCI_DEBUG(1, (CE_WARN, NULL, 7284 "!v_s_do_s_c: retry packet 0x%p %s", 7285 (void *)pkt, scsi_rname(pkt->pkt_reason))); 7286 goto retry; 7287 } 7288 /* FALLTHROUGH */ 7289 default: 7290 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not " 7291 "complete successfully (pkt 0x%p," 7292 "reason %x)", (void *)pkt, pkt->pkt_reason)); 7293 return (0); 7294 } 7295 return (1); 7296 } 7297 7298 static int 7299 vhci_quiesce_lun(struct scsi_vhci_lun *vlun) 7300 { 7301 mdi_pathinfo_t *pip, *spip; 7302 dev_info_t *cdip; 7303 struct scsi_vhci_priv *svp; 7304 mdi_pathinfo_state_t pstate; 7305 uint32_t p_ext_state; 7306 int circular; 7307 7308 cdip = vlun->svl_dip; 7309 pip = spip = NULL; 7310 ndi_devi_enter(cdip, &circular); 7311 pip = mdi_get_next_phci_path(cdip, NULL); 7312 while (pip != NULL) { 7313 (void) mdi_pi_get_state2(pip, &pstate, &p_ext_state); 7314 if (pstate != MDI_PATHINFO_STATE_ONLINE) { 7315 spip = pip; 7316 pip = mdi_get_next_phci_path(cdip, spip); 7317 continue; 7318 } 7319 mdi_hold_path(pip); 7320 ndi_devi_exit(cdip, circular); 7321 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 7322 mutex_enter(&svp->svp_mutex); 7323 while (svp->svp_cmds != 0) { 7324 if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex, 7325 ddi_get_lbolt() + drv_usectohz 7326 (vhci_path_quiesce_timeout * 1000000)) == -1) { 7327 mutex_exit(&svp->svp_mutex); 7328 mdi_rele_path(pip); 7329 VHCI_DEBUG(1, (CE_WARN, NULL, 7330 "Quiesce of lun is not successful " 7331 "vlun: 0x%p.", (void *)vlun)); 7332 return (0); 7333 } 7334 } 7335 mutex_exit(&svp->svp_mutex); 7336 ndi_devi_enter(cdip, &circular); 7337 spip = pip; 7338 pip = mdi_get_next_phci_path(cdip, spip); 7339 mdi_rele_path(spip); 7340 } 7341 ndi_devi_exit(cdip, circular); 7342 return (1); 7343 } 7344 7345 static int 7346 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp) 7347 { 7348 scsi_vhci_lun_t *vlun; 7349 vhci_prout_t *prout; 7350 int rval, success; 7351 mdi_pathinfo_t *pip, *npip; 7352 scsi_vhci_priv_t *osvp; 7353 dev_info_t *cdip; 7354 uchar_t cdb_1; 7355 uchar_t temp_res_key[MHIOC_RESV_KEY_SIZE]; 7356 7357 7358 /* 7359 * see if there are any other paths available; if none, 7360 * then there is nothing to do. 7361 */ 7362 cdip = svp->svp_svl->svl_dip; 7363 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7364 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7365 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7366 VHCI_DEBUG(4, (CE_NOTE, NULL, 7367 "%s%d: vhci_pgr_validate_and_register: first path\n", 7368 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7369 return (1); 7370 } 7371 7372 vlun = svp->svp_svl; 7373 prout = &vlun->svl_prout; 7374 ASSERT(vlun->svl_pgr_active != 0); 7375 7376 /* 7377 * When the path was busy/offlined, some other host might have 7378 * cleared this key. Validate key on some other path first. 7379 * If it fails, return failure. 
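 * The validation below re-issues the PROUT on another path with
 * res_key set to our current service key; the target accepts that
 * only if the key is in fact still registered.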
7380 */ 7381 7382 npip = pip; 7383 pip = NULL; 7384 success = 0; 7385 7386 /* Save the res key */ 7387 bcopy((const void *)prout->res_key, 7388 (void *)temp_res_key, MHIOC_RESV_KEY_SIZE); 7389 7390 /* 7391 * Sometimes CDB from application can be a Register_And_Ignore. 7392 * Instead of validation, this cdb would result in force registration. 7393 * Convert it to normal cdb for validation. 7394 * After that be sure to restore the cdb. 7395 */ 7396 cdb_1 = vlun->svl_cdb[1]; 7397 vlun->svl_cdb[1] &= 0xe0; 7398 7399 do { 7400 osvp = (scsi_vhci_priv_t *) 7401 mdi_pi_get_vhci_private(npip); 7402 if (osvp == NULL) { 7403 VHCI_DEBUG(4, (CE_NOTE, NULL, 7404 "vhci_pgr_validate_and_register: no " 7405 "client priv! 0x%p offlined?\n", 7406 (void *)npip)); 7407 goto next_path_1; 7408 } 7409 7410 if (osvp == svp) { 7411 VHCI_DEBUG(4, (CE_NOTE, NULL, 7412 "vhci_pgr_validate_and_register: same svp 0x%p" 7413 " npip 0x%p vlun 0x%p\n", 7414 (void *)svp, (void *)npip, (void *)vlun)); 7415 goto next_path_1; 7416 } 7417 7418 VHCI_DEBUG(4, (CE_NOTE, NULL, 7419 "vhci_pgr_validate_and_register: First validate on" 7420 " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy" 7421 " cdb1 %x\n", (void *)osvp, (void *)vlun, 7422 (void *)curthread, vlun->svl_cdb[1])); 7423 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:"); 7424 7425 bcopy((const void *)prout->service_key, 7426 (void *)prout->res_key, MHIOC_RESV_KEY_SIZE); 7427 7428 VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy", 7429 (void *)vlun)); 7430 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7431 7432 rval = vhci_do_prout(osvp); 7433 if (rval == 1) { 7434 VHCI_DEBUG(4, (CE_NOTE, NULL, 7435 "%s%d: vhci_pgr_validate_and_register: key" 7436 " validated thread 0x%p\n", ddi_driver_name(cdip), 7437 ddi_get_instance(cdip), (void *)curthread)); 7438 pip = npip; 7439 success = 1; 7440 break; 7441 } else { 7442 VHCI_DEBUG(4, (CE_NOTE, NULL, 7443 "vhci_pgr_validate_and_register: First validation" 7444 " on osvp 0x%p failed %x\n", (void *)osvp, rval)); 7445 vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:"); 7446 } 7447 7448 /* 7449 * Try other paths 7450 */ 7451 next_path_1: 7452 pip = npip; 7453 rval = mdi_select_path(cdip, NULL, 7454 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7455 pip, &npip); 7456 mdi_rele_path(pip); 7457 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7458 7459 7460 /* Be sure to restore original cdb */ 7461 vlun->svl_cdb[1] = cdb_1; 7462 7463 /* Restore the res_key */ 7464 bcopy((const void *)temp_res_key, 7465 (void *)prout->res_key, MHIOC_RESV_KEY_SIZE); 7466 7467 /* 7468 * If key could not be registered on any path for the first time, 7469 * return success as online should still continue. 
7470 */ 7471 if (success == 0) { 7472 return (1); 7473 } 7474 7475 ASSERT(pip != NULL); 7476 7477 /* 7478 * Force register on new path 7479 */ 7480 cdb_1 = vlun->svl_cdb[1]; /* store the cdb */ 7481 7482 vlun->svl_cdb[1] &= 0xe0; 7483 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 7484 7485 vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: "); 7486 7487 bcopy((const void *)prout->active_service_key, 7488 (void *)prout->service_key, MHIOC_RESV_KEY_SIZE); 7489 bcopy((const void *)prout->active_res_key, 7490 (void *)prout->res_key, MHIOC_RESV_KEY_SIZE); 7491 7492 vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: "); 7493 7494 rval = vhci_do_prout(svp); 7495 vlun->svl_cdb[1] = cdb_1; /* restore the cdb */ 7496 if (rval != 1) { 7497 VHCI_DEBUG(4, (CE_NOTE, NULL, 7498 "vhci_pgr_validate_and_register: register on new" 7499 " path 0x%p svp 0x%p failed %x\n", 7500 (void *)pip, (void *)svp, rval)); 7501 vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: "); 7502 mdi_rele_path(pip); 7503 return (0); 7504 } 7505 7506 if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) { 7507 VHCI_DEBUG(4, (CE_NOTE, NULL, 7508 "vhci_pgr_validate_and_register: zero service key\n")); 7509 mdi_rele_path(pip); 7510 return (rval); 7511 } 7512 7513 /* 7514 * While the key was force registered, some other host might have 7515 * cleared the key. Re-validate key on another pre-existing path 7516 * before declaring success. 7517 */ 7518 npip = pip; 7519 pip = NULL; 7520 7521 /* 7522 * Sometimes CDB from application can be Register and Ignore. 7523 * Instead of validation, it would result in force registration. 7524 * Convert it to normal cdb for validation. 7525 * After that be sure to restore the cdb. 7526 */ 7527 cdb_1 = vlun->svl_cdb[1]; 7528 vlun->svl_cdb[1] &= 0xe0; 7529 success = 0; 7530 7531 do { 7532 osvp = (scsi_vhci_priv_t *) 7533 mdi_pi_get_vhci_private(npip); 7534 if (osvp == NULL) { 7535 VHCI_DEBUG(4, (CE_NOTE, NULL, 7536 "vhci_pgr_validate_and_register: no " 7537 "client priv! 0x%p offlined?\n", 7538 (void *)npip)); 7539 goto next_path_2; 7540 } 7541 7542 if (osvp == svp) { 7543 VHCI_DEBUG(4, (CE_NOTE, NULL, 7544 "vhci_pgr_validate_and_register: same osvp 0x%p" 7545 " npip 0x%p vlun 0x%p\n", 7546 (void *)svp, (void *)npip, (void *)vlun)); 7547 goto next_path_2; 7548 } 7549 7550 VHCI_DEBUG(4, (CE_NOTE, NULL, 7551 "vhci_pgr_validate_and_register: Re-validation on" 7552 " osvp 0x%p being done. 
vlun 0x%p Before bcopy cdb1 %x\n", 7553 (void *)osvp, (void *)vlun, vlun->svl_cdb[1])); 7554 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7555 7556 bcopy((const void *)prout->service_key, 7557 (void *)prout->res_key, MHIOC_RESV_KEY_SIZE); 7558 7559 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7560 7561 rval = vhci_do_prout(osvp); 7562 if (rval == 1) { 7563 VHCI_DEBUG(4, (CE_NOTE, NULL, 7564 "%s%d: vhci_pgr_validate_and_register: key" 7565 " validated thread 0x%p\n", ddi_driver_name(cdip), 7566 ddi_get_instance(cdip), (void *)curthread)); 7567 pip = npip; 7568 success = 1; 7569 break; 7570 } else { 7571 VHCI_DEBUG(4, (CE_NOTE, NULL, 7572 "vhci_pgr_validate_and_register: Re-validation on" 7573 " osvp 0x%p failed %x\n", (void *)osvp, rval)); 7574 vhci_print_prout_keys(vlun, 7575 "v_pgr_val_reg: reval failed: "); 7576 } 7577 7578 /* 7579 * Try other paths 7580 */ 7581 next_path_2: 7582 pip = npip; 7583 rval = mdi_select_path(cdip, NULL, 7584 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7585 pip, &npip); 7586 mdi_rele_path(pip); 7587 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7588 7589 /* Be sure to restore original cdb */ 7590 vlun->svl_cdb[1] = cdb_1; 7591 7592 if (success == 1) { 7593 /* Successfully validated registration */ 7594 mdi_rele_path(pip); 7595 return (1); 7596 } 7597 7598 VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed")); 7599 7600 /* 7601 * key invalid, back out by registering key value of 0 7602 */ 7603 VHCI_DEBUG(4, (CE_NOTE, NULL, 7604 "vhci_pgr_validate_and_register: backout on" 7605 " svp 0x%p being done\n", (void *)svp)); 7606 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7607 7608 bcopy((const void *)prout->service_key, (void *)prout->res_key, 7609 MHIOC_RESV_KEY_SIZE); 7610 bzero((void *)prout->service_key, MHIOC_RESV_KEY_SIZE); 7611 7612 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7613 7614 /* 7615 * Get a new path 7616 */ 7617 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7618 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7619 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7620 VHCI_DEBUG(4, (CE_NOTE, NULL, 7621 "%s%d: vhci_pgr_validate_and_register: no valid pip\n", 7622 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7623 return (0); 7624 } 7625 7626 if ((rval = vhci_do_prout(svp)) != 1) { 7627 VHCI_DEBUG(4, (CE_NOTE, NULL, 7628 "vhci_pgr_validate_and_register: backout on" 7629 " svp 0x%p failed\n", (void *)svp)); 7630 vhci_print_prout_keys(vlun, "backout failed"); 7631 7632 VHCI_DEBUG(4, (CE_WARN, NULL, 7633 "%s%d: vhci_pgr_validate_and_register: key" 7634 " validation and backout failed", ddi_driver_name(cdip), 7635 ddi_get_instance(cdip))); 7636 if (rval == VHCI_PGR_ILLEGALOP) { 7637 VHCI_DEBUG(4, (CE_WARN, NULL, 7638 "%s%d: vhci_pgr_validate_and_register: key" 7639 " already cleared", ddi_driver_name(cdip), 7640 ddi_get_instance(cdip))); 7641 rval = 1; 7642 } else 7643 rval = 0; 7644 } else { 7645 VHCI_DEBUG(4, (CE_NOTE, NULL, 7646 "%s%d: vhci_pgr_validate_and_register: key" 7647 " validation failed, key backed out\n", 7648 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7649 vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: "); 7650 } 7651 mdi_rele_path(pip); 7652 7653 return (rval); 7654 } 7655 7656 /* 7657 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures 7658 * that vhci_scsi_start is not called in interrupt context. 
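 * (In this driver the packets routed through this taskq are scsi-2
 * RESERVE commands; see the VHCI_DEBUG message below.)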
7659 * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we 7660 * need to complete the command if something goes wrong. 7661 */ 7662 static void 7663 vhci_dispatch_scsi_start(void *arg) 7664 { 7665 struct vhci_pkt *vpkt = (struct vhci_pkt *)arg; 7666 struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt; 7667 int rval = TRAN_BUSY; 7668 7669 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending" 7670 " scsi-2 reserve for 0x%p\n", 7671 (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address)))); 7672 7673 /* 7674 * To prevent the taskq from being called recursively we set the 7675 * VHCI_PKT_THRU_TASKQ bit in the vhci_pkt's state. 7676 */ 7677 vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ; 7678 7679 /* 7680 * Wait for the transport to get ready to send packets; if it 7681 * times out, it will return something other than TRAN_BUSY. 7682 * The vhci_reserve_delay may want to get tuned for other 7683 * transports and is therefore a global. Using delay since this 7684 * routine is called by taskq dispatch and not called during 7685 * interrupt context. 7686 */ 7687 while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address), 7688 vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) { 7689 delay(drv_usectohz(vhci_reserve_delay)); 7690 } 7691 7692 switch (rval) { 7693 case TRAN_ACCEPT: 7694 return; 7695 7696 default: 7697 /* 7698 * This pkt shall be retried, and to ensure another taskq 7699 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ 7700 * flag. 7701 */ 7702 vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ; 7703 7704 /* Ensure that the pkt is retried without a reset */ 7705 tpkt->pkt_reason = CMD_ABORTED; 7706 tpkt->pkt_statistics |= STAT_ABORTED; 7707 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: " 7708 "TRAN_rval %d returned for dip 0x%p", rval, 7709 (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address)))); 7710 break; 7711 } 7712 7713 /* 7714 * vpkt_org_vpkt should always be NULL here if the retry command 7715 * has been successfully dispatched. If vpkt_org_vpkt != NULL at 7716 * this point, it is an error so restore the original vpkt and 7717 * return an error to the target driver so it can retry the 7718 * command as appropriate. 7719 */ 7720 if (vpkt->vpkt_org_vpkt != NULL) { 7721 struct vhci_pkt *new_vpkt = vpkt; 7722 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *) 7723 mdi_pi_get_vhci_private(vpkt->vpkt_path); 7724 7725 vpkt = vpkt->vpkt_org_vpkt; 7726 7727 vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason; 7728 vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics; 7729 7730 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 7731 new_vpkt->vpkt_tgt_pkt); 7732 7733 tpkt = vpkt->vpkt_tgt_pkt; 7734 } 7735 7736 if (tpkt->pkt_comp) { 7737 (*tpkt->pkt_comp)(tpkt); 7738 } 7739 } 7740 7741 static void 7742 vhci_initiate_auto_failback(void *arg) 7743 { 7744 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg; 7745 dev_info_t *vdip, *cdip; 7746 int held; 7747 7748 cdip = vlun->svl_dip; 7749 vdip = ddi_get_parent(cdip); 7750 7751 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 7752 7753 /* 7754 * Perform a final check to see if the active path class is indeed 7755 * not the preferred path class, since in the time since the auto 7756 * failback was dispatched an external failover could have been 7757 * detected. (Some other host could have detected this condition 7758 * and triggered the auto failback earlier.) In such a case, going 7759 * ahead with the failover would negate the whole purpose of auto 7760 * failback.
static void
vhci_initiate_auto_failback(void *arg)
{
    struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
    dev_info_t *vdip, *cdip;
    int held;

    cdip = vlun->svl_dip;
    vdip = ddi_get_parent(cdip);

    VHCI_HOLD_LUN(vlun, VH_SLEEP, held);

    /*
     * Perform a final check to see if the active path class is indeed
     * not the preferred path class.  In the time since the auto failback
     * was dispatched, an external failover could have been detected
     * [some other host could have detected this condition and triggered
     * the auto failback already].  In such a case, going ahead with the
     * failover would negate the whole purpose of auto failback.
     */
    mutex_enter(&vlun->svl_mutex);
    if (vlun->svl_active_pclass != NULL) {
        char *best_pclass;
        struct scsi_failover_ops *fo;

        fo = vlun->svl_fops;

        (*fo->sfo_pathclass_next)(NULL, &best_pclass,
            vlun->svl_fops_ctpriv);
        if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
            mutex_exit(&vlun->svl_mutex);
            VHCI_RELEASE_LUN(vlun);
            VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
                "auto failback for %s as %s pathclass already "
                "active.\n", vlun->svl_lun_wwn, best_pclass));
            return;
        }
    }
    mutex_exit(&vlun->svl_mutex);
    if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
        == MDI_SUCCESS) {
        vhci_log(CE_NOTE, vdip, "!Auto failback operation "
            "succeeded for device %s (GUID %s)",
            ddi_node_name(cdip), vlun->svl_lun_wwn);
    } else {
        vhci_log(CE_NOTE, vdip, "!Auto failback operation "
            "failed for device %s (GUID %s)",
            ddi_node_name(cdip), vlun->svl_lun_wwn);
    }
    VHCI_RELEASE_LUN(vlun);
}

#ifdef DEBUG
static void
vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
{
    uchar_t index = 0;
    char buf[100];

    VHCI_DEBUG(5, (CE_NOTE, NULL, "num keys %d\n", numkeys));

    while (index < numkeys) {
        bcopy(&prin->keylist[index], buf, MHIOC_RESV_KEY_SIZE);
        VHCI_DEBUG(5, (CE_NOTE, NULL,
            "%02x%02x%02x%02x%02x%02x%02x%02x\t",
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7]));
        index++;
    }
}
#endif

static void
vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
{
    int i;
    vhci_prout_t *prout;
    char buf1[4*MHIOC_RESV_KEY_SIZE + 1];
    char buf2[4*MHIOC_RESV_KEY_SIZE + 1];
    char buf3[4*MHIOC_RESV_KEY_SIZE + 1];
    char buf4[4*MHIOC_RESV_KEY_SIZE + 1];

    prout = &vlun->svl_prout;

    for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
        (void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
    for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
        (void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]);
    for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
        (void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
    for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
        (void) sprintf(&buf4[4*i], "[%02x]",
            prout->active_service_key[i]);

    /* Print everything in one call; separate calls could interleave. */
    VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
        "res_key           : %s\n"
        "service_key       : %s\n"
        "active_res_key    : %s\n"
        "active_service_key: %s\n",
        msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
}
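/*
 * EXAMPLE (editorial): the same hex-dump formatting done with snprintf(),
 * which bounds every write to the destination buffer instead of relying on
 * the caller sizing it exactly.  A hedged sketch: vhci_example_format_key()
 * is hypothetical; only MHIOC_RESV_KEY_SIZE and the "[xx]" layout come
 * from the code above.
 */
static void
vhci_example_format_key(const uchar_t *key, char *buf, size_t buflen)
{
    size_t off = 0;
    int i;

    buf[0] = '\0';
    for (i = 0; i < MHIOC_RESV_KEY_SIZE && off < buflen; i++)
        off += snprintf(buf + off, buflen - off, "[%02x]", key[i]);
}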
/*
 * Called from vhci_scsi_start to update the pHCI pkt with the target pkt.
 */
static void
vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
{
    ASSERT(vpkt->vpkt_hba_pkt);

    vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
    vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;

    if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
        MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
        /*
         * A polled command was requested, or the HBA is in the
         * suspended state.
         */
        vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
        vpkt->vpkt_hba_pkt->pkt_comp = NULL;
    } else {
        vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
    }
    vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
    bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
        vpkt->vpkt_tgt_init_cdblen);
    vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;

    /* Re-initialize the following pHCI packet state information */
    vpkt->vpkt_hba_pkt->pkt_state = 0;
    vpkt->vpkt_hba_pkt->pkt_statistics = 0;
    vpkt->vpkt_hba_pkt->pkt_reason = 0;
}

static int
vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
    void *arg, void *result)
{
    int ret = DDI_SUCCESS;

    /*
     * Generic processing in MPxIO framework
     */
    ret = mdi_bus_power(parent, impl_arg, op, arg, result);

    switch (ret) {
    case MDI_SUCCESS:
        ret = DDI_SUCCESS;
        break;
    case MDI_FAILURE:
        ret = DDI_FAILURE;
        break;
    default:
        break;
    }

    return (ret);
}
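/*
 * EXAMPLE (editorial): the same field copying as vhci_update_pHCI_pkt()
 * written with a local alias for the pHCI packet.  Purely a readability
 * sketch of the code above; behavior is unchanged, no new interfaces are
 * assumed, and vhci_example_update_pHCI_pkt() is hypothetical.
 */
static void
vhci_example_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
{
    struct scsi_pkt *hpkt = vpkt->vpkt_hba_pkt;

    ASSERT(hpkt != NULL);

    /* Propagate the flags, forcing FLAG_NOQUEUE as vhci always does. */
    hpkt->pkt_flags = pkt->pkt_flags | FLAG_NOQUEUE;

    if ((hpkt->pkt_flags & FLAG_NOINTR) ||
        MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
        /* Polled: no completion callback will be invoked. */
        hpkt->pkt_flags |= FLAG_NOINTR;
        hpkt->pkt_comp = NULL;
    } else {
        hpkt->pkt_comp = vhci_intr;
    }

    hpkt->pkt_time = pkt->pkt_time;
    bcopy(pkt->pkt_cdbp, hpkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);
    hpkt->pkt_resid = pkt->pkt_resid;

    /* Reset per-transport state before the pkt is reused. */
    hpkt->pkt_state = 0;
    hpkt->pkt_statistics = 0;
    hpkt->pkt_reason = 0;
}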
static int
vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip)
{
    dev_info_t *cdip;
    mdi_pathinfo_t *npip = NULL;
    scsi_vhci_priv_t *svp = NULL;
    struct scsi_address *pap = NULL;
    scsi_hba_tran_t *hba = NULL;
    int sps;
    int mps_flag;
    int rval = 0;

    mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
    if (pip) {
        /*
         * If the call is from vhci_pathinfo_state_change,
         * then this path was busy and is becoming ready to accept IO.
         */
        ASSERT(ap != NULL);
        hba = ap->a_hba_tran;
        ASSERT(hba != NULL);
        rval = scsi_ifsetcap(ap, cap, val, whom);

        VHCI_DEBUG(2, (CE_NOTE, NULL,
            "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
            (void *)pip, (void *)ap, rval));

        return (rval);
    }

    /*
     * Set the capability on all the pHCIs.
     * If any path is busy, the capability will be set by
     * vhci_pathinfo_state_change.
     */
    cdip = ADDR2DIP(ap);
    ASSERT(cdip != NULL);
    sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
    if ((sps != MDI_SUCCESS) || (pip == NULL)) {
        VHCI_DEBUG(2, (CE_WARN, NULL,
            "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
            (void *)cdip));
        return (0);
    }

again:
    svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
    if (svp == NULL) {
        VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
            "priv is NULL, pip 0x%p", (void *)pip));
        mdi_rele_path(pip);
        return (rval);
    }

    if (svp->svp_psd == NULL) {
        VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
            "psd is NULL, pip 0x%p, svp 0x%p",
            (void *)pip, (void *)svp));
        mdi_rele_path(pip);
        return (rval);
    }

    pap = &svp->svp_psd->sd_address;
    ASSERT(pap != NULL);
    hba = pap->a_hba_tran;
    ASSERT(hba != NULL);

    if (hba->tran_setcap != NULL) {
        rval = scsi_ifsetcap(pap, cap, val, whom);

        VHCI_DEBUG(2, (CE_NOTE, NULL,
            "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
            (void *)pip, (void *)ap, rval));

        /*
         * Select the next path and issue the setcap; repeat
         * until all paths are exhausted.
         */
        sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
        if ((sps != MDI_SUCCESS) || (npip == NULL)) {
            mdi_rele_path(pip);
            return (1);
        }
        mdi_rele_path(pip);
        pip = npip;
        goto again;
    }
    mdi_rele_path(pip);
    return (rval);
}

static int
vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **child)
{
    char *guid;

    if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
        guid = vhci_devnm_to_guid((char *)arg);
    else
        guid = NULL;

    if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
        == MDI_SUCCESS)
        return (NDI_SUCCESS);
    else
        return (NDI_FAILURE);
}

/*
 * Take the original vhci_pkt and create a duplicate of the pkt for
 * resending, as though it originated in ssd.
 */
static struct scsi_pkt *
vhci_create_retry_pkt(struct vhci_pkt *vpkt)
{
    struct vhci_pkt *new_vpkt = NULL;
    struct scsi_pkt *pkt = NULL;

    scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
        mdi_pi_get_vhci_private(vpkt->vpkt_path);

    /*
     * Ensure consistent data at completion time by setting
     * PKT_CONSISTENT.
     */
    pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
        vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
        vpkt->vpkt_tgt_init_scblen, 0,
        PKT_CONSISTENT,
        NULL_FUNC, NULL);
    if (pkt != NULL) {
        new_vpkt = TGTPKT2VHCIPKT(pkt);

        pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
        pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
        pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
        pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;

        pkt->pkt_resid = 0;
        pkt->pkt_statistics = 0;
        pkt->pkt_reason = 0;

        bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
            pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);

        /*
         * Save a pointer to the original vhci_pkt
         */
        new_vpkt->vpkt_org_vpkt = vpkt;
    }

    return (pkt);
}
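/*
 * EXAMPLE (editorial): how the retry-pkt pair fits together.  A hedged
 * usage sketch: vhci_example_retry() is hypothetical and error handling is
 * elided; only vhci_create_retry_pkt(), vhci_sync_retry_pkt() (below),
 * TGTPKT2VHCIPKT and vpkt_org_vpkt come from this file.
 */
static void
vhci_example_retry(struct vhci_pkt *vpkt)
{
    struct scsi_pkt *rpkt;

    /* Clone the failed command; the clone points back at 'vpkt'. */
    rpkt = vhci_create_retry_pkt(vpkt);
    if (rpkt == NULL)
        return;    /* the caller completes the original pkt */

    /*
     * The clone would then be sent through the normal start path
     * (typically via the taskq, as in vhci_dispatch_scsi_start()).
     * When the clone later completes successfully,
     * vhci_sync_retry_pkt(TGTPKT2VHCIPKT(rpkt)) copies the good status
     * back into the original target pkt and destroys the clone.
     */
}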
/*
 * Copy the successful completion information from the hba packet into
 * the original target pkt from the upper layer.  Returns the original
 * vpkt and destroys the new vpkt from the internal retry.
 */
static struct vhci_pkt *
vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
{
    struct vhci_pkt *ret_vpkt = NULL;
    struct scsi_pkt *tpkt = NULL;
    struct scsi_pkt *hba_pkt = NULL;
    scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
        mdi_pi_get_vhci_private(vpkt->vpkt_path);

    ASSERT(vpkt->vpkt_org_vpkt != NULL);
    VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
        "completed successfully!\n"));

    ret_vpkt = vpkt->vpkt_org_vpkt;
    tpkt = ret_vpkt->vpkt_tgt_pkt;
    hba_pkt = vpkt->vpkt_hba_pkt;

    /*
     * Copy the good status into the target driver's packet
     */
    *(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
    tpkt->pkt_resid = hba_pkt->pkt_resid;
    tpkt->pkt_state = hba_pkt->pkt_state;
    tpkt->pkt_statistics = hba_pkt->pkt_statistics;
    tpkt->pkt_reason = hba_pkt->pkt_reason;

    /*
     * Destroy the internally created vpkt for the retry
     */
    vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
        vpkt->vpkt_tgt_pkt);

    return (ret_vpkt);
}

/* Restart the REQUEST SENSE command. */
static void
vhci_uscsi_restart_sense(void *arg)
{
    struct buf *rqbp;
    struct buf *bp;
    struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
    mp_uscsi_cmd_t *mp_uscmdp;

    VHCI_DEBUG(4, (CE_WARN, NULL,
        "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));

    if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
        /* If the transport fails, complete the original command. */
        mp_uscmdp = rqpkt->pkt_private;
        ASSERT(mp_uscmdp != NULL);
        bp = mp_uscmdp->cmdbp;
        rqbp = mp_uscmdp->rqbp;
        ASSERT(bp != NULL && rqbp != NULL);
        scsi_free_consistent_buf(rqbp);
        scsi_destroy_pkt(rqpkt);
        bp->b_resid = bp->b_bcount;
        bioerror(bp, EIO);
        biodone(bp);
    }
}
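/*
 * EXAMPLE (editorial): the timeout(9F) idiom used below to reschedule a
 * busy REQUEST SENSE.  A hedged sketch: 'example_timeid' and both helpers
 * are hypothetical; timeout(9F), untimeout(9F) and drv_usectohz(9F) are
 * the standard DDI interfaces.
 */
static timeout_id_t example_timeid;

static void
vhci_example_schedule_restart(struct scsi_pkt *rqpkt)
{
    /* Call vhci_uscsi_restart_sense(rqpkt) again in ~5 seconds. */
    example_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
        drv_usectohz(5 * 1000000));
}

static void
vhci_example_cancel_restart(void)
{
    timeout_id_t tid = example_timeid;

    example_timeid = 0;
    if (tid != 0)
        (void) untimeout(tid);    /* cancel the pending timeout, if any */
}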
/*
 * Auto request sense is not enabled, so we have to retrieve the sense
 * data manually.
 */
static int
vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
{
    struct buf *rqbp, *cmdbp;
    struct scsi_pkt *rqpkt;
    int rval = 0;

    cmdbp = mp_uscmdp->cmdbp;
    ASSERT(cmdbp != NULL);

    VHCI_DEBUG(4, (CE_WARN, NULL,
        "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
        (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
    /* set up the packet information and cdb */
    if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
        SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
        return (-1);
    }

    if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
        CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
        scsi_free_consistent_buf(rqbp);
        return (-1);
    }

    (void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
        SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);

    mp_uscmdp->rqbp = rqbp;
    rqbp->b_private = mp_uscmdp;
    rqpkt->pkt_flags |= FLAG_SENSING;
    rqpkt->pkt_time = 60;
    rqpkt->pkt_comp = vhci_uscsi_iodone;
    rqpkt->pkt_private = mp_uscmdp;

    /*
     * NOTE: This code path is related to MPAPI uscsi(7I), so path
     * selection is not based on path_instance.
     */
    if (scsi_pkt_allocated_correctly(rqpkt))
        rqpkt->pkt_path_instance = 0;

    /* Send the REQUEST SENSE command. */
    switch (scsi_transport(rqpkt)) {
    case TRAN_ACCEPT:
        VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
            "transport accepted."));
        break;
    case TRAN_BUSY:
        VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
            "transport busy, setting timeout."));
        vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
            (drv_usectohz(5 * 1000000)));
        break;
    default:
        VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
            "transport failed"));
        scsi_free_consistent_buf(rqbp);
        scsi_destroy_pkt(rqpkt);
        rval = -1;
    }

    return (rval);
}
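/*
 * EXAMPLE (editorial): extracting sense data from a scsi_arq_status, the
 * pattern vhci_uscsi_iodone() below uses for the auto-request-sense case.
 * A hedged sketch: vhci_example_copy_arq() is hypothetical; the
 * scsi_arq_status fields and SENSE_LENGTH are standard (see scsi_pkt(9S)).
 */
static int
vhci_example_copy_arq(struct scsi_pkt *pkt, caddr_t rqbuf, int rqlen)
{
    struct scsi_arq_status *arqstat =
        (struct scsi_arq_status *)(intptr_t)pkt->pkt_scbp;
    int valid;

    /* Amount of sense data actually transferred by the ARQ pkt. */
    valid = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
    valid = min(valid, rqlen);

    if (valid > 0)
        bcopy(&arqstat->sts_sensedata, rqbuf, valid);
    return (valid);
}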
/*
 * Completion routine for the MPAPI uscsi command.  This behaves as though
 * FLAG_DIAGNOSE were set, meaning there are no retries except for a manual
 * request sense.
 */
void
vhci_uscsi_iodone(struct scsi_pkt *pkt)
{
    struct buf *bp;
    mp_uscsi_cmd_t *mp_uscmdp;
    struct uscsi_cmd *uscmdp;
    struct scsi_arq_status *arqstat;
    int err;

    mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
    uscmdp = mp_uscmdp->uscmdp;
    bp = mp_uscmdp->cmdbp;
    ASSERT(bp != NULL);
    VHCI_DEBUG(4, (CE_WARN, NULL,
        "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
        (void *)bp, (void *)pkt, (void *)mp_uscmdp));
    /* Save the status and the residual into the uscsi_cmd struct */
    uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
    uscmdp->uscsi_resid = bp->b_resid;

    /* Fast path: the command completed cleanly. */
    if (pkt->pkt_reason == CMD_CMPLT &&
        SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
        pkt->pkt_resid == 0) {
        mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
        scsi_destroy_pkt(pkt);
        biodone(bp);
        return;
    }
    VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
        " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
        pkt->pkt_reason, pkt->pkt_resid,
        pkt->pkt_state, bp->b_bcount, bp->b_resid));

    err = EIO;

    arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
    if (pkt->pkt_reason != CMD_CMPLT) {
        /*
         * The command did not complete.
         */
        VHCI_DEBUG(4, (CE_NOTE, NULL,
            "vhci_uscsi_iodone: command did not complete."
            " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
        if (pkt->pkt_flags & FLAG_SENSING) {
            MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
        } else if (pkt->pkt_reason == CMD_TIMEOUT) {
            MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
            err = ETIMEDOUT;
        }
    } else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
        /*
         * The auto-rqsense happened, and the packet has a filled-in
         * scsi_arq_status structure, pointed to by pkt_scbp.
         */
        VHCI_DEBUG(4, (CE_NOTE, NULL,
            "vhci_uscsi_iodone: received auto-requested sense"));
        if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
            /* get the amount of data to copy into rqbuf */
            int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
            rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
            uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
            uscmdp->uscsi_rqstatus =
                *((char *)&arqstat->sts_rqpkt_status);
            if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
                rqlen != 0) {
                bcopy(&(arqstat->sts_sensedata),
                    uscmdp->uscsi_rqbuf, rqlen);
            }
            mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
            VHCI_DEBUG(4, (CE_NOTE, NULL,
                "vhci_uscsi_iodone: ARQ "
                "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
                "xfer: %d rqpkt_resid: %d\n",
                uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
                uscmdp->uscsi_rqlen, rqlen,
                arqstat->sts_rqpkt_resid));
        }
    } else if (pkt->pkt_flags & FLAG_SENSING) {
        struct buf *rqbp;
        struct scsi_status *rqstatus;

        rqstatus = (struct scsi_status *)pkt->pkt_scbp;
        /* A manual request sense was done; retrieve the information. */
        if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
            int rqlen = SENSE_LENGTH - pkt->pkt_resid;

            rqbp = mp_uscmdp->rqbp;
            /* get the amount of data to copy into rqbuf */
            rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
            uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
            uscmdp->uscsi_rqstatus = *((char *)rqstatus);
            if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
                bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
                    rqlen);
            }
            MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
            scsi_free_consistent_buf(rqbp);
        }
        VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING"
            " uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
            uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
    } else {
        struct scsi_status *status =
            (struct scsi_status *)pkt->pkt_scbp;
        /*
         * Command completed and we're not getting sense.  Check for
         * errors and decide what to do next.
         */
        VHCI_DEBUG(4, (CE_NOTE, NULL,
            "vhci_uscsi_iodone: command appears complete: reason: %x",
            pkt->pkt_reason));
        if (status->sts_chk) {
            /* need to manually get the request sense */
            if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
                scsi_destroy_pkt(pkt);
                return;
            }
        } else {
            VHCI_DEBUG(4, (CE_NOTE, NULL,
                "vhci_uscsi_iodone: appears complete"));
            err = 0;
            mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
            if (pkt->pkt_resid) {
                bp->b_resid += pkt->pkt_resid;
            }
        }
    }

    if (err) {
        if (bp->b_resid == 0)
            bp->b_resid = bp->b_bcount;
        bioerror(bp, err);
        bp->b_flags |= B_ERROR;
    }

    scsi_destroy_pkt(pkt);
    biodone(bp);

    VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
}
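/*
 * EXAMPLE (editorial): the buf(9S) error-completion idiom used above,
 * shown in isolation.  A hedged sketch: vhci_example_fail_buf() is
 * hypothetical; bioerror(9F) and biodone(9F) are the standard interfaces.
 */
static void
vhci_example_fail_buf(struct buf *bp, int err)
{
    /* Nothing transferred: the whole request is residual. */
    if (bp->b_resid == 0)
        bp->b_resid = bp->b_bcount;

    bioerror(bp, err);    /* records the errno on the buf */
    biodone(bp);          /* wakes up whoever is waiting on it */
}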
/*
 * Start routine for the MPAPI uscsi command.
 */
int
vhci_uscsi_iostart(struct buf *bp)
{
    struct scsi_pkt *pkt;
    struct uscsi_cmd *uscmdp;
    mp_uscsi_cmd_t *mp_uscmdp;
    int stat_size, rval;
    int retry = 0;

    ASSERT(bp->b_private != NULL);

    mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
    uscmdp = mp_uscmdp->uscmdp;
    if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
        stat_size = SENSE_LENGTH;
    } else {
        stat_size = 1;
    }

    pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
        stat_size, 0, 0, SLEEP_FUNC, NULL);
    if (pkt == NULL) {
        VHCI_DEBUG(4, (CE_NOTE, NULL,
            "vhci_uscsi_iostart: rval: EINVAL"));
        bp->b_resid = bp->b_bcount;
        uscmdp->uscsi_resid = bp->b_bcount;
        bioerror(bp, EINVAL);
        biodone(bp);
        return (EINVAL);
    }

    pkt->pkt_time = uscmdp->uscsi_timeout;
    bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
    pkt->pkt_comp = vhci_uscsi_iodone;
    pkt->pkt_private = mp_uscmdp;
    if (uscmdp->uscsi_flags & USCSI_SILENT)
        pkt->pkt_flags |= FLAG_SILENT;
    if (uscmdp->uscsi_flags & USCSI_ISOLATE)
        pkt->pkt_flags |= FLAG_ISOLATE;
    if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
        pkt->pkt_flags |= FLAG_DIAGNOSE;
    if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
        pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
    }
    VHCI_DEBUG(4, (CE_WARN, NULL,
        "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
        " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
        " stat_size: %d",
        (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
        (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
        (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));

    /*
     * NOTE: This code path is related to MPAPI uscsi(7I), so path
     * selection is not based on path_instance.
     */
    if (scsi_pkt_allocated_correctly(pkt))
        pkt->pkt_path_instance = 0;

    while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
        retry < vhci_uscsi_retry_count) {
        delay(drv_usectohz(vhci_uscsi_delay));
        retry++;
    }
    if (retry >= vhci_uscsi_retry_count) {
        VHCI_DEBUG(4, (CE_NOTE, NULL,
            "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
    }
    switch (rval) {
    case TRAN_ACCEPT:
        rval = 0;
        break;

    default:
        VHCI_DEBUG(4, (CE_NOTE, NULL,
            "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
            rval, bp->b_bcount, bp->b_resid));
        bp->b_resid = bp->b_bcount;
        uscmdp->uscsi_resid = bp->b_bcount;
        bioerror(bp, EIO);
        scsi_destroy_pkt(pkt);
        biodone(bp);
        rval = EIO;
        MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
        break;
    }
    VHCI_DEBUG(4, (CE_NOTE, NULL,
        "vhci_uscsi_iostart: exit: rval: %d", rval));
    return (rval);
}
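/*
 * EXAMPLE (editorial): the bounded TRAN_BUSY retry idiom used by
 * vhci_uscsi_iostart() above, factored out.  A hedged sketch:
 * vhci_example_transport_retry() is hypothetical; scsi_transport(9F),
 * delay(9F) and drv_usectohz(9F) are the standard interfaces, and the
 * delay/retry tunables are the file-scope globals defined earlier.
 */
static int
vhci_example_transport_retry(struct scsi_pkt *pkt)
{
    int rval;
    int retry = 0;

    /* Only legal in a context that may sleep (never interrupt). */
    while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
        retry++ < vhci_uscsi_retry_count)
        delay(drv_usectohz(vhci_uscsi_delay));

    return (rval);
}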