1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * Multiplexed I/O SCSI vHCI implementation 28 */ 29 30 #include <sys/conf.h> 31 #include <sys/file.h> 32 #include <sys/ddi.h> 33 #include <sys/sunddi.h> 34 #include <sys/scsi/scsi.h> 35 #include <sys/scsi/impl/scsi_reset_notify.h> 36 #include <sys/scsi/impl/services.h> 37 #include <sys/sunmdi.h> 38 #include <sys/mdi_impldefs.h> 39 #include <sys/scsi/adapters/scsi_vhci.h> 40 #include <sys/disp.h> 41 #include <sys/byteorder.h> 42 43 extern uintptr_t scsi_callback_id; 44 extern ddi_dma_attr_t scsi_alloc_attr; 45 46 #ifdef DEBUG 47 int vhci_debug = VHCI_DEBUG_DEFAULT_VAL; 48 #endif 49 50 /* retry count for the vhci_do_prout command when NOT READY is returned */ 51 int vhci_prout_not_ready_retry = 180; 52 53 /* 54 * These values are defined to support the internal retry of 55 * SCSI packets for better sense code handling. 56 */ 57 #define VHCI_CMD_CMPLT 0 58 #define VHCI_CMD_RETRY 1 59 #define VHCI_CMD_ERROR -1 60 61 #define PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM) 62 #define VHCI_SCSI_PERR 0x47 63 #define VHCI_PGR_ILLEGALOP -2 64 #define VHCI_NUM_UPDATE_TASKQ 8 65 /* changed to 132 to accommodate HDS */ 66 67 /* 68 * Version Macros 69 */ 70 #define VHCI_NAME_VERSION "SCSI VHCI Driver" 71 char vhci_version_name[] = VHCI_NAME_VERSION; 72 73 int vhci_first_time = 0; 74 clock_t vhci_to_ticks = 0; 75 int vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT; 76 kcondvar_t vhci_cv; 77 kmutex_t vhci_global_mutex; 78 void *vhci_softstate = NULL; /* for soft state */ 79 80 /* 81 * Delay before retrying the reserve command 82 */ 83 int vhci_reserve_delay = 100000; 84 static int vhci_path_quiesce_timeout = 60; 85 static uchar_t zero_key[MHIOC_RESV_KEY_SIZE]; 86 87 /* uscsi delay for a TRAN_BUSY */ 88 static int vhci_uscsi_delay = 100000; 89 static int vhci_uscsi_retry_count = 180; 90 /* uscsi_restart_sense timeout id in case it needs to get canceled */ 91 static timeout_id_t vhci_restart_timeid = 0; 92 93 static int vhci_bus_config_debug = 0; 94 95 /* 96 * Bidirectional map of 'target-port' to port id <pid> for support of 97 * iostat(1M) '-Xx' and '-Yx' output.
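 *
 * vhci_targetmap_byport hashes the 'target-port' string to a compact
 * integer <pid> and vhci_targetmap_bypid maps that <pid> back to the
 * string; both hashes are created in _init(), with vhci_targetmap_pid
 * tracking the next <pid> to hand out and vhci_targetmap_mutex
 * serializing access to the map.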
98 */ 99 static kmutex_t vhci_targetmap_mutex; 100 static uint_t vhci_targetmap_pid = 1; 101 static mod_hash_t *vhci_targetmap_bypid; /* <pid> -> 'target-port' */ 102 static mod_hash_t *vhci_targetmap_byport; /* 'target-port' -> <pid> */ 103 104 /* 105 * functions exported by scsi_vhci struct cb_ops 106 */ 107 static int vhci_open(dev_t *, int, int, cred_t *); 108 static int vhci_close(dev_t, int, int, cred_t *); 109 static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 110 111 /* 112 * functions exported by scsi_vhci struct dev_ops 113 */ 114 static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); 115 static int vhci_attach(dev_info_t *, ddi_attach_cmd_t); 116 static int vhci_detach(dev_info_t *, ddi_detach_cmd_t); 117 118 /* 119 * functions exported by scsi_vhci scsi_hba_tran_t transport table 120 */ 121 static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *, 122 scsi_hba_tran_t *, struct scsi_device *); 123 static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *, 124 struct scsi_device *); 125 static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *); 126 static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *); 127 static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *); 128 static int vhci_scsi_reset(struct scsi_address *, int); 129 static int vhci_scsi_reset_target(struct scsi_address *, int level, 130 uint8_t select_path); 131 static int vhci_scsi_reset_bus(struct scsi_address *); 132 static int vhci_scsi_getcap(struct scsi_address *, char *, int); 133 static int vhci_scsi_setcap(struct scsi_address *, char *, int, int); 134 static int vhci_commoncap(struct scsi_address *, char *, int, int, int); 135 static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom, 136 mdi_pathinfo_t *pip); 137 static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *, 138 struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t); 139 static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *); 140 static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *); 141 static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *); 142 static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t), 143 caddr_t); 144 static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int); 145 static int vhci_scsi_get_name(struct scsi_device *, char *, int); 146 static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t, 147 void *, void *); 148 static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t, 149 void *, dev_info_t **); 150 static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t, 151 void *); 152 static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *, 153 void **, char **); 154 155 /* 156 * functions registered with the mpxio framework via mdi_vhci_ops_t 157 */ 158 static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int); 159 static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int); 160 static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *, 161 mdi_pathinfo_state_t, uint32_t, int); 162 static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int); 163 static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int); 164 static int vhci_failover(dev_info_t *, dev_info_t *, int); 165 static void vhci_client_attached(dev_info_t *); 166 static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *); 
167 168 static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *); 169 static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *); 170 static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t); 171 static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t); 172 static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t); 173 static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t); 174 static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t); 175 static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t); 176 static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t); 177 static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t); 178 static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *, 179 int, caddr_t); 180 static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **, 181 uint_t, sv_iocdata_t *, int, caddr_t); 182 static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t); 183 static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t, 184 sv_iocdata_t *, int, caddr_t); 185 static int vhci_handle_ext_fo(struct scsi_pkt *, int); 186 static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *); 187 static int vhci_quiesce_lun(struct scsi_vhci_lun *); 188 static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *); 189 static void vhci_dispatch_scsi_start(void *); 190 static void vhci_efo_done(void *); 191 static void vhci_initiate_auto_failback(void *); 192 static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *); 193 static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *, 194 struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *); 195 static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *); 196 static int vhci_quiesce_paths(dev_info_t *, dev_info_t *, 197 scsi_vhci_lun_t *, char *, char *); 198 199 static char *vhci_devnm_to_guid(char *); 200 static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *, 201 int, int (*func)(caddr_t)); 202 static void vhci_intr(struct scsi_pkt *); 203 static int vhci_do_prout(scsi_vhci_priv_t *); 204 static void vhci_run_cmd(void *); 205 static int vhci_do_prin(struct vhci_pkt **); 206 static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *); 207 static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *); 208 static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *); 209 static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *); 210 static void vhci_lun_free(dev_info_t *); 211 static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *, 212 uint8_t, uint8_t); 213 void vhci_update_pathstates(void *); 214 215 #ifdef DEBUG 216 static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int); 217 static void vhci_print_cdb(dev_info_t *dip, uint_t level, 218 char *title, uchar_t *cdb); 219 static void vhci_clean_print(dev_info_t *dev, uint_t level, 220 char *title, uchar_t *data, int len); 221 #endif 222 static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *); 223 static void vhci_uscsi_iodone(struct scsi_pkt *pkt); 224 static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *); 225 226 /* 227 * MP-API related functions 228 */ 229 extern int vhci_mpapi_init(struct scsi_vhci *); 230 extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *); 231 extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *); 232 extern void 
vhci_update_mpapi_data(struct scsi_vhci *, 233 scsi_vhci_lun_t *, mdi_pathinfo_t *); 234 extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *, 235 uint8_t, void*); 236 extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int); 237 extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *, 238 scsi_vhci_lun_t *); 239 240 #define VHCI_DMA_MAX_XFER_CAP INT_MAX 241 242 #define VHCI_MAX_PGR_RETRIES 3 243 244 /* 245 * Macros for the device-type mpxio options 246 */ 247 #define LOAD_BALANCE_OPTIONS "load-balance-options" 248 #define LOGICAL_BLOCK_REGION_SIZE "region-size" 249 #define MPXIO_OPTIONS_LIST "device-type-mpxio-options-list" 250 #define DEVICE_TYPE_STR "device-type" 251 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9') 252 253 static struct cb_ops vhci_cb_ops = { 254 vhci_open, /* open */ 255 vhci_close, /* close */ 256 nodev, /* strategy */ 257 nodev, /* print */ 258 nodev, /* dump */ 259 nodev, /* read */ 260 nodev, /* write */ 261 vhci_ioctl, /* ioctl */ 262 nodev, /* devmap */ 263 nodev, /* mmap */ 264 nodev, /* segmap */ 265 nochpoll, /* chpoll */ 266 ddi_prop_op, /* cb_prop_op */ 267 0, /* streamtab */ 268 D_NEW | D_MP, /* cb_flag */ 269 CB_REV, /* rev */ 270 nodev, /* aread */ 271 nodev /* awrite */ 272 }; 273 274 static struct dev_ops vhci_ops = { 275 DEVO_REV, 276 0, 277 vhci_getinfo, 278 nulldev, /* identify */ 279 nulldev, /* probe */ 280 vhci_attach, /* attach and detach are mandatory */ 281 vhci_detach, 282 nodev, /* reset */ 283 &vhci_cb_ops, /* cb_ops */ 284 NULL, /* bus_ops */ 285 NULL, /* power */ 286 ddi_quiesce_not_needed, /* quiesce */ 287 }; 288 289 extern struct mod_ops mod_driverops; 290 291 static struct modldrv modldrv = { 292 &mod_driverops, 293 vhci_version_name, /* module name */ 294 &vhci_ops 295 }; 296 297 static struct modlinkage modlinkage = { 298 MODREV_1, 299 &modldrv, 300 NULL 301 }; 302 303 static mdi_vhci_ops_t vhci_opinfo = { 304 MDI_VHCI_OPS_REV, 305 vhci_pathinfo_init, /* Pathinfo node init callback */ 306 vhci_pathinfo_uninit, /* Pathinfo uninit callback */ 307 vhci_pathinfo_state_change, /* Pathinfo node state change */ 308 vhci_failover, /* failover callback */ 309 vhci_client_attached, /* client attached callback */ 310 vhci_is_dev_supported /* is device supported by mdi */ 311 }; 312 313 /* 314 * The scsi_failover table defines an ordered set of 'fops' modules supported 315 * by scsi_vhci. Currently, initialize this table from the 'ddi-forceload' 316 * property specified in scsi_vhci.conf. 317 */ 318 static struct scsi_failover { 319 ddi_modhandle_t sf_mod; 320 struct scsi_failover_ops *sf_sfo; 321 } *scsi_failover_table; 322 static uint_t scsi_nfailover; 323 324 int 325 _init(void) 326 { 327 int rval; 328 329 /* 330 * Allocate soft state and prepare to do ddi_soft_state_zalloc() 331 * before registering with the transport first. 
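 *
 * The rest of _init() is ordered so that scsi_hba_init(), the global
 * mutex/cv and the target-port hashes are all set up before
 * mod_install(); if mod_install() fails, everything set up above it is
 * torn down again below.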
332 */ 333 if ((rval = ddi_soft_state_init(&vhci_softstate, 334 sizeof (struct scsi_vhci), 1)) != 0) { 335 VHCI_DEBUG(1, (CE_NOTE, NULL, 336 "!_init:soft state init failed\n")); 337 return (rval); 338 } 339 340 if ((rval = scsi_hba_init(&modlinkage)) != 0) { 341 VHCI_DEBUG(1, (CE_NOTE, NULL, 342 "!_init: scsi hba init failed\n")); 343 ddi_soft_state_fini(&vhci_softstate); 344 return (rval); 345 } 346 347 mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL); 348 cv_init(&vhci_cv, NULL, CV_DRIVER, NULL); 349 350 mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL); 351 vhci_targetmap_byport = mod_hash_create_strhash( 352 "vhci_targetmap_byport", 256, mod_hash_null_valdtor); 353 vhci_targetmap_bypid = mod_hash_create_idhash( 354 "vhci_targetmap_bypid", 256, mod_hash_null_valdtor); 355 356 if ((rval = mod_install(&modlinkage)) != 0) { 357 VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n")); 358 if (vhci_targetmap_bypid) 359 mod_hash_destroy_idhash(vhci_targetmap_bypid); 360 if (vhci_targetmap_byport) 361 mod_hash_destroy_strhash(vhci_targetmap_byport); 362 mutex_destroy(&vhci_targetmap_mutex); 363 cv_destroy(&vhci_cv); 364 mutex_destroy(&vhci_global_mutex); 365 scsi_hba_fini(&modlinkage); 366 ddi_soft_state_fini(&vhci_softstate); 367 } 368 return (rval); 369 } 370 371 372 /* 373 * the system is done with us as a driver, so clean up 374 */ 375 int 376 _fini(void) 377 { 378 int rval; 379 380 /* 381 * don't start cleaning up until we know that the module remove 382 * has worked -- if this works, then we know that each instance 383 * has successfully been DDI_DETACHed 384 */ 385 if ((rval = mod_remove(&modlinkage)) != 0) { 386 VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n")); 387 return (rval); 388 } 389 390 if (vhci_targetmap_bypid) 391 mod_hash_destroy_idhash(vhci_targetmap_bypid); 392 if (vhci_targetmap_byport) 393 mod_hash_destroy_strhash(vhci_targetmap_byport); 394 mutex_destroy(&vhci_targetmap_mutex); 395 cv_destroy(&vhci_cv); 396 mutex_destroy(&vhci_global_mutex); 397 scsi_hba_fini(&modlinkage); 398 ddi_soft_state_fini(&vhci_softstate); 399 400 return (rval); 401 } 402 403 int 404 _info(struct modinfo *modinfop) 405 { 406 return (mod_info(&modlinkage, modinfop)); 407 } 408 409 /* 410 * Lookup scsi_failover by "short name" of failover module. 411 */ 412 struct scsi_failover_ops * 413 vhci_failover_ops_by_name(char *name) 414 { 415 struct scsi_failover *sf; 416 417 for (sf = scsi_failover_table; sf->sf_mod; sf++) { 418 if (sf->sf_sfo == NULL) 419 continue; 420 if (strcmp(sf->sf_sfo->sfo_name, name) == 0) 421 return (sf->sf_sfo); 422 } 423 return (NULL); 424 } 425 426 /* 427 * Load all scsi_failover_ops 'fops' modules. 
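 *
 * The module list comes from the 'ddi-forceload' property in
 * scsi_vhci.conf; each entry is ddi_modopen()ed and its
 * 'scsi_vhci_failover_ops' symbol is recorded in scsi_failover_table.
 * Purely as an illustration (the entries shown here are examples, not
 * a definitive list), the property looks something like:
 *
 *	ddi-forceload =
 *		"misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *		"misc/scsi_vhci/scsi_vhci_f_tpgs",
 *		"misc/scsi_vhci/scsi_vhci_f_sym";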
428 */ 429 static void 430 vhci_failover_modopen(struct scsi_vhci *vhci) 431 { 432 char **module; 433 int i; 434 struct scsi_failover *sf; 435 char **dt; 436 int e; 437 438 if (scsi_failover_table) 439 return; 440 441 /* Get the list of modules from scsi_vhci.conf */ 442 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, 443 vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload", 444 &module, &scsi_nfailover) != DDI_PROP_SUCCESS) { 445 cmn_err(CE_WARN, "scsi_vhci: " 446 "scsi_vhci.conf is missing 'ddi-forceload'"); 447 return; 448 } 449 if (scsi_nfailover == 0) { 450 cmn_err(CE_WARN, "scsi_vhci: " 451 "scsi_vhci.conf has empty 'ddi-forceload'"); 452 ddi_prop_free(module); 453 return; 454 } 455 456 /* allocate failover table based on number of modules */ 457 scsi_failover_table = (struct scsi_failover *) 458 kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1), 459 KM_SLEEP); 460 461 /* loop over modules specified in scsi_vhci.conf and open each module */ 462 for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) { 463 if (module[i] == NULL) 464 continue; 465 466 sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e); 467 if (sf->sf_mod == NULL) { 468 /* 469 * A module returns EEXIST if other software is 470 * supporting the intended function: for example 471 * the scsi_vhci_f_sum_emc module returns EEXIST 472 * from _init if EMC powerpath software is installed. 473 */ 474 if (e != EEXIST) 475 cmn_err(CE_WARN, "scsi_vhci: unable to open " 476 "module '%s', error %d", module[i], e); 477 continue; 478 } 479 sf->sf_sfo = ddi_modsym(sf->sf_mod, 480 "scsi_vhci_failover_ops", &e); 481 if (sf->sf_sfo == NULL) { 482 cmn_err(CE_WARN, "scsi_vhci: " 483 "unable to import 'scsi_failover_ops' from '%s', " 484 "error %d", module[i], e); 485 (void) ddi_modclose(sf->sf_mod); 486 sf->sf_mod = NULL; 487 continue; 488 } 489 490 /* register vid/pid of devices supported with mpapi */ 491 for (dt = sf->sf_sfo->sfo_devices; *dt; dt++) 492 vhci_mpapi_add_dev_prod(vhci, *dt); 493 sf++; 494 } 495 496 /* verify that at least the "well-known" modules were there */ 497 if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL) 498 cmn_err(CE_WARN, "scsi_vhci: well-known module \"" 499 SFO_NAME_SYM "\" not defined in scsi_vhci.conf's " 500 "'ddi-forceload'"); 501 if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL) 502 cmn_err(CE_WARN, "scsi_vhci: well-known module \"" 503 SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's " 504 "'ddi-forceload'"); 505 506 /* call sfo_init for modules that need it */ 507 for (sf = scsi_failover_table; sf->sf_mod; sf++) { 508 if (sf->sf_sfo && sf->sf_sfo->sfo_init) 509 sf->sf_sfo->sfo_init(); 510 } 511 512 ddi_prop_free(module); 513 } 514 515 /* 516 * unload all loaded scsi_failover_ops modules 517 */ 518 static void 519 vhci_failover_modclose() 520 { 521 struct scsi_failover *sf; 522 523 for (sf = scsi_failover_table; sf->sf_mod; sf++) { 524 if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL)) 525 continue; 526 (void) ddi_modclose(sf->sf_mod); 527 sf->sf_mod = NULL; 528 sf->sf_sfo = NULL; 529 } 530 531 if (scsi_failover_table && scsi_nfailover) 532 kmem_free(scsi_failover_table, 533 sizeof (struct scsi_failover) * (scsi_nfailover + 1)); 534 scsi_failover_table = NULL; 535 scsi_nfailover = 0; 536 } 537 538 /* ARGSUSED */ 539 static int 540 vhci_open(dev_t *devp, int flag, int otype, cred_t *credp) 541 { 542 struct scsi_vhci *vhci; 543 544 if (otype != OTYP_CHR) { 545 return (EINVAL); 546 } 547 548 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp))); 549 if (vhci 
== NULL) { 550 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n")); 551 return (ENXIO); 552 } 553 554 mutex_enter(&vhci->vhci_mutex); 555 if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) { 556 mutex_exit(&vhci->vhci_mutex); 557 vhci_log(CE_NOTE, vhci->vhci_dip, 558 "!vhci%d: Already open\n", getminor(*devp)); 559 return (EBUSY); 560 } 561 562 vhci->vhci_state |= VHCI_STATE_OPEN; 563 mutex_exit(&vhci->vhci_mutex); 564 return (0); 565 } 566 567 568 /* ARGSUSED */ 569 static int 570 vhci_close(dev_t dev, int flag, int otype, cred_t *credp) 571 { 572 struct scsi_vhci *vhci; 573 574 if (otype != OTYP_CHR) { 575 return (EINVAL); 576 } 577 578 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev))); 579 if (vhci == NULL) { 580 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n")); 581 return (ENXIO); 582 } 583 584 mutex_enter(&vhci->vhci_mutex); 585 vhci->vhci_state &= ~VHCI_STATE_OPEN; 586 mutex_exit(&vhci->vhci_mutex); 587 588 return (0); 589 } 590 591 /* ARGSUSED */ 592 static int 593 vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode, 594 cred_t *credp, int *rval) 595 { 596 if (IS_DEVCTL(cmd)) { 597 return (vhci_devctl(dev, cmd, data, mode, credp, rval)); 598 } else if (cmd == MP_CMD) { 599 return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval)); 600 } else { 601 return (vhci_ctl(dev, cmd, data, mode, credp, rval)); 602 } 603 } 604 605 /* 606 * attach the module 607 */ 608 static int 609 vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 610 { 611 int rval = DDI_FAILURE; 612 int scsi_hba_attached = 0; 613 int vhci_attached = 0; 614 int mutex_initted = 0; 615 int instance; 616 struct scsi_vhci *vhci; 617 scsi_hba_tran_t *tran; 618 char cache_name_buf[64]; 619 char *data; 620 621 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd)); 622 623 instance = ddi_get_instance(dip); 624 625 switch (cmd) { 626 case DDI_ATTACH: 627 break; 628 629 case DDI_RESUME: 630 case DDI_PM_RESUME: 631 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet" 632 "implemented\n")); 633 return (rval); 634 635 default: 636 VHCI_DEBUG(1, (CE_NOTE, NULL, 637 "!vhci_attach: unknown ddi command\n")); 638 return (rval); 639 } 640 641 /* 642 * Allocate vhci data structure. 
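 *
 * From here attach proceeds roughly as: per-instance soft state and
 * mutex, scsi_hba_tran_alloc() plus entry point setup,
 * mdi_vhci_register(), scsi_hba_attach_setup(), the "devctl" minor
 * node, the taskqs, conf-file flags, mpapi init and finally
 * vhci_failover_modopen(); the attach_fail label unwinds whatever
 * succeeded.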
643 */ 644 if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) { 645 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:" 646 "soft state alloc failed\n")); 647 return (DDI_FAILURE); 648 } 649 650 if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) { 651 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:" 652 "bad soft state\n")); 653 ddi_soft_state_free(vhci_softstate, instance); 654 return (DDI_FAILURE); 655 } 656 657 /* Allocate packet cache */ 658 (void) snprintf(cache_name_buf, sizeof (cache_name_buf), 659 "vhci%d_cache", instance); 660 661 mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL); 662 mutex_initted++; 663 664 /* 665 * Allocate a transport structure 666 */ 667 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 668 ASSERT(tran != NULL); 669 670 vhci->vhci_tran = tran; 671 vhci->vhci_dip = dip; 672 vhci->vhci_instance = instance; 673 674 tran->tran_hba_private = vhci; 675 tran->tran_tgt_init = vhci_scsi_tgt_init; 676 tran->tran_tgt_probe = NULL; 677 tran->tran_tgt_free = vhci_scsi_tgt_free; 678 679 tran->tran_start = vhci_scsi_start; 680 tran->tran_abort = vhci_scsi_abort; 681 tran->tran_reset = vhci_scsi_reset; 682 tran->tran_getcap = vhci_scsi_getcap; 683 tran->tran_setcap = vhci_scsi_setcap; 684 tran->tran_init_pkt = vhci_scsi_init_pkt; 685 tran->tran_destroy_pkt = vhci_scsi_destroy_pkt; 686 tran->tran_dmafree = vhci_scsi_dmafree; 687 tran->tran_sync_pkt = vhci_scsi_sync_pkt; 688 tran->tran_reset_notify = vhci_scsi_reset_notify; 689 690 tran->tran_get_bus_addr = vhci_scsi_get_bus_addr; 691 tran->tran_get_name = vhci_scsi_get_name; 692 tran->tran_bus_reset = NULL; 693 tran->tran_quiesce = NULL; 694 tran->tran_unquiesce = NULL; 695 696 /* 697 * register event notification routines with scsa 698 */ 699 tran->tran_get_eventcookie = NULL; 700 tran->tran_add_eventcall = NULL; 701 tran->tran_remove_eventcall = NULL; 702 tran->tran_post_event = NULL; 703 704 tran->tran_bus_power = vhci_scsi_bus_power; 705 706 tran->tran_bus_config = vhci_scsi_bus_config; 707 tran->tran_bus_unconfig = vhci_scsi_bus_unconfig; 708 709 /* 710 * Attach this instance with the mpxio framework 711 */ 712 if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0) 713 != MDI_SUCCESS) { 714 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:" 715 "mdi_vhci_register failed\n")); 716 goto attach_fail; 717 } 718 vhci_attached++; 719 720 /* 721 * Attach this instance of the hba. 722 * 723 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA 724 * driver, it has nothing to do with DMA. However, when calling 725 * scsi_hba_attach_setup() we need to pass something valid in the 726 * dma attributes parameter. So we just use scsi_alloc_attr. 727 * SCSA itself seems to care only about the dma_attr_minxfer and 728 * dma_attr_burstsizes fields of the dma attributes structure. 729 * It expects those fields to be non-zero.
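 *
 * Purely as an illustration (dummy_attr is a hypothetical
 * ddi_dma_attr_t; scsi_alloc_attr is what is actually passed below),
 * something as simple as this would satisfy that expectation:
 *
 *	dummy_attr.dma_attr_minxfer = 1;
 *	dummy_attr.dma_attr_burstsizes = 1;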
730 */ 731 if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran, 732 SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) { 733 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:" 734 "hba attach failed\n")); 735 goto attach_fail; 736 } 737 scsi_hba_attached++; 738 739 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 740 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) { 741 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:" 742 " ddi_create_minor_node failed\n")); 743 goto attach_fail; 744 } 745 746 /* 747 * Set pm-want-child-notification property for 748 * power management of the phci and client 749 */ 750 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 751 "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) { 752 cmn_err(CE_WARN, 753 "%s%d fail to create pm-want-child-notification? prop", 754 ddi_driver_name(dip), ddi_get_instance(dip)); 755 goto attach_fail; 756 } 757 758 vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0); 759 vhci->vhci_update_pathstates_taskq = 760 taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ, 761 MINCLSYSPRI, 1, 4, 0); 762 ASSERT(vhci->vhci_taskq); 763 ASSERT(vhci->vhci_update_pathstates_taskq); 764 765 /* 766 * Set appropriate configuration flags based on options set in 767 * conf file. 768 */ 769 vhci->vhci_conf_flags = 0; 770 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS, 771 "auto-failback", &data) == DDI_SUCCESS) { 772 if (strcmp(data, "enable") == 0) 773 vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK; 774 ddi_prop_free(data); 775 } 776 777 if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK)) 778 vhci_log(CE_NOTE, dip, "!Auto-failback capability " 779 "disabled through scsi_vhci.conf file."); 780 781 /* 782 * Allocate an mpapi private structure 783 */ 784 vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP); 785 if (vhci_mpapi_init(vhci) != 0) { 786 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: " 787 "vhci_mpapi_init() failed")); 788 } 789 790 vhci_failover_modopen(vhci); /* load failover modules */ 791 792 ddi_report_dev(dip); 793 return (DDI_SUCCESS); 794 795 attach_fail: 796 if (vhci_attached) 797 (void) mdi_vhci_unregister(dip, 0); 798 799 if (scsi_hba_attached) 800 (void) scsi_hba_detach(dip); 801 802 if (vhci->vhci_tran) 803 scsi_hba_tran_free(vhci->vhci_tran); 804 805 if (mutex_initted) { 806 mutex_destroy(&vhci->vhci_mutex); 807 } 808 809 ddi_soft_state_free(vhci_softstate, instance); 810 return (DDI_FAILURE); 811 } 812 813 814 /*ARGSUSED*/ 815 static int 816 vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 817 { 818 int instance = ddi_get_instance(dip); 819 scsi_hba_tran_t *tran; 820 struct scsi_vhci *vhci; 821 822 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd)); 823 824 if ((tran = ddi_get_driver_private(dip)) == NULL) 825 return (DDI_FAILURE); 826 827 vhci = TRAN2HBAPRIVATE(tran); 828 if (!vhci) { 829 return (DDI_FAILURE); 830 } 831 832 switch (cmd) { 833 case DDI_DETACH: 834 break; 835 836 case DDI_SUSPEND: 837 case DDI_PM_SUSPEND: 838 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not yet" 839 "implemented\n")); 840 return (DDI_FAILURE); 841 842 default: 843 VHCI_DEBUG(1, (CE_NOTE, NULL, 844 "!vhci_detach: unknown ddi command\n")); 845 return (DDI_FAILURE); 846 } 847 848 (void) mdi_vhci_unregister(dip, 0); 849 (void) scsi_hba_detach(dip); 850 scsi_hba_tran_free(tran); 851 852 if (ddi_prop_remove(DDI_DEV_T_NONE, dip, 853 "pm-want-child-notification?") != DDI_PROP_SUCCESS) { 854 cmn_err(CE_WARN, 855 "%s%d unable to remove prop 
pm-want_child_notification?", 856 ddi_driver_name(dip), ddi_get_instance(dip)); 857 } 858 if (vhci_restart_timeid != 0) { 859 (void) untimeout(vhci_restart_timeid); 860 } 861 vhci_restart_timeid = 0; 862 863 mutex_destroy(&vhci->vhci_mutex); 864 vhci->vhci_dip = NULL; 865 vhci->vhci_tran = NULL; 866 taskq_destroy(vhci->vhci_taskq); 867 taskq_destroy(vhci->vhci_update_pathstates_taskq); 868 ddi_remove_minor_node(dip, NULL); 869 ddi_soft_state_free(vhci_softstate, instance); 870 871 vhci_failover_modclose(); /* unload failover modules */ 872 return (DDI_SUCCESS); 873 } 874 875 /* 876 * vhci_getinfo() 877 * Given the device number, return the devinfo pointer or the 878 * instance number. 879 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach. 880 */ 881 882 /*ARGSUSED*/ 883 static int 884 vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 885 { 886 struct scsi_vhci *vhcip; 887 int instance = MINOR2INST(getminor((dev_t)arg)); 888 889 switch (cmd) { 890 case DDI_INFO_DEVT2DEVINFO: 891 vhcip = ddi_get_soft_state(vhci_softstate, instance); 892 if (vhcip != NULL) 893 *result = vhcip->vhci_dip; 894 else { 895 *result = NULL; 896 return (DDI_FAILURE); 897 } 898 break; 899 900 case DDI_INFO_DEVT2INSTANCE: 901 *result = (void *)(uintptr_t)instance; 902 break; 903 904 default: 905 return (DDI_FAILURE); 906 } 907 908 return (DDI_SUCCESS); 909 } 910 911 /*ARGSUSED*/ 912 static int 913 vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 914 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 915 { 916 char *guid; 917 scsi_vhci_lun_t *vlun; 918 struct scsi_vhci *vhci; 919 clock_t from_ticks; 920 mdi_pathinfo_t *pip; 921 int rval; 922 923 ASSERT(hba_dip != NULL); 924 ASSERT(tgt_dip != NULL); 925 926 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS, 927 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 928 /* 929 * This must be the .conf node without GUID property. 930 * The node under fp already inserts a delay, so we 931 * just return from here. We rely on this delay to have 932 * all dips be posted to the ndi hotplug thread's newdev 933 * list. This is necessary for the deferred attach 934 * mechanism to work and opens() done soon after boot to 935 * succeed. 936 */ 937 VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid " 938 "property failed")); 939 return (DDI_NOT_WELL_FORMED); 940 } 941 942 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 943 /* 944 * This must be .conf node with the GUID property. We don't 945 * merge property by ndi_merge_node() here because the 946 * devi_addr_buf of .conf node is "" always according the 947 * implementation of vhci_scsi_get_name_bus_addr(). 
948 */ 949 ddi_set_name_addr(tgt_dip, NULL); 950 return (DDI_FAILURE); 951 } 952 953 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip)); 954 ASSERT(vhci != NULL); 955 956 VHCI_DEBUG(4, (CE_NOTE, hba_dip, 957 "!tgt_init: called for %s (instance %d)\n", 958 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip))); 959 960 vlun = vhci_lun_lookup(tgt_dip); 961 962 mutex_enter(&vhci_global_mutex); 963 964 from_ticks = ddi_get_lbolt(); 965 if (vhci_to_ticks == 0) { 966 vhci_to_ticks = from_ticks + 967 drv_usectohz(vhci_init_wait_timeout); 968 } 969 970 #if DEBUG 971 if (vlun) { 972 VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: " 973 "vhci_scsi_tgt_init: guid %s : found vlun 0x%p " 974 "from_ticks %lx to_ticks %lx", 975 guid, (void *)vlun, from_ticks, vhci_to_ticks)); 976 } else { 977 VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: " 978 "vhci_scsi_tgt_init: guid %s : vlun not found " 979 "from_ticks %lx to_ticks %lx", guid, from_ticks, 980 vhci_to_ticks)); 981 } 982 #endif 983 984 rval = mdi_select_path(tgt_dip, NULL, 985 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip); 986 if (rval == MDI_SUCCESS) { 987 mdi_rele_path(pip); 988 } 989 990 /* 991 * Wait for the following conditions : 992 * 1. no vlun available yet 993 * 2. no path established 994 * 3. timer did not expire 995 */ 996 while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) || 997 (rval != MDI_SUCCESS)) { 998 if (vlun && vlun->svl_not_supported) { 999 VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: " 1000 "vlun 0x%p lun guid %s not supported!", 1001 (void *)vlun, guid)); 1002 mutex_exit(&vhci_global_mutex); 1003 ddi_prop_free(guid); 1004 return (DDI_NOT_WELL_FORMED); 1005 } 1006 if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) { 1007 vhci_first_time = 1; 1008 } 1009 if (vhci_first_time == 1) { 1010 VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: " 1011 "no wait for %s. 
from_tick %lx, to_tick %lx", 1012 guid, from_ticks, vhci_to_ticks)); 1013 mutex_exit(&vhci_global_mutex); 1014 ddi_prop_free(guid); 1015 return (DDI_NOT_WELL_FORMED); 1016 } 1017 1018 if (cv_timedwait(&vhci_cv, 1019 &vhci_global_mutex, vhci_to_ticks) == -1) { 1020 /* Timed out */ 1021 #ifdef DEBUG 1022 if (vlun == NULL) { 1023 VHCI_DEBUG(1, (CE_WARN, hba_dip, 1024 "tgt_init: no vlun for %s!", guid)); 1025 } else if (mdi_client_get_path_count(tgt_dip) == 0) { 1026 VHCI_DEBUG(1, (CE_WARN, hba_dip, 1027 "tgt_init: client path count is " 1028 "zero for %s!", guid)); 1029 } else { 1030 VHCI_DEBUG(1, (CE_WARN, hba_dip, 1031 "tgt_init: client path not " 1032 "available yet for %s!", guid)); 1033 } 1034 #endif /* DEBUG */ 1035 mutex_exit(&vhci_global_mutex); 1036 ddi_prop_free(guid); 1037 return (DDI_NOT_WELL_FORMED); 1038 } 1039 vlun = vhci_lun_lookup(tgt_dip); 1040 rval = mdi_select_path(tgt_dip, NULL, 1041 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), 1042 NULL, &pip); 1043 if (rval == MDI_SUCCESS) { 1044 mdi_rele_path(pip); 1045 } 1046 from_ticks = ddi_get_lbolt(); 1047 } 1048 mutex_exit(&vhci_global_mutex); 1049 1050 ASSERT(vlun != NULL); 1051 ddi_prop_free(guid); 1052 1053 scsi_device_hba_private_set(sd, vlun); 1054 1055 return (DDI_SUCCESS); 1056 } 1057 1058 /*ARGSUSED*/ 1059 static void 1060 vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1061 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 1062 { 1063 } 1064 1065 /* 1066 * a PGR register command has started; copy the info we need 1067 */ 1068 int 1069 vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt) 1070 { 1071 struct vhci_pkt *vpkt = TGTPKT2VHCIPKT(pkt); 1072 void *addr; 1073 1074 if (!vpkt->vpkt_tgt_init_bp) 1075 return (TRAN_BADPKT); 1076 1077 addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp, 1078 (vpkt->vpkt_flags & CFLAG_NOWAIT) ? 
VM_NOSLEEP : VM_SLEEP); 1079 if (addr == NULL) 1080 return (TRAN_BUSY); 1081 1082 mutex_enter(&vlun->svl_mutex); 1083 1084 vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:"); 1085 1086 bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) - 1087 (2 * MHIOC_RESV_KEY_SIZE*sizeof (char))); 1088 bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb)); 1089 1090 vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:"); 1091 1092 vlun->svl_time = pkt->pkt_time; 1093 vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount; 1094 vlun->svl_first_path = vpkt->vpkt_path; 1095 mutex_exit(&vlun->svl_mutex); 1096 return (0); 1097 } 1098 1099 /* 1100 * Function name : vhci_scsi_start() 1101 * 1102 * Return Values : TRAN_FATAL_ERROR - vhci has been shutdown 1103 * or other fatal failure 1104 * preventing packet transportation 1105 * TRAN_BUSY - request queue is full 1106 * TRAN_ACCEPT - pkt has been submitted to phci 1107 * (or is held in the waitQ) 1108 * Description : Implements SCSA's tran_start() entry point for 1109 * packet transport 1110 * 1111 */ 1112 static int 1113 vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt) 1114 { 1115 int rval = TRAN_ACCEPT; 1116 int instance, held; 1117 struct scsi_vhci *vhci = ADDR2VHCI(ap); 1118 struct scsi_vhci_lun *vlun = ADDR2VLUN(ap); 1119 struct vhci_pkt *vpkt = TGTPKT2VHCIPKT(pkt); 1120 int flags = 0; 1121 scsi_vhci_priv_t *svp; 1122 dev_info_t *cdip; 1123 client_lb_t lbp; 1124 int restore_lbp = 0; 1125 /* set if pkt is SCSI-II RESERVE cmd */ 1126 int pkt_reserve_cmd = 0; 1127 int reserve_failed = 0; 1128 1129 ASSERT(vhci != NULL); 1130 ASSERT(vpkt != NULL); 1131 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED); 1132 cdip = ADDR2DIP(ap); 1133 1134 /* 1135 * Block IOs if LUN is held or QUIESCED for IOs. 1136 */ 1137 if ((VHCI_LUN_IS_HELD(vlun)) || 1138 ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) { 1139 return (TRAN_BUSY); 1140 } 1141 1142 /* 1143 * vhci_lun needs to be quiesced before SCSI-II RESERVE command 1144 * can be issued. This may require a cv_timedwait, which is 1145 * dangerous to perform in an interrupt context. So if this 1146 * is a RESERVE command a taskq is dispatched to service it. 1147 * This taskq shall again call vhci_scsi_start, but we shall be 1148 * sure its not in an interrupt context. 1149 */ 1150 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) || 1151 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) { 1152 if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) { 1153 if (taskq_dispatch(vhci->vhci_taskq, 1154 vhci_dispatch_scsi_start, (void *) vpkt, 1155 KM_NOSLEEP)) { 1156 return (TRAN_ACCEPT); 1157 } else { 1158 return (TRAN_BUSY); 1159 } 1160 } 1161 1162 /* 1163 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't 1164 * get serviced for a lun. 1165 */ 1166 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held); 1167 if (!held) { 1168 return (TRAN_BUSY); 1169 } else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) == 1170 VLUN_QUIESCED_FLG) { 1171 VHCI_RELEASE_LUN(vlun); 1172 return (TRAN_BUSY); 1173 } 1174 1175 /* 1176 * To ensure that no IOs occur for this LUN for the duration 1177 * of this pkt set the VLUN_QUIESCED_FLG. 1178 * In case this routine needs to exit on error make sure that 1179 * this flag is cleared. 1180 */ 1181 vlun->svl_flags |= VLUN_QUIESCED_FLG; 1182 pkt_reserve_cmd = 1; 1183 1184 /* 1185 * if this is a SCSI-II RESERVE command, set load balancing 1186 * policy to be ALTERNATE PATH to ensure that all subsequent 1187 * IOs are routed on the same path. 
This is because if commands 1188 * are routed across multiple paths then IOs on paths other than 1189 * the one on which the RESERVE was executed will get a 1190 * RESERVATION CONFLICT 1191 */ 1192 lbp = mdi_get_lb_policy(cdip); 1193 if (lbp != LOAD_BALANCE_NONE) { 1194 if (vhci_quiesce_lun(vlun) != 1) { 1195 vlun->svl_flags &= ~VLUN_QUIESCED_FLG; 1196 VHCI_RELEASE_LUN(vlun); 1197 return (TRAN_FATAL_ERROR); 1198 } 1199 vlun->svl_lb_policy_save = lbp; 1200 if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) != 1201 MDI_SUCCESS) { 1202 vlun->svl_flags &= ~VLUN_QUIESCED_FLG; 1203 VHCI_RELEASE_LUN(vlun); 1204 return (TRAN_FATAL_ERROR); 1205 } 1206 restore_lbp = 1; 1207 } 1208 /* 1209 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h 1210 * To narrow this window where a reserve command may be sent 1211 * down an inactive path the path states first need to be 1212 * updated. Before calling vhci_update_pathstates reset 1213 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set 1214 * for this lun. This shall prevent an unnecessary reset 1215 * from being sent out. 1216 */ 1217 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 1218 vhci_update_pathstates((void *)vlun); 1219 } 1220 1221 instance = ddi_get_instance(vhci->vhci_dip); 1222 1223 /* 1224 * If the command is PRIN with action of zero, then the cmd 1225 * is reading PR keys which requires filtering on completion. 1226 * Data cache sync must be guaranteed. 1227 */ 1228 if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) && 1229 (vpkt->vpkt_org_vpkt == NULL)) { 1230 vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT; 1231 } 1232 1233 /* 1234 * Do not defer bind for PKT_DMA_PARTIAL 1235 */ 1236 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 1237 1238 /* This is a non pkt_dma_partial case */ 1239 if ((rval = vhci_bind_transport( 1240 ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC)) 1241 != TRAN_ACCEPT) { 1242 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip, 1243 "!vhci%d %x: failed to bind transport: " 1244 "vlun 0x%p pkt_reserved %x restore_lbp %x," 1245 "lbp %x", instance, rval, (void *)vlun, 1246 pkt_reserve_cmd, restore_lbp, lbp)); 1247 if (restore_lbp) 1248 (void) mdi_set_lb_policy(cdip, lbp); 1249 if (pkt_reserve_cmd) 1250 vlun->svl_flags &= ~VLUN_QUIESCED_FLG; 1251 return (rval); 1252 } 1253 VHCI_DEBUG(8, (CE_NOTE, NULL, 1254 "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt)); 1255 } 1256 ASSERT(vpkt->vpkt_hba_pkt != NULL); 1257 ASSERT(vpkt->vpkt_path != NULL); 1258 1259 /* 1260 * This is the chance to adjust the pHCI's pkt and other information 1261 * from target driver's pkt. 1262 */ 1263 VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n", 1264 (void *)vpkt)); 1265 vhci_update_pHCI_pkt(vpkt, pkt); 1266 1267 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 1268 if (vpkt->vpkt_path != vlun->svl_resrv_pip) { 1269 VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip, 1270 "!vhci_bind: reserve flag set for vlun 0x%p, but, " 1271 "pktpath 0x%p resrv path 0x%p differ. 
lb_policy %x", 1272 (void *)vlun, (void *)vpkt->vpkt_path, 1273 (void *)vlun->svl_resrv_pip, 1274 mdi_get_lb_policy(cdip))); 1275 reserve_failed = 1; 1276 } 1277 } 1278 1279 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 1280 if (svp == NULL || reserve_failed) { 1281 if (pkt_reserve_cmd) { 1282 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip, 1283 "!vhci_bind returned null svp vlun 0x%p", 1284 (void *)vlun)); 1285 vlun->svl_flags &= ~VLUN_QUIESCED_FLG; 1286 if (restore_lbp) 1287 (void) mdi_set_lb_policy(cdip, lbp); 1288 } 1289 pkt_cleanup: 1290 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 1291 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1292 vpkt->vpkt_hba_pkt = NULL; 1293 if (vpkt->vpkt_path) { 1294 mdi_rele_path(vpkt->vpkt_path); 1295 vpkt->vpkt_path = NULL; 1296 } 1297 } 1298 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) && 1299 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 1300 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) { 1301 sema_v(&vlun->svl_pgr_sema); 1302 } 1303 return (TRAN_BUSY); 1304 } 1305 1306 VHCI_INCR_PATH_CMDCOUNT(svp); 1307 1308 /* 1309 * Ensure that no other IOs raced ahead, while a RESERVE cmd was 1310 * QUIESCING the same lun. 1311 */ 1312 if ((!pkt_reserve_cmd) && 1313 ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) { 1314 VHCI_DECR_PATH_CMDCOUNT(svp); 1315 goto pkt_cleanup; 1316 } 1317 1318 if ((pkt->pkt_cdbp[0] == SCMD_PRIN) || 1319 (pkt->pkt_cdbp[0] == SCMD_PROUT)) { 1320 /* 1321 * currently this thread only handles running PGR 1322 * commands, so don't bother creating it unless 1323 * something interesting is going to happen (like 1324 * either a PGR out, or a PGR in with enough space 1325 * to hold the keys that are getting returned) 1326 */ 1327 mutex_enter(&vlun->svl_mutex); 1328 if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) && 1329 (pkt->pkt_cdbp[0] == SCMD_PROUT)) { 1330 vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon", 1331 1, MINCLSYSPRI, 1, 4, 0); 1332 vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG; 1333 } 1334 mutex_exit(&vlun->svl_mutex); 1335 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) && 1336 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 1337 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) { 1338 if (rval = vhci_pgr_register_start(vlun, pkt)) { 1339 /* an error */ 1340 sema_v(&vlun->svl_pgr_sema); 1341 return (rval); 1342 } 1343 } 1344 } 1345 1346 /* 1347 * SCSI-II RESERVE cmd is not expected in polled mode. 1348 * If this changes it needs to be handled for the polled scenario. 1349 */ 1350 flags = vpkt->vpkt_hba_pkt->pkt_flags; 1351 1352 /* 1353 * Set the path_instance *before* sending the scsi_pkt down the path 1354 * to mpxio's pHCI so that additional path abstractions at a pHCI 1355 * level (like maybe iSCSI at some point in the future) can update 1356 * the path_instance. 
1357 */ 1358 if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt)) 1359 vpkt->vpkt_hba_pkt->pkt_path_instance = 1360 mdi_pi_get_path_instance(vpkt->vpkt_path); 1361 1362 rval = scsi_transport(vpkt->vpkt_hba_pkt); 1363 if (rval == TRAN_ACCEPT) { 1364 if (flags & FLAG_NOINTR) { 1365 struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt; 1366 struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt; 1367 1368 ASSERT(tpkt != NULL); 1369 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp); 1370 tpkt->pkt_resid = pkt->pkt_resid; 1371 tpkt->pkt_state = pkt->pkt_state; 1372 tpkt->pkt_statistics = pkt->pkt_statistics; 1373 tpkt->pkt_reason = pkt->pkt_reason; 1374 1375 if ((*(pkt->pkt_scbp) == STATUS_CHECK) && 1376 (pkt->pkt_state & STATE_ARQ_DONE)) { 1377 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp, 1378 vpkt->vpkt_tgt_init_scblen); 1379 } 1380 1381 VHCI_DECR_PATH_CMDCOUNT(svp); 1382 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 1383 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1384 vpkt->vpkt_hba_pkt = NULL; 1385 if (vpkt->vpkt_path) { 1386 mdi_rele_path(vpkt->vpkt_path); 1387 vpkt->vpkt_path = NULL; 1388 } 1389 } 1390 /* 1391 * This path will not automatically retry pkts 1392 * internally, therefore, vpkt_org_vpkt should 1393 * never be set. 1394 */ 1395 ASSERT(vpkt->vpkt_org_vpkt == NULL); 1396 scsi_hba_pkt_comp(tpkt); 1397 } 1398 return (rval); 1399 } else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) && 1400 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 1401 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) { 1402 /* the command exited with bad status */ 1403 sema_v(&vlun->svl_pgr_sema); 1404 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) { 1405 /* the command exited with bad status */ 1406 sema_v(&vlun->svl_pgr_sema); 1407 } else if (pkt_reserve_cmd) { 1408 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip, 1409 "!vhci_scsi_start: reserve failed vlun 0x%p", 1410 (void *)vlun)); 1411 vlun->svl_flags &= ~VLUN_QUIESCED_FLG; 1412 if (restore_lbp) 1413 (void) mdi_set_lb_policy(cdip, lbp); 1414 } 1415 1416 ASSERT(vpkt->vpkt_hba_pkt != NULL); 1417 VHCI_DECR_PATH_CMDCOUNT(svp); 1418 1419 /* Do not destroy phci packet information for PKT_DMA_PARTIAL */ 1420 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 1421 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1422 vpkt->vpkt_hba_pkt = NULL; 1423 if (vpkt->vpkt_path) { 1424 MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR); 1425 mdi_rele_path(vpkt->vpkt_path); 1426 vpkt->vpkt_path = NULL; 1427 } 1428 } 1429 return (TRAN_BUSY); 1430 } 1431 1432 /* 1433 * Function name : vhci_scsi_reset() 1434 * 1435 * Return Values : 0 - reset failed 1436 * 1 - reset succeeded 1437 */ 1438 1439 /* ARGSUSED */ 1440 static int 1441 vhci_scsi_reset(struct scsi_address *ap, int level) 1442 { 1443 int rval = 0; 1444 1445 cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level); 1446 if ((level == RESET_TARGET) || (level == RESET_LUN)) { 1447 return (vhci_scsi_reset_target(ap, level, TRUE)); 1448 } else if (level == RESET_ALL) { 1449 return (vhci_scsi_reset_bus(ap)); 1450 } 1451 1452 return (rval); 1453 } 1454 1455 /* 1456 * vhci_recovery_reset: 1457 * Issues reset to the device 1458 * Input: 1459 * vlun - vhci lun pointer of the device 1460 * ap - address of the device 1461 * select_path: 1462 * If select_path is FALSE, then the address specified in ap is 1463 * the path on which reset will be issued. 1464 * If select_path is TRUE, then path is obtained by calling 1465 * mdi_select_path. 1466 * 1467 * recovery_depth: 1468 * Caller can specify the level of reset. 
1469 * VHCI_DEPTH_LUN - 1470 * Issues LUN RESET if device supports lun reset. 1471 * VHCI_DEPTH_TARGET - 1472 * If Lun Reset fails or the device does not support 1473 * Lun Reset, issues TARGET RESET 1474 * VHCI_DEPTH_ALL - 1475 * If Lun Reset fails or the device does not support 1476 * Lun Reset, issues TARGET RESET. 1477 * If TARGET RESET does not succeed, issues Bus Reset. 1478 */ 1479 1480 static int 1481 vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap, 1482 uint8_t select_path, uint8_t recovery_depth) 1483 { 1484 int ret = 0; 1485 1486 ASSERT(ap != NULL); 1487 1488 if (vlun && vlun->svl_support_lun_reset == 1) { 1489 ret = vhci_scsi_reset_target(ap, RESET_LUN, 1490 select_path); 1491 } 1492 1493 recovery_depth--; 1494 1495 if ((ret == 0) && recovery_depth) { 1496 ret = vhci_scsi_reset_target(ap, RESET_TARGET, 1497 select_path); 1498 recovery_depth--; 1499 } 1500 1501 if ((ret == 0) && recovery_depth) { 1502 (void) scsi_reset(ap, RESET_ALL); 1503 } 1504 1505 return (ret); 1506 } 1507 1508 /* 1509 * Note: The scsi_address passed to this routine could be the scsi_address 1510 * for the virtual device or the physical device. No assumptions should be 1511 * made in this routine about the contents of the ap structure. 1512 * Further, note that the child dip would be the dip of the ssd node regardless 1513 * of the scsi_address passed in. 1514 */ 1515 static int 1516 vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path) 1517 { 1518 dev_info_t *vdip, *cdip; 1519 mdi_pathinfo_t *pip = NULL; 1520 mdi_pathinfo_t *npip = NULL; 1521 int rval = -1; 1522 scsi_vhci_priv_t *svp = NULL; 1523 struct scsi_address *pap = NULL; 1524 scsi_hba_tran_t *hba = NULL; 1525 int sps; 1526 struct scsi_vhci *vhci = NULL; 1527 1528 if (select_path != TRUE) { 1529 ASSERT(ap != NULL); 1530 if (level == RESET_LUN) { 1531 hba = ap->a_hba_tran; 1532 ASSERT(hba != NULL); 1533 return (hba->tran_reset(ap, RESET_LUN)); 1534 } 1535 return (scsi_reset(ap, level)); 1536 } 1537 1538 cdip = ADDR2DIP(ap); 1539 ASSERT(cdip != NULL); 1540 vdip = ddi_get_parent(cdip); 1541 ASSERT(vdip != NULL); 1542 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 1543 ASSERT(vhci != NULL); 1544 1545 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip); 1546 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 1547 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: " 1548 "Unable to get a path, dip 0x%p", (void *)cdip)); 1549 return (0); 1550 } 1551 again: 1552 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 1553 if (svp == NULL) { 1554 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: " 1555 "priv is NULL, pip 0x%p", (void *)pip)); 1556 mdi_rele_path(pip); 1557 return (0); 1558 } 1559 1560 if (svp->svp_psd == NULL) { 1561 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: " 1562 "psd is NULL, pip 0x%p, svp 0x%p", 1563 (void *)pip, (void *)svp)); 1564 mdi_rele_path(pip); 1565 return (0); 1566 } 1567 1568 pap = &svp->svp_psd->sd_address; 1569 hba = pap->a_hba_tran; 1570 1571 ASSERT(pap != NULL); 1572 ASSERT(hba != NULL); 1573 1574 if (hba->tran_reset != NULL) { 1575 if (hba->tran_reset(pap, level) == 0) { 1576 vhci_log(CE_WARN, vdip, "!%s%d: " 1577 "path %s, reset %d failed", 1578 ddi_driver_name(cdip), ddi_get_instance(cdip), 1579 mdi_pi_spathname(pip), level); 1580 1581 /* 1582 * Select next path and issue the reset, repeat 1583 * until all paths are exhausted 1584 */ 1585 sps = mdi_select_path(cdip, NULL, 1586 MDI_SELECT_ONLINE_PATH, pip, &npip); 1587 if 
((sps != MDI_SUCCESS) || (npip == NULL)) { 1588 mdi_rele_path(pip); 1589 return (0); 1590 } 1591 mdi_rele_path(pip); 1592 pip = npip; 1593 goto again; 1594 } 1595 mdi_rele_path(pip); 1596 mutex_enter(&vhci->vhci_mutex); 1597 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 1598 &vhci->vhci_reset_notify_listf); 1599 mutex_exit(&vhci->vhci_mutex); 1600 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: " 1601 "reset %d sent down pip:%p for cdip:%p\n", level, 1602 (void *)pip, (void *)cdip)); 1603 return (1); 1604 } 1605 mdi_rele_path(pip); 1606 return (0); 1607 } 1608 1609 1610 /* ARGSUSED */ 1611 static int 1612 vhci_scsi_reset_bus(struct scsi_address *ap) 1613 { 1614 return (1); 1615 } 1616 1617 1618 /* 1619 * called by vhci_getcap and vhci_setcap to get and set (respectively) 1620 * SCSI capabilities 1621 */ 1622 /* ARGSUSED */ 1623 static int 1624 vhci_commoncap(struct scsi_address *ap, char *cap, 1625 int val, int tgtonly, int doset) 1626 { 1627 struct scsi_vhci *vhci = ADDR2VHCI(ap); 1628 struct scsi_vhci_lun *vlun = ADDR2VLUN(ap); 1629 int cidx; 1630 int rval = 0; 1631 1632 if (cap == (char *)0) { 1633 VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip, 1634 "!vhci_commoncap: invalid arg")); 1635 return (rval); 1636 } 1637 1638 if (vlun == NULL) { 1639 VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip, 1640 "!vhci_commoncap: vlun is null")); 1641 return (rval); 1642 } 1643 1644 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) { 1645 return (UNDEFINED); 1646 } 1647 1648 /* 1649 * Process setcap request. 1650 */ 1651 if (doset) { 1652 /* 1653 * At present, we can only set binary (0/1) values 1654 */ 1655 switch (cidx) { 1656 case SCSI_CAP_ARQ: 1657 if (val == 0) { 1658 rval = 0; 1659 } else { 1660 rval = 1; 1661 } 1662 break; 1663 1664 case SCSI_CAP_LUN_RESET: 1665 if (tgtonly == 0) { 1666 VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip, 1667 "scsi_vhci_setcap: " 1668 "Returning error since whom = 0")); 1669 rval = -1; 1670 break; 1671 } 1672 /* 1673 * Set the capability accordingly. 1674 */ 1675 mutex_enter(&vlun->svl_mutex); 1676 vlun->svl_support_lun_reset = val; 1677 rval = val; 1678 mutex_exit(&vlun->svl_mutex); 1679 break; 1680 1681 case SCSI_CAP_SECTOR_SIZE: 1682 mutex_enter(&vlun->svl_mutex); 1683 vlun->svl_sector_size = val; 1684 vlun->svl_setcap_done = 1; 1685 mutex_exit(&vlun->svl_mutex); 1686 (void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL); 1687 1688 /* Always return success */ 1689 rval = 1; 1690 break; 1691 1692 default: 1693 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip, 1694 "!vhci_setcap: unsupported %d", cidx)); 1695 rval = UNDEFINED; 1696 break; 1697 } 1698 1699 VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip, 1700 "!set cap: cap=%s, val/tgtonly/doset/rval = " 1701 "0x%x/0x%x/0x%x/%d\n", 1702 cap, val, tgtonly, doset, rval)); 1703 1704 } else { 1705 /* 1706 * Process getcap request. 1707 */ 1708 switch (cidx) { 1709 case SCSI_CAP_DMA_MAX: 1710 /* 1711 * For X86 this capability is caught in scsi_ifgetcap(). 1712 * XXX Should this be getting the value from the pHCI? 1713 */ 1714 rval = (int)VHCI_DMA_MAX_XFER_CAP; 1715 break; 1716 1717 case SCSI_CAP_INITIATOR_ID: 1718 rval = 0x00; 1719 break; 1720 1721 case SCSI_CAP_ARQ: 1722 case SCSI_CAP_RESET_NOTIFICATION: 1723 case SCSI_CAP_TAGGED_QING: 1724 rval = 1; 1725 break; 1726 1727 case SCSI_CAP_SCSI_VERSION: 1728 rval = 3; 1729 break; 1730 1731 case SCSI_CAP_INTERCONNECT_TYPE: 1732 rval = INTERCONNECT_FABRIC; 1733 break; 1734 1735 case SCSI_CAP_LUN_RESET: 1736 /* 1737 * scsi_vhci will always return success for LUN reset. 
1738 * When request for doing LUN reset comes 1739 * through scsi_reset entry point, at that time attempt 1740 * will be made to do reset through all the possible 1741 * paths. 1742 */ 1743 mutex_enter(&vlun->svl_mutex); 1744 rval = vlun->svl_support_lun_reset; 1745 mutex_exit(&vlun->svl_mutex); 1746 VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip, 1747 "scsi_vhci_getcap:" 1748 "Getting the Lun reset capability %d", rval)); 1749 break; 1750 1751 case SCSI_CAP_SECTOR_SIZE: 1752 mutex_enter(&vlun->svl_mutex); 1753 rval = vlun->svl_sector_size; 1754 mutex_exit(&vlun->svl_mutex); 1755 break; 1756 1757 case SCSI_CAP_CDB_LEN: 1758 rval = VHCI_SCSI_CDB_SIZE; 1759 break; 1760 1761 case SCSI_CAP_DMA_MAX_ARCH: 1762 /* 1763 * For X86 this capability is caught in scsi_ifgetcap(). 1764 * XXX Should this be getting the value from the pHCI? 1765 */ 1766 rval = 0; 1767 break; 1768 1769 default: 1770 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip, 1771 "!vhci_getcap: unsupported %d", cidx)); 1772 rval = UNDEFINED; 1773 break; 1774 } 1775 1776 VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip, 1777 "!get cap: cap=%s, val/tgtonly/doset/rval = " 1778 "0x%x/0x%x/0x%x/%d\n", 1779 cap, val, tgtonly, doset, rval)); 1780 } 1781 return (rval); 1782 } 1783 1784 1785 /* 1786 * Function name : vhci_scsi_getcap() 1787 * 1788 */ 1789 static int 1790 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom) 1791 { 1792 return (vhci_commoncap(ap, cap, 0, whom, 0)); 1793 } 1794 1795 static int 1796 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom) 1797 { 1798 return (vhci_commoncap(ap, cap, value, whom, 1)); 1799 } 1800 1801 /* 1802 * Function name : vhci_scsi_abort() 1803 */ 1804 /* ARGSUSED */ 1805 static int 1806 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 1807 { 1808 return (0); 1809 } 1810 1811 /* 1812 * Function name : vhci_scsi_init_pkt 1813 * 1814 * Return Values : pointer to scsi_pkt, or NULL 1815 */ 1816 /* ARGSUSED */ 1817 static struct scsi_pkt * 1818 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 1819 struct buf *bp, int cmdlen, int statuslen, int tgtlen, 1820 int flags, int (*callback)(caddr_t), caddr_t arg) 1821 { 1822 struct scsi_vhci *vhci = ADDR2VHCI(ap); 1823 struct vhci_pkt *vpkt; 1824 int rval; 1825 int newpkt = 0; 1826 struct scsi_pkt *pktp; 1827 1828 1829 if (pkt == NULL) { 1830 if (cmdlen > VHCI_SCSI_CDB_SIZE) { 1831 if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) || 1832 ((flags & VHCI_SCSI_OSD_PKT_FLAGS) != 1833 VHCI_SCSI_OSD_PKT_FLAGS)) { 1834 VHCI_DEBUG(1, (CE_NOTE, NULL, 1835 "!init pkt: cdb size not supported\n")); 1836 return (NULL); 1837 } 1838 } 1839 1840 pktp = scsi_hba_pkt_alloc(vhci->vhci_dip, 1841 ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback, 1842 arg); 1843 1844 if (pktp == NULL) { 1845 return (NULL); 1846 } 1847 1848 /* Get the vhci's private structure */ 1849 vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private); 1850 ASSERT(vpkt); 1851 1852 /* Save the target driver's packet */ 1853 vpkt->vpkt_tgt_pkt = pktp; 1854 1855 /* 1856 * Save pkt_tgt_init_pkt fields if deferred binding 1857 * is needed or for other purposes. 1858 */ 1859 vpkt->vpkt_tgt_init_pkt_flags = flags; 1860 vpkt->vpkt_flags = (callback == NULL_FUNC) ? 
CFLAG_NOWAIT : 0; 1861 vpkt->vpkt_state = VHCI_PKT_IDLE; 1862 vpkt->vpkt_tgt_init_cdblen = cmdlen; 1863 vpkt->vpkt_tgt_init_scblen = statuslen; 1864 newpkt = 1; 1865 } else { /* pkt not NULL */ 1866 vpkt = pkt->pkt_ha_private; 1867 } 1868 1869 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt " 1870 "vpkt %p flags %x\n", (void *)vpkt, flags)); 1871 1872 /* Clear any stale error flags */ 1873 if (bp) { 1874 bioerror(bp, 0); 1875 } 1876 1877 vpkt->vpkt_tgt_init_bp = bp; 1878 1879 if (flags & PKT_DMA_PARTIAL) { 1880 1881 /* 1882 * Immediate binding is needed. 1883 * Target driver may not set this flag in next invocation. 1884 * vhci has to remember this flag was set during first 1885 * invocation of vhci_scsi_init_pkt. 1886 */ 1887 vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL; 1888 } 1889 1890 if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) { 1891 1892 /* 1893 * Re-initialize some of the target driver packet state 1894 * information. 1895 */ 1896 vpkt->vpkt_tgt_pkt->pkt_state = 0; 1897 vpkt->vpkt_tgt_pkt->pkt_statistics = 0; 1898 vpkt->vpkt_tgt_pkt->pkt_reason = 0; 1899 1900 /* 1901 * Binding a vpkt->vpkt_path for this IO at init_time. 1902 * If an IO error happens later, target driver will clear 1903 * this vpkt->vpkt_path binding before re-init IO again. 1904 */ 1905 VHCI_DEBUG(8, (CE_NOTE, NULL, 1906 "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n", 1907 (void *)vpkt, newpkt)); 1908 if (pkt && vpkt->vpkt_hba_pkt) { 1909 VHCI_DEBUG(4, (CE_NOTE, NULL, 1910 "v_s_i_p calling update_pHCI_pkt resid %ld\n", 1911 pkt->pkt_resid)); 1912 vhci_update_pHCI_pkt(vpkt, pkt); 1913 } 1914 if (callback == SLEEP_FUNC) { 1915 rval = vhci_bind_transport( 1916 ap, vpkt, flags, callback); 1917 } else { 1918 rval = vhci_bind_transport( 1919 ap, vpkt, flags, NULL_FUNC); 1920 } 1921 VHCI_DEBUG(8, (CE_NOTE, NULL, 1922 "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n", 1923 (void *)vpkt, rval)); 1924 if (bp) { 1925 if (rval == TRAN_FATAL_ERROR) { 1926 /* 1927 * No paths available. Could not bind 1928 * any pHCI. Setting EFAULT as a way 1929 * to indicate no DMA is mapped. 1930 */ 1931 bioerror(bp, EFAULT); 1932 } else { 1933 /* 1934 * Do not indicate any pHCI errors to 1935 * target driver otherwise. 
1936 */ 1937 bioerror(bp, 0); 1938 } 1939 } 1940 if (rval != TRAN_ACCEPT) { 1941 VHCI_DEBUG(8, (CE_NOTE, NULL, 1942 "vhci_scsi_init_pkt: " 1943 "v_b_t failed 0x%p newpkt %x\n", 1944 (void *)vpkt, newpkt)); 1945 if (newpkt) { 1946 scsi_hba_pkt_free(ap, 1947 vpkt->vpkt_tgt_pkt); 1948 } 1949 return (NULL); 1950 } 1951 ASSERT(vpkt->vpkt_hba_pkt != NULL); 1952 ASSERT(vpkt->vpkt_path != NULL); 1953 1954 /* Update the resid for the target driver */ 1955 vpkt->vpkt_tgt_pkt->pkt_resid = 1956 vpkt->vpkt_hba_pkt->pkt_resid; 1957 } 1958 1959 return (vpkt->vpkt_tgt_pkt); 1960 } 1961 1962 /* 1963 * Function name : vhci_scsi_destroy_pkt 1964 * 1965 * Return Values : none 1966 */ 1967 static void 1968 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 1969 { 1970 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 1971 1972 VHCI_DEBUG(8, (CE_NOTE, NULL, 1973 "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt)); 1974 1975 vpkt->vpkt_tgt_init_pkt_flags = 0; 1976 if (vpkt->vpkt_hba_pkt) { 1977 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1978 vpkt->vpkt_hba_pkt = NULL; 1979 } 1980 if (vpkt->vpkt_path) { 1981 mdi_rele_path(vpkt->vpkt_path); 1982 vpkt->vpkt_path = NULL; 1983 } 1984 1985 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED); 1986 scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt); 1987 } 1988 1989 /* 1990 * Function name : vhci_scsi_dmafree() 1991 * 1992 * Return Values : none 1993 */ 1994 /*ARGSUSED*/ 1995 static void 1996 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 1997 { 1998 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 1999 2000 VHCI_DEBUG(6, (CE_NOTE, NULL, 2001 "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt)); 2002 2003 ASSERT(vpkt != NULL); 2004 if (vpkt->vpkt_hba_pkt) { 2005 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 2006 vpkt->vpkt_hba_pkt = NULL; 2007 } 2008 if (vpkt->vpkt_path) { 2009 mdi_rele_path(vpkt->vpkt_path); 2010 vpkt->vpkt_path = NULL; 2011 } 2012 } 2013 2014 /* 2015 * Function name : vhci_scsi_sync_pkt() 2016 * 2017 * Return Values : none 2018 */ 2019 /*ARGSUSED*/ 2020 static void 2021 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 2022 { 2023 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 2024 2025 ASSERT(vpkt != NULL); 2026 if (vpkt->vpkt_hba_pkt) { 2027 scsi_sync_pkt(vpkt->vpkt_hba_pkt); 2028 } 2029 } 2030 2031 /* 2032 * routine for reset notification setup, to register or cancel. 2033 */ 2034 static int 2035 vhci_scsi_reset_notify(struct scsi_address *ap, int flag, 2036 void (*callback)(caddr_t), caddr_t arg) 2037 { 2038 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2039 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg, 2040 &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf)); 2041 } 2042 2043 static int 2044 vhci_scsi_get_name_bus_addr(struct scsi_device *sd, 2045 char *name, int len, int bus_addr) 2046 { 2047 dev_info_t *cdip; 2048 char *guid; 2049 scsi_vhci_lun_t *vlun; 2050 2051 ASSERT(sd != NULL); 2052 ASSERT(name != NULL); 2053 2054 *name = 0; 2055 cdip = sd->sd_dev; 2056 2057 ASSERT(cdip != NULL); 2058 2059 if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS) 2060 return (1); 2061 2062 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS, 2063 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) 2064 return (1); 2065 2066 /* 2067 * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>". 
2068 * <guid> bus_addr argument == 0 2069 * <bus_addr> bus_addr argument != 0 2070 * Since the <guid> is already provided with unit-address, we just 2071 * provide failover module in <bus_addr> to keep output shorter. 2072 */ 2073 vlun = ADDR2VLUN(&sd->sd_address); 2074 if (bus_addr == 0) { 2075 /* report the guid: */ 2076 (void) snprintf(name, len, "g%s", guid); 2077 } else if (vlun && vlun->svl_fops_name) { 2078 /* report the name of the failover module */ 2079 (void) snprintf(name, len, "%s", vlun->svl_fops_name); 2080 } 2081 2082 ddi_prop_free(guid); 2083 return (1); 2084 } 2085 2086 static int 2087 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len) 2088 { 2089 return (vhci_scsi_get_name_bus_addr(sd, name, len, 1)); 2090 } 2091 2092 static int 2093 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len) 2094 { 2095 return (vhci_scsi_get_name_bus_addr(sd, name, len, 0)); 2096 } 2097 2098 /* 2099 * Return a pointer to the guid part of the devnm. 2100 * devnm format is "nodename@busaddr", busaddr format is "gGUID". 2101 */ 2102 static char * 2103 vhci_devnm_to_guid(char *devnm) 2104 { 2105 char *cp = devnm; 2106 2107 if (devnm == NULL) 2108 return (NULL); 2109 2110 while (*cp != '\0' && *cp != '@') 2111 cp++; 2112 if (*cp == '@' && *(cp + 1) == 'g') 2113 return (cp + 2); 2114 return (NULL); 2115 } 2116 2117 static int 2118 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags, 2119 int (*func)(caddr_t)) 2120 { 2121 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2122 dev_info_t *cdip = ADDR2DIP(ap); 2123 mdi_pathinfo_t *pip = NULL; 2124 mdi_pathinfo_t *npip = NULL; 2125 scsi_vhci_priv_t *svp = NULL; 2126 struct scsi_device *psd = NULL; 2127 struct scsi_address *address = NULL; 2128 struct scsi_pkt *pkt = NULL; 2129 int rval = -1; 2130 int pgr_sema_held = 0; 2131 int held; 2132 int mps_flag = MDI_SELECT_ONLINE_PATH; 2133 struct scsi_vhci_lun *vlun; 2134 time_t tnow; 2135 int path_instance = 0; 2136 2137 vlun = ADDR2VLUN(ap); 2138 ASSERT(vlun != 0); 2139 2140 if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) && 2141 (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2142 VHCI_PROUT_REGISTER) || 2143 ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2144 VHCI_PROUT_R_AND_IGNORE))) { 2145 if (!sema_tryp(&vlun->svl_pgr_sema)) 2146 return (TRAN_BUSY); 2147 pgr_sema_held = 1; 2148 if (vlun->svl_first_path != NULL) { 2149 rval = mdi_select_path(cdip, NULL, 2150 MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH, 2151 NULL, &pip); 2152 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 2153 VHCI_DEBUG(4, (CE_NOTE, NULL, 2154 "vhci_bind_transport: path select fail\n")); 2155 } else { 2156 npip = pip; 2157 do { 2158 if (npip == vlun->svl_first_path) { 2159 VHCI_DEBUG(4, (CE_NOTE, NULL, 2160 "vhci_bind_transport: " 2161 "valid first path 0x%p\n", 2162 (void *) 2163 vlun->svl_first_path)); 2164 pip = vlun->svl_first_path; 2165 goto bind_path; 2166 } 2167 pip = npip; 2168 rval = mdi_select_path(cdip, NULL, 2169 MDI_SELECT_ONLINE_PATH | 2170 MDI_SELECT_STANDBY_PATH, 2171 pip, &npip); 2172 mdi_rele_path(pip); 2173 } while ((rval == MDI_SUCCESS) && 2174 (npip != NULL)); 2175 } 2176 } 2177 2178 if (vlun->svl_first_path) { 2179 VHCI_DEBUG(4, (CE_NOTE, NULL, 2180 "vhci_bind_transport: invalid first path 0x%p\n", 2181 (void *)vlun->svl_first_path)); 2182 vlun->svl_first_path = NULL; 2183 } 2184 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) { 2185 if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) { 2186 if (!sema_tryp(&vlun->svl_pgr_sema)) 2187 return (TRAN_BUSY); 2188 } 
2189 pgr_sema_held = 1; 2190 } 2191 2192 /* 2193 * If the path is already bound for PKT_PARTIAL_DMA case, 2194 * try to use the same path. 2195 */ 2196 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) { 2197 VHCI_DEBUG(4, (CE_NOTE, NULL, 2198 "vhci_bind_transport: PKT_PARTIAL_DMA " 2199 "vpkt 0x%p, path 0x%p\n", 2200 (void *)vpkt, (void *)vpkt->vpkt_path)); 2201 pip = vpkt->vpkt_path; 2202 goto bind_path; 2203 } 2204 2205 /* 2206 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set 2207 * indicates that mdi_select_path should be called to select a 2208 * specific instance. 2209 * 2210 * NB: Condition pkt_path_instance reference on proper allocation. 2211 */ 2212 if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) && 2213 scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) { 2214 path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance; 2215 } 2216 2217 /* 2218 * If reservation is active bind the transport directly to the pip 2219 * with the reservation. 2220 */ 2221 if (vpkt->vpkt_hba_pkt == NULL) { 2222 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 2223 if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) { 2224 pip = vlun->svl_resrv_pip; 2225 mdi_hold_path(pip); 2226 vlun->svl_waiting_for_activepath = 0; 2227 rval = MDI_SUCCESS; 2228 goto bind_path; 2229 } else { 2230 if (pgr_sema_held) { 2231 sema_v(&vlun->svl_pgr_sema); 2232 } 2233 return (TRAN_BUSY); 2234 } 2235 } 2236 try_again: 2237 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp, 2238 path_instance ? MDI_SELECT_PATH_INSTANCE : 0, 2239 (void *)(intptr_t)path_instance, &pip); 2240 if (rval == MDI_BUSY) { 2241 if (pgr_sema_held) { 2242 sema_v(&vlun->svl_pgr_sema); 2243 } 2244 return (TRAN_BUSY); 2245 } else if (rval == MDI_DEVI_ONLINING) { 2246 /* 2247 * if we are here then we are in the midst of 2248 * an attach/probe of the client device. 2249 * We attempt to bind to ONLINE path if available, 2250 * else it is OK to bind to a STANDBY path (instead 2251 * of triggering a failover) because IO associated 2252 * with attach/probe (eg. INQUIRY, block 0 read) 2253 * are completed by targets even on passive paths 2254 * If no ONLINE paths available, it is important 2255 * to set svl_waiting_for_activepath for two 2256 * reasons: (1) avoid sense analysis in the 2257 * "external failure detection" codepath in 2258 * vhci_intr(). Failure to do so will result in 2259 * infinite loop (unless an ONLINE path becomes 2260 * available at some point) (2) avoid 2261 * unnecessary failover (see "---Waiting For Active 2262 * Path---" comment below). 
2263 */ 2264 VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining " 2265 "state\n", (void *)cdip)); 2266 pip = NULL; 2267 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp, 2268 mps_flag, NULL, &pip); 2269 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 2270 if (vlun->svl_waiting_for_activepath == 0) { 2271 vlun->svl_waiting_for_activepath = 1; 2272 vlun->svl_wfa_time = ddi_get_time(); 2273 } 2274 mps_flag |= MDI_SELECT_STANDBY_PATH; 2275 rval = mdi_select_path(cdip, 2276 vpkt->vpkt_tgt_init_bp, 2277 mps_flag, NULL, &pip); 2278 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 2279 if (pgr_sema_held) { 2280 sema_v(&vlun->svl_pgr_sema); 2281 } 2282 return (TRAN_FATAL_ERROR); 2283 } 2284 goto bind_path; 2285 } 2286 } else if ((rval == MDI_FAILURE) || 2287 ((rval == MDI_NOPATH) && (path_instance))) { 2288 if (pgr_sema_held) { 2289 sema_v(&vlun->svl_pgr_sema); 2290 } 2291 return (TRAN_FATAL_ERROR); 2292 } 2293 2294 if ((pip == NULL) || (rval == MDI_NOPATH)) { 2295 while (vlun->svl_waiting_for_activepath) { 2296 /* 2297 * ---Waiting For Active Path--- 2298 * This device was discovered across a 2299 * passive path; lets wait for a little 2300 * bit, hopefully an active path will 2301 * show up obviating the need for a 2302 * failover 2303 */ 2304 tnow = ddi_get_time(); 2305 if (tnow - vlun->svl_wfa_time >= 60) { 2306 vlun->svl_waiting_for_activepath = 0; 2307 } else { 2308 drv_usecwait(1000); 2309 if (vlun->svl_waiting_for_activepath 2310 == 0) { 2311 /* 2312 * an active path has come 2313 * online! 2314 */ 2315 goto try_again; 2316 } 2317 } 2318 } 2319 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held); 2320 if (!held) { 2321 VHCI_DEBUG(4, (CE_NOTE, NULL, 2322 "!Lun not held\n")); 2323 if (pgr_sema_held) { 2324 sema_v(&vlun->svl_pgr_sema); 2325 } 2326 return (TRAN_BUSY); 2327 } 2328 /* 2329 * now that the LUN is stable, one last check 2330 * to make sure no other changes sneaked in 2331 * (like a path coming online or a 2332 * failover initiated by another thread) 2333 */ 2334 pip = NULL; 2335 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp, 2336 0, NULL, &pip); 2337 if (pip != NULL) { 2338 VHCI_RELEASE_LUN(vlun); 2339 vlun->svl_waiting_for_activepath = 0; 2340 goto bind_path; 2341 } 2342 2343 /* 2344 * Check if there is an ONLINE path OR a STANDBY path 2345 * available. If none is available, do not attempt 2346 * to do a failover, just return a fatal error at this 2347 * point. 2348 */ 2349 npip = NULL; 2350 rval = mdi_select_path(cdip, NULL, 2351 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), 2352 NULL, &npip); 2353 if ((npip == NULL) || (rval != MDI_SUCCESS)) { 2354 /* 2355 * No paths available, jus return FATAL error. 
2356 */ 2357 VHCI_RELEASE_LUN(vlun); 2358 if (pgr_sema_held) { 2359 sema_v(&vlun->svl_pgr_sema); 2360 } 2361 return (TRAN_FATAL_ERROR); 2362 } 2363 mdi_rele_path(npip); 2364 VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking " 2365 "mdi_failover\n")); 2366 rval = mdi_failover(vhci->vhci_dip, cdip, 2367 MDI_FAILOVER_ASYNC); 2368 if (rval == MDI_FAILURE) { 2369 VHCI_RELEASE_LUN(vlun); 2370 if (pgr_sema_held) { 2371 sema_v(&vlun->svl_pgr_sema); 2372 } 2373 return (TRAN_FATAL_ERROR); 2374 } else if (rval == MDI_BUSY) { 2375 VHCI_RELEASE_LUN(vlun); 2376 if (pgr_sema_held) { 2377 sema_v(&vlun->svl_pgr_sema); 2378 } 2379 return (TRAN_BUSY); 2380 } else { 2381 if (pgr_sema_held) { 2382 sema_v(&vlun->svl_pgr_sema); 2383 } 2384 return (TRAN_BUSY); 2385 } 2386 } 2387 vlun->svl_waiting_for_activepath = 0; 2388 bind_path: 2389 vpkt->vpkt_path = pip; 2390 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 2391 ASSERT(svp != NULL); 2392 2393 psd = svp->svp_psd; 2394 ASSERT(psd != NULL); 2395 address = &psd->sd_address; 2396 } else { 2397 pkt = vpkt->vpkt_hba_pkt; 2398 address = &pkt->pkt_address; 2399 } 2400 2401 /* Verify match of specified path_instance and selected path_instance */ 2402 ASSERT((path_instance == 0) || 2403 (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path))); 2404 2405 /* 2406 * For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever 2407 * target driver calls vhci_scsi_init_pkt. 2408 */ 2409 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && 2410 vpkt->vpkt_path && vpkt->vpkt_hba_pkt) { 2411 VHCI_DEBUG(4, (CE_NOTE, NULL, 2412 "vhci_bind_transport: PKT_PARTIAL_DMA " 2413 "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n", 2414 (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt)); 2415 pkt = vpkt->vpkt_hba_pkt; 2416 address = &pkt->pkt_address; 2417 } 2418 2419 if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) { 2420 pkt = scsi_init_pkt(address, pkt, 2421 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen, 2422 vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL); 2423 2424 if (pkt == NULL) { 2425 VHCI_DEBUG(4, (CE_NOTE, NULL, 2426 "!bind transport: 0x%p 0x%p 0x%p\n", 2427 (void *)vhci, (void *)psd, (void *)vpkt)); 2428 if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) { 2429 MDI_PI_ERRSTAT(vpkt->vpkt_path, 2430 MDI_PI_TRANSERR); 2431 mdi_rele_path(vpkt->vpkt_path); 2432 vpkt->vpkt_path = NULL; 2433 } 2434 if (pgr_sema_held) { 2435 sema_v(&vlun->svl_pgr_sema); 2436 } 2437 /* 2438 * Consider it a fatal error if b_error is 2439 * set as a result of DMA binding failure 2440 * vs. a condition of being temporarily out of 2441 * some resource 2442 */ 2443 if (vpkt->vpkt_tgt_init_bp == NULL || 2444 geterror(vpkt->vpkt_tgt_init_bp)) 2445 return (TRAN_FATAL_ERROR); 2446 else 2447 return (TRAN_BUSY); 2448 } 2449 } 2450 2451 pkt->pkt_private = vpkt; 2452 vpkt->vpkt_hba_pkt = pkt; 2453 return (TRAN_ACCEPT); 2454 } 2455 2456 2457 /*PRINTFLIKE3*/ 2458 void 2459 vhci_log(int level, dev_info_t *dip, const char *fmt, ...) 
2460 { 2461 char buf[256]; 2462 va_list ap; 2463 2464 va_start(ap, fmt); 2465 (void) vsprintf(buf, fmt, ap); 2466 va_end(ap); 2467 2468 scsi_log(dip, "scsi_vhci", level, buf); 2469 } 2470 2471 /* do a PGR out with the information we've saved away */ 2472 static int 2473 vhci_do_prout(scsi_vhci_priv_t *svp) 2474 { 2475 2476 struct scsi_pkt *new_pkt; 2477 struct buf *bp; 2478 scsi_vhci_lun_t *vlun = svp->svp_svl; 2479 int rval, retry, nr_retry, ua_retry; 2480 uint8_t *sns, skey; 2481 2482 bp = getrbuf(KM_SLEEP); 2483 bp->b_flags = B_WRITE; 2484 bp->b_resid = 0; 2485 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout; 2486 bp->b_bcount = vlun->svl_bcount; 2487 2488 VHCI_INCR_PATH_CMDCOUNT(svp); 2489 2490 new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, 2491 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0, 2492 SLEEP_FUNC, NULL); 2493 if (new_pkt == NULL) { 2494 VHCI_DECR_PATH_CMDCOUNT(svp); 2495 freerbuf(bp); 2496 cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed"); 2497 return (0); 2498 } 2499 mutex_enter(&vlun->svl_mutex); 2500 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout; 2501 bp->b_bcount = vlun->svl_bcount; 2502 bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp, 2503 sizeof (vlun->svl_cdb)); 2504 new_pkt->pkt_time = vlun->svl_time; 2505 mutex_exit(&vlun->svl_mutex); 2506 new_pkt->pkt_flags = FLAG_NOINTR; 2507 2508 ua_retry = nr_retry = retry = 0; 2509 again: 2510 rval = vhci_do_scsi_cmd(new_pkt); 2511 if (rval != 1) { 2512 if ((new_pkt->pkt_reason == CMD_CMPLT) && 2513 (SCBP_C(new_pkt) == STATUS_CHECK) && 2514 (new_pkt->pkt_state & STATE_ARQ_DONE)) { 2515 sns = (uint8_t *) 2516 &(((struct scsi_arq_status *)(uintptr_t) 2517 (new_pkt->pkt_scbp))->sts_sensedata); 2518 skey = scsi_sense_key(sns); 2519 if ((skey == KEY_UNIT_ATTENTION) || 2520 (skey == KEY_NOT_READY)) { 2521 int max_retry; 2522 struct scsi_failover_ops *fops; 2523 fops = vlun->svl_fops; 2524 rval = fops->sfo_analyze_sense(svp->svp_psd, 2525 sns, vlun->svl_fops_ctpriv); 2526 if (rval == SCSI_SENSE_NOT_READY) { 2527 max_retry = vhci_prout_not_ready_retry; 2528 retry = nr_retry++; 2529 delay(1*drv_usectohz(1000000)); 2530 } else { 2531 /* chk for state change and update */ 2532 if (rval == SCSI_SENSE_STATE_CHANGED) { 2533 int held; 2534 VHCI_HOLD_LUN(vlun, 2535 VH_NOSLEEP, held); 2536 if (!held) { 2537 rval = TRAN_BUSY; 2538 } else { 2539 /* chk for alua first */ 2540 vhci_update_pathstates( 2541 (void *)vlun); 2542 } 2543 } 2544 retry = ua_retry++; 2545 max_retry = VHCI_MAX_PGR_RETRIES; 2546 } 2547 if (retry < max_retry) { 2548 VHCI_DEBUG(4, (CE_WARN, NULL, 2549 "!vhci_do_prout retry 0x%x " 2550 "(0x%x 0x%x 0x%x)", 2551 SCBP_C(new_pkt), 2552 new_pkt->pkt_cdbp[0], 2553 new_pkt->pkt_cdbp[1], 2554 new_pkt->pkt_cdbp[2])); 2555 goto again; 2556 } 2557 rval = 0; 2558 VHCI_DEBUG(4, (CE_WARN, NULL, 2559 "!vhci_do_prout 0x%x " 2560 "(0x%x 0x%x 0x%x)", 2561 SCBP_C(new_pkt), 2562 new_pkt->pkt_cdbp[0], 2563 new_pkt->pkt_cdbp[1], 2564 new_pkt->pkt_cdbp[2])); 2565 } else if (skey == KEY_ILLEGAL_REQUEST) 2566 rval = VHCI_PGR_ILLEGALOP; 2567 } 2568 } else { 2569 rval = 1; 2570 } 2571 scsi_destroy_pkt(new_pkt); 2572 VHCI_DECR_PATH_CMDCOUNT(svp); 2573 freerbuf(bp); 2574 return (rval); 2575 } 2576 2577 static void 2578 vhci_run_cmd(void *arg) 2579 { 2580 struct scsi_pkt *pkt = (struct scsi_pkt *)arg; 2581 struct scsi_pkt *tpkt; 2582 scsi_vhci_priv_t *svp; 2583 mdi_pathinfo_t *pip, *npip; 2584 scsi_vhci_lun_t *vlun; 2585 dev_info_t *cdip; 2586 scsi_vhci_priv_t *nsvp; 2587 int fail = 0; 2588 int rval; 2589 struct vhci_pkt *vpkt; 2590 uchar_t cdb_1; 
2591 vhci_prout_t *prout; 2592 2593 vpkt = (struct vhci_pkt *)pkt->pkt_private; 2594 tpkt = vpkt->vpkt_tgt_pkt; 2595 pip = vpkt->vpkt_path; 2596 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 2597 if (svp == NULL) { 2598 tpkt->pkt_reason = CMD_TRAN_ERR; 2599 tpkt->pkt_statistics = STAT_ABORTED; 2600 goto done; 2601 } 2602 vlun = svp->svp_svl; 2603 prout = &vlun->svl_prout; 2604 if (SCBP_C(pkt) != STATUS_GOOD) 2605 fail++; 2606 cdip = vlun->svl_dip; 2607 pip = npip = NULL; 2608 rval = mdi_select_path(cdip, NULL, 2609 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip); 2610 if ((rval != MDI_SUCCESS) || (npip == NULL)) { 2611 VHCI_DEBUG(4, (CE_NOTE, NULL, 2612 "vhci_run_cmd: no path! 0x%p\n", (void *)svp)); 2613 tpkt->pkt_reason = CMD_TRAN_ERR; 2614 tpkt->pkt_statistics = STAT_ABORTED; 2615 goto done; 2616 } 2617 2618 cdb_1 = vlun->svl_cdb[1]; 2619 vlun->svl_cdb[1] &= 0xe0; 2620 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 2621 2622 do { 2623 nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 2624 if (nsvp == NULL) { 2625 VHCI_DEBUG(4, (CE_NOTE, NULL, 2626 "vhci_run_cmd: no " 2627 "client priv! 0x%p offlined?\n", 2628 (void *)npip)); 2629 goto next_path; 2630 } 2631 if (vlun->svl_first_path == npip) { 2632 goto next_path; 2633 } else { 2634 if (vhci_do_prout(nsvp) != 1) 2635 fail++; 2636 } 2637 next_path: 2638 pip = npip; 2639 rval = mdi_select_path(cdip, NULL, 2640 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 2641 pip, &npip); 2642 mdi_rele_path(pip); 2643 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 2644 2645 vlun->svl_cdb[1] = cdb_1; 2646 2647 if (fail) { 2648 VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, " 2649 "couldn't be replicated on all paths", 2650 ddi_driver_name(cdip), ddi_get_instance(cdip))); 2651 vhci_print_prout_keys(vlun, "vhci_run_cmd: "); 2652 2653 if (SCBP_C(pkt) != STATUS_GOOD) { 2654 tpkt->pkt_reason = CMD_TRAN_ERR; 2655 tpkt->pkt_statistics = STAT_ABORTED; 2656 } 2657 } else { 2658 vlun->svl_pgr_active = 1; 2659 vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:"); 2660 2661 bcopy((const void *)prout->service_key, 2662 (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE); 2663 bcopy((const void *)prout->res_key, 2664 (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE); 2665 2666 vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:"); 2667 } 2668 done: 2669 if (SCBP_C(pkt) == STATUS_GOOD) 2670 vlun->svl_first_path = NULL; 2671 2672 if (svp) 2673 VHCI_DECR_PATH_CMDCOUNT(svp); 2674 2675 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 2676 scsi_destroy_pkt(pkt); 2677 vpkt->vpkt_hba_pkt = NULL; 2678 if (vpkt->vpkt_path) { 2679 mdi_rele_path(vpkt->vpkt_path); 2680 vpkt->vpkt_path = NULL; 2681 } 2682 } 2683 2684 sema_v(&vlun->svl_pgr_sema); 2685 /* 2686 * The PROUT commands are not included in the automatic retry 2687 * mechanism, therefore, vpkt_org_vpkt should never be set here. 2688 */ 2689 ASSERT(vpkt->vpkt_org_vpkt == NULL); 2690 scsi_hba_pkt_comp(tpkt); 2691 } 2692 2693 /* 2694 * Get the keys registered with this target. Since we will have 2695 * registered the same key with multiple initiators, strip out 2696 * any duplicate keys. 2697 * 2698 * The pointers which will be used to filter the registered keys from 2699 * the device will be stored in filter_prin and filter_pkt. 
If the 2700 * allocation length of the buffer was sufficient for the number of 2701 * parameter data bytes available to be returned by the device then the 2702 * key filtering will use the keylist returned from the original 2703 * request. If the allocation length of the buffer was not sufficient, 2704 * then the filtering will use the keylist returned from the request 2705 * that is resent below. 2706 * 2707 * If the device returns an additional length field that is greater than 2708 * the allocation length of the buffer, then allocate a new buffer which 2709 * can accommodate the number of parameter data bytes available to be 2710 * returned. Resend the scsi PRIN command, filter out the duplicate 2711 * keys and return as many of the unique keys found that was originally 2712 * requested and set the additional length field equal to the data bytes 2713 * of unique reservation keys available to be returned. 2714 * 2715 * If the device returns an additional length field that is less than or 2716 * equal to the allocation length of the buffer, then all the available 2717 * keys registered were returned by the device. Filter out the 2718 * duplicate keys and return all of the unique keys found and set the 2719 * additional length field equal to the data bytes of the reservation 2720 * keys to be returned. 2721 */ 2722 2723 #define VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation)) 2724 2725 static int 2726 vhci_do_prin(struct vhci_pkt **intr_vpkt) 2727 { 2728 scsi_vhci_priv_t *svp; 2729 struct vhci_pkt *vpkt = *intr_vpkt; 2730 vhci_prin_readkeys_t *prin; 2731 scsi_vhci_lun_t *vlun; 2732 struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address); 2733 2734 struct buf *new_bp = NULL; 2735 struct scsi_pkt *new_pkt = NULL; 2736 struct vhci_pkt *new_vpkt = NULL; 2737 uint32_t needed_length; 2738 int rval = VHCI_CMD_CMPLT; 2739 uint32_t prin_length = 0; 2740 uint32_t svl_prin_length = 0; 2741 2742 ASSERT(vpkt->vpkt_path); 2743 svp = mdi_pi_get_vhci_private(vpkt->vpkt_path); 2744 ASSERT(svp); 2745 vlun = svp->svp_svl; 2746 ASSERT(vlun); 2747 2748 /* 2749 * If the caller only asked for an amount of data that would not 2750 * be enough to include any key data it is likely that they will 2751 * send the next command with a buffer size based on the information 2752 * from this header. Doing recovery on this would be a duplication 2753 * of efforts. 2754 */ 2755 if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) { 2756 rval = VHCI_CMD_CMPLT; 2757 goto exit; 2758 } 2759 2760 if (vpkt->vpkt_org_vpkt == NULL) { 2761 /* 2762 * Can fail as sleep is not allowed. 2763 */ 2764 prin = (vhci_prin_readkeys_t *) 2765 bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP); 2766 } else { 2767 /* 2768 * The retry buf doesn't need to be mapped in. 2769 */ 2770 prin = (vhci_prin_readkeys_t *) 2771 vpkt->vpkt_tgt_init_bp->b_un.b_daddr; 2772 } 2773 2774 if (prin == NULL) { 2775 VHCI_DEBUG(5, (CE_WARN, NULL, 2776 "vhci_do_prin: bp_mapin_common failed.")); 2777 rval = VHCI_CMD_ERROR; 2778 goto fail; 2779 } 2780 2781 prin_length = BE_32(prin->length); 2782 2783 /* 2784 * According to SPC-3r22, sec 4.3.4.6: "If the amount of 2785 * information to be transferred exceeds the maximum value 2786 * that the ALLOCATION LENGTH field is capable of specifying, 2787 * the device server shall...terminate the command with CHECK 2788 * CONDITION status". The ALLOCATION LENGTH field of the 2789 * PERSISTENT RESERVE IN command is 2 bytes. 
We should never 2790 * get here with an ADDITIONAL LENGTH greater than 0xFFFF 2791 * so if we do, then it is an error! 2792 */ 2793 2794 2795 if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) { 2796 VHCI_DEBUG(5, (CE_NOTE, NULL, 2797 "vhci_do_prin: Device returned invalid " 2798 "length 0x%x\n", prin_length)); 2799 rval = VHCI_CMD_ERROR; 2800 goto fail; 2801 } 2802 needed_length = prin_length + VHCI_PRIN_HEADER_SZ; 2803 2804 /* 2805 * If prin->length is greater than the byte count allocated in the 2806 * original buffer, then resend the request with enough buffer 2807 * allocated to get all of the available registered keys. 2808 */ 2809 if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) && 2810 (vpkt->vpkt_org_vpkt == NULL)) { 2811 2812 new_pkt = vhci_create_retry_pkt(vpkt); 2813 if (new_pkt == NULL) { 2814 rval = VHCI_CMD_ERROR; 2815 goto fail; 2816 } 2817 new_vpkt = TGTPKT2VHCIPKT(new_pkt); 2818 2819 /* 2820 * This is the buf with buffer pointer 2821 * where the prin readkeys will be 2822 * returned from the device 2823 */ 2824 new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address, 2825 NULL, needed_length, B_READ, NULL_FUNC, NULL); 2826 if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) { 2827 if (new_bp) { 2828 scsi_free_consistent_buf(new_bp); 2829 } 2830 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt); 2831 rval = VHCI_CMD_ERROR; 2832 goto fail; 2833 } 2834 new_bp->b_bcount = needed_length; 2835 new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8); 2836 new_pkt->pkt_cdbp[8] = (uchar_t)needed_length; 2837 2838 rval = VHCI_CMD_RETRY; 2839 2840 new_vpkt->vpkt_tgt_init_bp = new_bp; 2841 } 2842 2843 if (rval == VHCI_CMD_RETRY) { 2844 2845 /* 2846 * There were more keys then the original request asked for. 2847 */ 2848 mdi_pathinfo_t *path_holder = vpkt->vpkt_path; 2849 2850 /* 2851 * Release the old path because it does not matter which path 2852 * this command is sent down. This allows the normal bind 2853 * transport mechanism to be used. 2854 */ 2855 if (vpkt->vpkt_path != NULL) { 2856 mdi_rele_path(vpkt->vpkt_path); 2857 vpkt->vpkt_path = NULL; 2858 } 2859 2860 /* 2861 * Dispatch the retry command 2862 */ 2863 if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start, 2864 (void *) new_vpkt, KM_NOSLEEP) == NULL) { 2865 if (path_holder) { 2866 vpkt->vpkt_path = path_holder; 2867 mdi_hold_path(path_holder); 2868 } 2869 scsi_free_consistent_buf(new_bp); 2870 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt); 2871 rval = VHCI_CMD_ERROR; 2872 goto fail; 2873 } 2874 2875 /* 2876 * If we return VHCI_CMD_RETRY, that means the caller 2877 * is going to bail and wait for the reissued command 2878 * to complete. In that case, we need to decrement 2879 * the path command count right now. In any other 2880 * case, it'll be decremented by the caller. 2881 */ 2882 VHCI_DECR_PATH_CMDCOUNT(svp); 2883 goto exit; 2884 2885 } 2886 2887 if (rval == VHCI_CMD_CMPLT) { 2888 /* 2889 * The original request got all of the keys or the recovery 2890 * packet returns. 
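 *
 * For reference, the READ KEYS parameter data parsed below is assumed
 * to follow the standard SPC-3 layout (sketch, byte offsets):
 *
 *	0-3	PRGENERATION		-> prin->generation
 *	4-7	ADDITIONAL LENGTH	-> prin->length (stored big-endian)
 *	8-	reservation keys	-> prin->keylist[], each
 *					   MHIOC_RESV_KEY_SIZE (8) bytes long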
2891 */ 2892 int new; 2893 int old; 2894 int num_keys = prin_length / MHIOC_RESV_KEY_SIZE; 2895 2896 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n", 2897 num_keys)); 2898 2899 #ifdef DEBUG 2900 VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n")); 2901 if (vhci_debug == 5) 2902 vhci_print_prin_keys(prin, num_keys); 2903 VHCI_DEBUG(5, (CE_NOTE, NULL, 2904 "vhci_do_prin: MPxIO old keys:\n")); 2905 if (vhci_debug == 5) 2906 vhci_print_prin_keys(&vlun->svl_prin, num_keys); 2907 #endif 2908 2909 /* 2910 * Filter out all duplicate keys returned from the device 2911 * We know that we use a different key for every host, so we 2912 * can simply strip out duplicates. Otherwise we would need to 2913 * do more bookkeeping to figure out which keys to strip out. 2914 */ 2915 2916 new = 0; 2917 2918 /* 2919 * If we got at least 1 key copy it. 2920 */ 2921 if (num_keys > 0) { 2922 vlun->svl_prin.keylist[0] = prin->keylist[0]; 2923 new++; 2924 } 2925 2926 /* 2927 * find next unique key. 2928 */ 2929 for (old = 1; old < num_keys; old++) { 2930 int j; 2931 int match = 0; 2932 2933 if (new >= VHCI_NUM_RESV_KEYS) 2934 break; 2935 for (j = 0; j < new; j++) { 2936 if (bcmp(&prin->keylist[old], 2937 &vlun->svl_prin.keylist[j], 2938 sizeof (mhioc_resv_key_t)) == 0) { 2939 match = 1; 2940 break; 2941 } 2942 } 2943 if (!match) { 2944 vlun->svl_prin.keylist[new] = 2945 prin->keylist[old]; 2946 new++; 2947 } 2948 } 2949 2950 /* Stored Big Endian */ 2951 vlun->svl_prin.generation = prin->generation; 2952 svl_prin_length = new * sizeof (mhioc_resv_key_t); 2953 /* Stored Big Endian */ 2954 vlun->svl_prin.length = BE_32(svl_prin_length); 2955 svl_prin_length += VHCI_PRIN_HEADER_SZ; 2956 2957 /* 2958 * If we arrived at this point after issuing a retry, make sure 2959 * that we put everything back the way it originally was so 2960 * that the target driver can complete the command correctly. 2961 */ 2962 if (vpkt->vpkt_org_vpkt != NULL) { 2963 new_bp = vpkt->vpkt_tgt_init_bp; 2964 2965 scsi_free_consistent_buf(new_bp); 2966 2967 vpkt = vhci_sync_retry_pkt(vpkt); 2968 *intr_vpkt = vpkt; 2969 2970 /* 2971 * Make sure the original buffer is mapped into kernel 2972 * space before we try to copy the filtered keys into 2973 * it. 2974 */ 2975 prin = (vhci_prin_readkeys_t *)bp_mapin_common( 2976 vpkt->vpkt_tgt_init_bp, VM_NOSLEEP); 2977 } 2978 2979 /* 2980 * Now copy the desired number of prin keys into the original 2981 * target buffer. 2982 */ 2983 if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) { 2984 /* 2985 * It is safe to return all of the available unique 2986 * keys 2987 */ 2988 bcopy(&vlun->svl_prin, prin, svl_prin_length); 2989 } else { 2990 /* 2991 * Not all of the available keys were requested by the 2992 * original command. 2993 */ 2994 bcopy(&vlun->svl_prin, prin, 2995 vpkt->vpkt_tgt_init_bp->b_bcount); 2996 } 2997 #ifdef DEBUG 2998 VHCI_DEBUG(5, (CE_NOTE, NULL, 2999 "vhci_do_prin: To Application:\n")); 3000 if (vhci_debug == 5) 3001 vhci_print_prin_keys(prin, new); 3002 VHCI_DEBUG(5, (CE_NOTE, NULL, 3003 "vhci_do_prin: MPxIO new keys:\n")); 3004 if (vhci_debug == 5) 3005 vhci_print_prin_keys(&vlun->svl_prin, new); 3006 #endif 3007 } 3008 fail: 3009 if (rval == VHCI_CMD_ERROR) { 3010 /* 3011 * If we arrived at this point after issuing a 3012 * retry, make sure that we put everything back 3013 * the way it originally was so that ssd can 3014 * complete the command correctly. 
3015 */ 3016 3017 if (vpkt->vpkt_org_vpkt != NULL) { 3018 new_bp = vpkt->vpkt_tgt_init_bp; 3019 if (new_bp != NULL) { 3020 scsi_free_consistent_buf(new_bp); 3021 } 3022 3023 new_vpkt = vpkt; 3024 vpkt = vpkt->vpkt_org_vpkt; 3025 3026 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 3027 new_vpkt->vpkt_tgt_pkt); 3028 } 3029 3030 /* 3031 * Mark this command completion as having an error so that 3032 * ssd will retry the command. 3033 */ 3034 3035 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 3036 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 3037 3038 rval = VHCI_CMD_CMPLT; 3039 } 3040 exit: 3041 /* 3042 * Make sure that the semaphore is only released once. 3043 */ 3044 if (rval == VHCI_CMD_CMPLT) { 3045 sema_v(&vlun->svl_pgr_sema); 3046 } 3047 3048 return (rval); 3049 } 3050 3051 static void 3052 vhci_intr(struct scsi_pkt *pkt) 3053 { 3054 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 3055 struct scsi_pkt *tpkt; 3056 scsi_vhci_priv_t *svp; 3057 scsi_vhci_lun_t *vlun; 3058 int rval, held; 3059 struct scsi_failover_ops *fops; 3060 uint8_t *sns, skey, asc, ascq; 3061 mdi_pathinfo_t *lpath; 3062 static char *timeout_err = "Command Timeout"; 3063 static char *parity_err = "Parity Error"; 3064 char *err_str = NULL; 3065 dev_info_t *vdip, *cdip; 3066 char *cpath; 3067 3068 ASSERT(vpkt != NULL); 3069 tpkt = vpkt->vpkt_tgt_pkt; 3070 ASSERT(tpkt != NULL); 3071 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3072 ASSERT(svp != NULL); 3073 vlun = svp->svp_svl; 3074 ASSERT(vlun != NULL); 3075 lpath = vpkt->vpkt_path; 3076 3077 /* 3078 * sync up the target driver's pkt with the pkt that 3079 * we actually used 3080 */ 3081 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp); 3082 tpkt->pkt_resid = pkt->pkt_resid; 3083 tpkt->pkt_state = pkt->pkt_state; 3084 tpkt->pkt_statistics = pkt->pkt_statistics; 3085 tpkt->pkt_reason = pkt->pkt_reason; 3086 3087 /* Return path_instance information back to the target driver. */ 3088 if (scsi_pkt_allocated_correctly(tpkt)) { 3089 if (scsi_pkt_allocated_correctly(pkt)) { 3090 /* 3091 * If both packets were correctly allocated, 3092 * return path returned by pHCI. 
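 * (The inverse is also supported: a target driver that wants to
 * force I/O down one particular path can, before scsi_transport(9F),
 * do something along the lines of
 *	pkt->pkt_flags |= FLAG_PKT_PATH_INSTANCE;
 *	pkt->pkt_path_instance = path_instance;
 * with a path_instance previously obtained from
 * mdi_pi_get_path_instance(). vhci_bind_transport() honors this
 * through MDI_SELECT_PATH_INSTANCE, provided the pkt passes
 * scsi_pkt_allocated_correctly().)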
3093 */ 3094 tpkt->pkt_path_instance = pkt->pkt_path_instance; 3095 } else { 3096 /* Otherwise return path of pHCI we used */ 3097 tpkt->pkt_path_instance = 3098 mdi_pi_get_path_instance(lpath); 3099 } 3100 } 3101 3102 if (pkt->pkt_cdbp[0] == SCMD_PROUT && 3103 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 3104 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE)) { 3105 if ((SCBP_C(pkt) != STATUS_GOOD) || 3106 (pkt->pkt_reason != CMD_CMPLT)) { 3107 sema_v(&vlun->svl_pgr_sema); 3108 } 3109 } else if (pkt->pkt_cdbp[0] == SCMD_PRIN) { 3110 if (pkt->pkt_reason != CMD_CMPLT || 3111 (SCBP_C(pkt) != STATUS_GOOD)) { 3112 sema_v(&vlun->svl_pgr_sema); 3113 } 3114 } 3115 3116 switch (pkt->pkt_reason) { 3117 case CMD_CMPLT: 3118 /* 3119 * cmd completed successfully, check for scsi errors 3120 */ 3121 switch (*(pkt->pkt_scbp)) { 3122 case STATUS_CHECK: 3123 if (pkt->pkt_state & STATE_ARQ_DONE) { 3124 sns = (uint8_t *) 3125 &(((struct scsi_arq_status *)(uintptr_t) 3126 (pkt->pkt_scbp))->sts_sensedata); 3127 skey = scsi_sense_key(sns); 3128 asc = scsi_sense_asc(sns); 3129 ascq = scsi_sense_ascq(sns); 3130 fops = vlun->svl_fops; 3131 ASSERT(fops != NULL); 3132 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: " 3133 "Received sns key %x esc %x escq %x\n", 3134 skey, asc, ascq)); 3135 3136 if (vlun->svl_waiting_for_activepath == 1) { 3137 /* 3138 * if we are here it means we are 3139 * in the midst of a probe/attach 3140 * through a passive path; this 3141 * case is exempt from sense analysis 3142 * for detection of ext. failover 3143 * because that would unnecessarily 3144 * increase attach time. 3145 */ 3146 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp, 3147 vpkt->vpkt_tgt_init_scblen); 3148 break; 3149 } 3150 if (asc == VHCI_SCSI_PERR) { 3151 /* 3152 * parity error 3153 */ 3154 err_str = parity_err; 3155 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp, 3156 vpkt->vpkt_tgt_init_scblen); 3157 break; 3158 } 3159 rval = fops->sfo_analyze_sense(svp->svp_psd, 3160 sns, vlun->svl_fops_ctpriv); 3161 if ((rval == SCSI_SENSE_NOFAILOVER) || 3162 (rval == SCSI_SENSE_UNKNOWN) || 3163 (rval == SCSI_SENSE_NOT_READY)) { 3164 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp, 3165 vpkt->vpkt_tgt_init_scblen); 3166 break; 3167 } else if (rval == SCSI_SENSE_STATE_CHANGED) { 3168 struct scsi_vhci *vhci; 3169 vhci = ADDR2VHCI(&tpkt->pkt_address); 3170 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held); 3171 if (!held) { 3172 /* 3173 * looks like some other thread 3174 * has already detected this 3175 * condition 3176 */ 3177 tpkt->pkt_state &= 3178 ~STATE_ARQ_DONE; 3179 *(tpkt->pkt_scbp) = 3180 STATUS_BUSY; 3181 break; 3182 } 3183 (void) taskq_dispatch( 3184 vhci->vhci_update_pathstates_taskq, 3185 vhci_update_pathstates, 3186 (void *)vlun, KM_SLEEP); 3187 } else { 3188 /* 3189 * externally initiated failover 3190 * has occurred or is in progress 3191 */ 3192 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held); 3193 if (!held) { 3194 /* 3195 * looks like some other thread 3196 * has already detected this 3197 * condition 3198 */ 3199 tpkt->pkt_state &= 3200 ~STATE_ARQ_DONE; 3201 *(tpkt->pkt_scbp) = 3202 STATUS_BUSY; 3203 break; 3204 } else { 3205 rval = vhci_handle_ext_fo 3206 (pkt, rval); 3207 if (rval == BUSY_RETURN) { 3208 tpkt->pkt_state &= 3209 ~STATE_ARQ_DONE; 3210 *(tpkt->pkt_scbp) = 3211 STATUS_BUSY; 3212 break; 3213 } 3214 bcopy(pkt->pkt_scbp, 3215 tpkt->pkt_scbp, 3216 vpkt->vpkt_tgt_init_scblen); 3217 break; 3218 } 3219 } 3220 } 3221 break; 3222 3223 /* 3224 * If this is a good SCSI-II RELEASE cmd completion then restore 3225 * the load balancing policy and reset 
VLUN_RESERVE_ACTIVE_FLG. 3226 * If this is a good SCSI-II RESERVE cmd completion then set 3227 * VLUN_RESERVE_ACTIVE_FLG. 3228 */ 3229 case STATUS_GOOD: 3230 if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) || 3231 (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) { 3232 (void) mdi_set_lb_policy(vlun->svl_dip, 3233 vlun->svl_lb_policy_save); 3234 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 3235 VHCI_DEBUG(1, (CE_WARN, NULL, 3236 "!vhci_intr: vlun 0x%p release path 0x%p", 3237 (void *)vlun, (void *)vpkt->vpkt_path)); 3238 } 3239 3240 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) || 3241 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) { 3242 vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG; 3243 vlun->svl_resrv_pip = vpkt->vpkt_path; 3244 VHCI_DEBUG(1, (CE_WARN, NULL, 3245 "!vhci_intr: vlun 0x%p reserved path 0x%p", 3246 (void *)vlun, (void *)vpkt->vpkt_path)); 3247 } 3248 break; 3249 3250 case STATUS_RESERVATION_CONFLICT: 3251 VHCI_DEBUG(1, (CE_WARN, NULL, 3252 "!vhci_intr: vlun 0x%p " 3253 "reserve conflict on path 0x%p", 3254 (void *)vlun, (void *)vpkt->vpkt_path)); 3255 /* FALLTHROUGH */ 3256 default: 3257 break; 3258 } 3259 3260 /* 3261 * Update I/O completion statistics for the path 3262 */ 3263 mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp); 3264 3265 /* 3266 * Command completed successfully, release the dma binding and 3267 * destroy the transport side of the packet. 3268 */ 3269 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) && 3270 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 3271 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) { 3272 if (SCBP_C(pkt) == STATUS_GOOD) { 3273 ASSERT(vlun->svl_taskq); 3274 svp->svp_last_pkt_reason = pkt->pkt_reason; 3275 (void) taskq_dispatch(vlun->svl_taskq, 3276 vhci_run_cmd, pkt, KM_SLEEP); 3277 return; 3278 } 3279 } 3280 if ((SCBP_C(pkt) == STATUS_GOOD) && 3281 (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) { 3282 /* 3283 * If the action (value in byte 1 of the cdb) is zero, 3284 * we're reading keys, and that's the only condition 3285 * where we need to be concerned with filtering keys 3286 * and potential retries. Otherwise, we simply signal 3287 * the semaphore and move on. 3288 */ 3289 if (pkt->pkt_cdbp[1] == 0) { 3290 /* 3291 * If this is the completion of an internal 3292 * retry then we need to make sure that the 3293 * pkt and tpkt pointers are readjusted so 3294 * the calls to scsi_destroy_pkt and pkt_comp 3295 * below work * correctly. 3296 */ 3297 if (vpkt->vpkt_org_vpkt != NULL) { 3298 pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt; 3299 tpkt = vpkt->vpkt_org_vpkt-> 3300 vpkt_tgt_pkt; 3301 3302 /* 3303 * If this command was issued through 3304 * the taskq then we need to clear 3305 * this flag for proper processing in 3306 * the case of a retry from the target 3307 * driver. 3308 */ 3309 vpkt->vpkt_state &= 3310 ~VHCI_PKT_THRU_TASKQ; 3311 } 3312 3313 /* 3314 * if vhci_do_prin returns VHCI_CMD_CMPLT then 3315 * vpkt will contain the address of the 3316 * original vpkt 3317 */ 3318 if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) { 3319 /* 3320 * The command has been resent to get 3321 * all the keys from the device. Don't 3322 * complete the command with ssd until 3323 * the retry completes. 
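 * The reissued PRIN eventually completes back through
 * vhci_intr() with vpkt_org_vpkt pointing at this
 * original request, which is what the pointer
 * readjustment above relies on.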
3324 */ 3325 return; 3326 } 3327 } else { 3328 sema_v(&vlun->svl_pgr_sema); 3329 } 3330 } 3331 3332 break; 3333 3334 case CMD_TIMEOUT: 3335 if ((pkt->pkt_statistics & 3336 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 3337 3338 VHCI_DEBUG(1, (CE_NOTE, NULL, 3339 "!scsi vhci timeout invoked\n")); 3340 3341 (void) vhci_recovery_reset(vlun, &pkt->pkt_address, 3342 FALSE, VHCI_DEPTH_ALL); 3343 } 3344 MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR); 3345 tpkt->pkt_statistics |= STAT_ABORTED; 3346 err_str = timeout_err; 3347 break; 3348 3349 case CMD_TRAN_ERR: 3350 /* 3351 * This status is returned if the transport has sent the cmd 3352 * down the link to the target and then some error occurs. 3353 * In case of SCSI-II RESERVE cmd, we don't know if the 3354 * reservation been accepted by the target or not, so we need 3355 * to clear the reservation. 3356 */ 3357 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) || 3358 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) { 3359 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received" 3360 " cmd_tran_err for scsi-2 reserve cmd\n")); 3361 if (!vhci_recovery_reset(vlun, &pkt->pkt_address, 3362 TRUE, VHCI_DEPTH_TARGET)) { 3363 VHCI_DEBUG(1, (CE_WARN, NULL, 3364 "!vhci_intr cmd_tran_err reset failed!")); 3365 } 3366 } 3367 break; 3368 3369 case CMD_DEV_GONE: 3370 /* 3371 * If this is the last path then report CMD_DEV_GONE to the 3372 * target driver, otherwise report BUSY to triggger retry. 3373 */ 3374 if (vlun->svl_dip && 3375 (mdi_client_get_path_count(vlun->svl_dip) <= 1)) { 3376 struct scsi_vhci *vhci; 3377 vhci = ADDR2VHCI(&tpkt->pkt_address); 3378 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received " 3379 "cmd_dev_gone on last path\n")); 3380 (void) vhci_invalidate_mpapi_lu(vhci, vlun); 3381 break; 3382 } 3383 3384 /* Report CMD_CMPLT-with-BUSY to cause retry. */ 3385 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received " 3386 "cmd_dev_gone\n")); 3387 tpkt->pkt_reason = CMD_CMPLT; 3388 tpkt->pkt_state = STATE_GOT_BUS | 3389 STATE_GOT_TARGET | STATE_SENT_CMD | 3390 STATE_GOT_STATUS; 3391 *(tpkt->pkt_scbp) = STATUS_BUSY; 3392 break; 3393 3394 default: 3395 break; 3396 } 3397 3398 /* 3399 * SCSI-II RESERVE cmd has been serviced by the lower layers clear 3400 * the flag so the lun is not QUIESCED any longer. 3401 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt 3402 * is retried, a taskq shall again be dispatched to service it. Else 3403 * it may lead to a system hang if the retry is within interrupt 3404 * context. 3405 */ 3406 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) || 3407 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) { 3408 vlun->svl_flags &= ~VLUN_QUIESCED_FLG; 3409 vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ; 3410 } 3411 3412 /* 3413 * vpkt_org_vpkt should always be NULL here if the retry command 3414 * has been successfully processed. If vpkt_org_vpkt != NULL at 3415 * this point, it is an error so restore the original vpkt and 3416 * return an error to the target driver so it can retry the 3417 * command as appropriate. 3418 */ 3419 if (vpkt->vpkt_org_vpkt != NULL) { 3420 struct vhci_pkt *new_vpkt = vpkt; 3421 vpkt = vpkt->vpkt_org_vpkt; 3422 3423 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 3424 new_vpkt->vpkt_tgt_pkt); 3425 3426 /* 3427 * Mark this command completion as having an error so that 3428 * ssd will retry the command. 
3429 */ 3430 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 3431 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 3432 3433 pkt = vpkt->vpkt_hba_pkt; 3434 tpkt = vpkt->vpkt_tgt_pkt; 3435 } 3436 3437 if ((err_str != NULL) && (pkt->pkt_reason != 3438 svp->svp_last_pkt_reason)) { 3439 cdip = vlun->svl_dip; 3440 vdip = ddi_get_parent(cdip); 3441 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3442 vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s", 3443 ddi_pathname(cdip, cpath), ddi_driver_name(cdip), 3444 ddi_get_instance(cdip), err_str, 3445 mdi_pi_spathname(vpkt->vpkt_path)); 3446 kmem_free(cpath, MAXPATHLEN); 3447 } 3448 svp->svp_last_pkt_reason = pkt->pkt_reason; 3449 VHCI_DECR_PATH_CMDCOUNT(svp); 3450 3451 /* 3452 * For PARTIAL_DMA, vhci should not free the path. 3453 * Target driver will call into vhci_scsi_dmafree or 3454 * destroy pkt to release this path. 3455 */ 3456 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 3457 scsi_destroy_pkt(pkt); 3458 vpkt->vpkt_hba_pkt = NULL; 3459 if (vpkt->vpkt_path) { 3460 mdi_rele_path(vpkt->vpkt_path); 3461 vpkt->vpkt_path = NULL; 3462 } 3463 } 3464 3465 scsi_hba_pkt_comp(tpkt); 3466 } 3467 3468 /* 3469 * two possibilities: (1) failover has completed 3470 * or (2) is in progress; update our path states for 3471 * the former case; for the latter case, 3472 * initiate a scsi_watch request to 3473 * determine when failover completes - vlun is HELD 3474 * until failover completes; BUSY is returned to upper 3475 * layer in both the cases 3476 */ 3477 static int 3478 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat) 3479 { 3480 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 3481 struct scsi_pkt *tpkt; 3482 scsi_vhci_priv_t *svp; 3483 scsi_vhci_lun_t *vlun; 3484 struct scsi_vhci *vhci; 3485 scsi_vhci_swarg_t *swarg; 3486 char *path; 3487 3488 ASSERT(vpkt != NULL); 3489 tpkt = vpkt->vpkt_tgt_pkt; 3490 ASSERT(tpkt != NULL); 3491 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3492 ASSERT(svp != NULL); 3493 vlun = svp->svp_svl; 3494 ASSERT(vlun != NULL); 3495 ASSERT(VHCI_LUN_IS_HELD(vlun)); 3496 3497 vhci = ADDR2VHCI(&tpkt->pkt_address); 3498 3499 if (fostat == SCSI_SENSE_INACTIVE) { 3500 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover " 3501 "detected for %s; updating path states...\n", 3502 vlun->svl_lun_wwn)); 3503 /* 3504 * set the vlun flag to indicate to the task that the target 3505 * port group needs updating 3506 */ 3507 vlun->svl_flags |= VLUN_UPDATE_TPG; 3508 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq, 3509 vhci_update_pathstates, (void *)vlun, KM_SLEEP); 3510 } else { 3511 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3512 vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip), 3513 "!%s (%s%d): Waiting for externally initiated failover " 3514 "to complete", ddi_pathname(vlun->svl_dip, path), 3515 ddi_driver_name(vlun->svl_dip), 3516 ddi_get_instance(vlun->svl_dip)); 3517 kmem_free(path, MAXPATHLEN); 3518 swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP); 3519 if (swarg == NULL) { 3520 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: " 3521 "request packet allocation for %s failed....\n", 3522 vlun->svl_lun_wwn)); 3523 VHCI_RELEASE_LUN(vlun); 3524 return (PKT_RETURN); 3525 } 3526 swarg->svs_svp = svp; 3527 swarg->svs_tos = ddi_get_time(); 3528 swarg->svs_pi = vpkt->vpkt_path; 3529 swarg->svs_release_lun = 0; 3530 swarg->svs_done = 0; 3531 /* 3532 * place a hold on the path...we don't want it to 3533 * vanish while scsi_watch is in progress 3534 */ 3535 mdi_hold_path(vpkt->vpkt_path); 3536 svp->svp_sw_token = 
scsi_watch_request_submit(svp->svp_psd, 3537 VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb, 3538 (caddr_t)swarg); 3539 } 3540 return (BUSY_RETURN); 3541 } 3542 3543 /* 3544 * vhci_efo_watch_cb: 3545 * Callback from scsi_watch request to check the failover status. 3546 * Completion is either due to successful failover or timeout. 3547 * Upon successful completion, vhci_update_pathstates() is called. 3548 * For timeout condition, vhci_efo_done is called. 3549 * Always returns 0 to scsi_watch to keep retrying until vhci_efo_done 3550 * terminates this request properly in a separate thread. 3551 */ 3552 3553 static int 3554 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 3555 { 3556 struct scsi_status *statusp = resultp->statusp; 3557 uint8_t *sensep = (uint8_t *)resultp->sensep; 3558 struct scsi_pkt *pkt = resultp->pkt; 3559 scsi_vhci_swarg_t *swarg; 3560 scsi_vhci_priv_t *svp; 3561 scsi_vhci_lun_t *vlun; 3562 struct scsi_vhci *vhci; 3563 dev_info_t *vdip; 3564 int rval, updt_paths; 3565 3566 swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg; 3567 svp = swarg->svs_svp; 3568 if (swarg->svs_done) { 3569 /* 3570 * Failover has already completed or timed out. 3571 * Waiting for vhci_efo_done to terminate this scsi_watch. 3572 */ 3573 return (0); 3574 } 3575 3576 ASSERT(svp != NULL); 3577 vlun = svp->svp_svl; 3578 ASSERT(vlun != NULL); 3579 ASSERT(VHCI_LUN_IS_HELD(vlun)); 3580 vlun->svl_efo_update_path = 0; 3581 vdip = ddi_get_parent(vlun->svl_dip); 3582 vhci = ddi_get_soft_state(vhci_softstate, 3583 ddi_get_instance(vdip)); 3584 3585 updt_paths = 0; 3586 3587 if (pkt->pkt_reason != CMD_CMPLT) { 3588 if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) { 3589 swarg->svs_release_lun = 1; 3590 goto done; 3591 } 3592 return (0); 3593 } 3594 if (*((unsigned char *)statusp) == STATUS_CHECK) { 3595 rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep, 3596 vlun->svl_fops_ctpriv); 3597 switch (rval) { 3598 /* 3599 * Only update path states if the path is definitely 3600 * inactive, or no failover occurred. For all other 3601 * check conditions continue pinging. An unexpected 3602 * check condition shouldn't cause pinging to complete 3603 * prematurely.
3604 */ 3605 case SCSI_SENSE_INACTIVE: 3606 case SCSI_SENSE_NOFAILOVER: 3607 updt_paths = 1; 3608 break; 3609 default: 3610 if ((ddi_get_time() - swarg->svs_tos) 3611 >= VHCI_EXTFO_TIMEOUT) { 3612 swarg->svs_release_lun = 1; 3613 goto done; 3614 } 3615 return (0); 3616 } 3617 } else if (*((unsigned char *)statusp) == 3618 STATUS_RESERVATION_CONFLICT) { 3619 updt_paths = 1; 3620 } else if ((*((unsigned char *)statusp)) & 3621 (STATUS_BUSY | STATUS_QFULL)) { 3622 return (0); 3623 } 3624 if ((*((unsigned char *)statusp) == STATUS_GOOD) || 3625 (updt_paths == 1)) { 3626 /* 3627 * we got here because we had detected an 3628 * externally initiated failover; things 3629 * have settled down now, so let's 3630 * start up a task to update the 3631 * path states and target port group 3632 */ 3633 vlun->svl_efo_update_path = 1; 3634 swarg->svs_done = 1; 3635 vlun->svl_swarg = swarg; 3636 vlun->svl_flags |= VLUN_UPDATE_TPG; 3637 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq, 3638 vhci_update_pathstates, (void *)vlun, 3639 KM_SLEEP); 3640 return (0); 3641 } 3642 if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) { 3643 swarg->svs_release_lun = 1; 3644 goto done; 3645 } 3646 return (0); 3647 done: 3648 swarg->svs_done = 1; 3649 (void) taskq_dispatch(vhci->vhci_taskq, 3650 vhci_efo_done, (void *)swarg, KM_SLEEP); 3651 return (0); 3652 } 3653 3654 /* 3655 * vhci_efo_done: 3656 * cleanly terminates scsi_watch and free up resources. 3657 * Called as taskq function in vhci_efo_watch_cb for EFO timeout condition 3658 * or by vhci_update_path_states invoked during external initiated 3659 * failover completion. 3660 */ 3661 static void 3662 vhci_efo_done(void *arg) 3663 { 3664 scsi_vhci_lun_t *vlun; 3665 scsi_vhci_swarg_t *swarg = (scsi_vhci_swarg_t *)arg; 3666 scsi_vhci_priv_t *svp = swarg->svs_svp; 3667 ASSERT(svp); 3668 3669 vlun = svp->svp_svl; 3670 ASSERT(vlun); 3671 3672 /* Wait for clean termination of scsi_watch */ 3673 (void) scsi_watch_request_terminate(svp->svp_sw_token, 3674 SCSI_WATCH_TERMINATE_ALL_WAIT); 3675 svp->svp_sw_token = NULL; 3676 3677 /* release path and freeup resources to indicate failover completion */ 3678 mdi_rele_path(swarg->svs_pi); 3679 if (swarg->svs_release_lun) { 3680 VHCI_RELEASE_LUN(vlun); 3681 } 3682 kmem_free((void *)swarg, sizeof (*swarg)); 3683 } 3684 3685 /* 3686 * Update the path states 3687 * vlun should be HELD when this is invoked. 3688 * Calls vhci_efo_done to cleanup resources allocated for EFO. 
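 * Usually dispatched asynchronously on the dedicated taskq rather
 * than called in line, as done from vhci_intr() and
 * vhci_efo_watch_cb():
 *
 *	(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
 *	    vhci_update_pathstates, (void *)vlun, KM_SLEEP);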
3689 */ 3690 void 3691 vhci_update_pathstates(void *arg) 3692 { 3693 mdi_pathinfo_t *pip, *npip; 3694 dev_info_t *dip; 3695 struct scsi_failover_ops *fo; 3696 struct scsi_vhci_priv *svp; 3697 struct scsi_device *psd; 3698 struct scsi_path_opinfo opinfo; 3699 char *pclass, *tptr; 3700 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg; 3701 int sps; /* mdi_select_path() status */ 3702 char *cpath; 3703 struct scsi_vhci *vhci; 3704 struct scsi_pkt *pkt; 3705 struct buf *bp; 3706 int reserve_conflict = 0; 3707 3708 ASSERT(VHCI_LUN_IS_HELD(vlun)); 3709 dip = vlun->svl_dip; 3710 pip = npip = NULL; 3711 3712 vhci = ddi_get_soft_state(vhci_softstate, 3713 ddi_get_instance(ddi_get_parent(dip))); 3714 3715 sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH | 3716 MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip); 3717 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 3718 goto done; 3719 } 3720 3721 fo = vlun->svl_fops; 3722 do { 3723 pip = npip; 3724 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 3725 psd = svp->svp_psd; 3726 if (fo->sfo_path_get_opinfo(psd, &opinfo, 3727 vlun->svl_fops_ctpriv) != 0) { 3728 sps = mdi_select_path(dip, NULL, 3729 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH | 3730 MDI_SELECT_NO_PREFERRED), pip, &npip); 3731 mdi_rele_path(pip); 3732 continue; 3733 } 3734 3735 if (mdi_prop_lookup_string(pip, "path-class", &pclass) != 3736 MDI_SUCCESS) { 3737 VHCI_DEBUG(1, (CE_NOTE, NULL, 3738 "!vhci_update_pathstates: prop lookup failed for " 3739 "path 0x%p\n", (void *)pip)); 3740 sps = mdi_select_path(dip, NULL, 3741 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH | 3742 MDI_SELECT_NO_PREFERRED), pip, &npip); 3743 mdi_rele_path(pip); 3744 continue; 3745 } 3746 3747 /* 3748 * Need to update the "path-class" property 3749 * value in the device tree if different 3750 * from the existing value. 3751 */ 3752 if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) { 3753 (void) mdi_prop_update_string(pip, "path-class", 3754 opinfo.opinfo_path_attr); 3755 } 3756 3757 /* 3758 * Only change the state if needed. i.e. Don't call 3759 * mdi_pi_set_state to ONLINE a path if its already 3760 * ONLINE. Same for STANDBY paths. 
3761 */ 3762 3763 if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE || 3764 opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) { 3765 if (!(MDI_PI_IS_ONLINE(pip))) { 3766 VHCI_DEBUG(1, (CE_NOTE, NULL, 3767 "!vhci_update_pathstates: marking path" 3768 " 0x%p as ONLINE\n", (void *)pip)); 3769 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3770 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s " 3771 "(%s%d): path %s " 3772 "is now ONLINE because of " 3773 "an externally initiated failover", 3774 ddi_pathname(dip, cpath), 3775 ddi_driver_name(dip), 3776 ddi_get_instance(dip), 3777 mdi_pi_spathname(pip)); 3778 kmem_free(cpath, MAXPATHLEN); 3779 mdi_pi_set_state(pip, 3780 MDI_PATHINFO_STATE_ONLINE); 3781 mdi_pi_set_preferred(pip, 3782 opinfo.opinfo_preferred); 3783 tptr = kmem_alloc(strlen 3784 (opinfo.opinfo_path_attr)+1, KM_SLEEP); 3785 (void) strlcpy(tptr, opinfo.opinfo_path_attr, 3786 (strlen(opinfo.opinfo_path_attr)+1)); 3787 mutex_enter(&vlun->svl_mutex); 3788 if (vlun->svl_active_pclass != NULL) { 3789 kmem_free(vlun->svl_active_pclass, 3790 strlen(vlun->svl_active_pclass)+1); 3791 } 3792 vlun->svl_active_pclass = tptr; 3793 if (vlun->svl_waiting_for_activepath) { 3794 vlun->svl_waiting_for_activepath = 0; 3795 } 3796 mutex_exit(&vlun->svl_mutex); 3797 /* Check for Reservation Conflict */ 3798 bp = scsi_alloc_consistent_buf( 3799 &svp->svp_psd->sd_address, 3800 (struct buf *)NULL, DEV_BSIZE, B_READ, 3801 NULL, NULL); 3802 if (!bp) { 3803 VHCI_DEBUG(1, (CE_NOTE, NULL, 3804 "vhci_update_pathstates: " 3805 "!No resources (buf)\n")); 3806 mdi_rele_path(pip); 3807 goto done; 3808 } 3809 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, 3810 NULL, bp, CDB_GROUP1, 3811 sizeof (struct scsi_arq_status), 0, 3812 PKT_CONSISTENT, NULL, NULL); 3813 if (pkt) { 3814 (void) scsi_setup_cdb((union scsi_cdb *) 3815 (uintptr_t)pkt->pkt_cdbp, 3816 SCMD_READ, 1, 1, 0); 3817 pkt->pkt_time = 3*30; 3818 pkt->pkt_flags = FLAG_NOINTR; 3819 pkt->pkt_path_instance = 3820 mdi_pi_get_path_instance(pip); 3821 3822 if ((scsi_transport(pkt) == 3823 TRAN_ACCEPT) && (pkt->pkt_reason 3824 == CMD_CMPLT) && (SCBP_C(pkt) == 3825 STATUS_RESERVATION_CONFLICT)) { 3826 reserve_conflict = 1; 3827 } 3828 scsi_destroy_pkt(pkt); 3829 } 3830 scsi_free_consistent_buf(bp); 3831 } else if (MDI_PI_IS_ONLINE(pip)) { 3832 if (strcmp(pclass, opinfo.opinfo_path_attr) 3833 != 0) { 3834 mdi_pi_set_preferred(pip, 3835 opinfo.opinfo_preferred); 3836 mutex_enter(&vlun->svl_mutex); 3837 if (vlun->svl_active_pclass == NULL || 3838 strcmp(opinfo.opinfo_path_attr, 3839 vlun->svl_active_pclass) != 0) { 3840 mutex_exit(&vlun->svl_mutex); 3841 tptr = kmem_alloc(strlen 3842 (opinfo.opinfo_path_attr)+1, 3843 KM_SLEEP); 3844 (void) strlcpy(tptr, 3845 opinfo.opinfo_path_attr, 3846 (strlen 3847 (opinfo.opinfo_path_attr) 3848 +1)); 3849 mutex_enter(&vlun->svl_mutex); 3850 } else { 3851 /* 3852 * No need to update 3853 * svl_active_pclass 3854 */ 3855 tptr = NULL; 3856 mutex_exit(&vlun->svl_mutex); 3857 } 3858 if (tptr) { 3859 if (vlun->svl_active_pclass 3860 != NULL) { 3861 kmem_free(vlun-> 3862 svl_active_pclass, 3863 strlen(vlun-> 3864 svl_active_pclass) 3865 +1); 3866 } 3867 vlun->svl_active_pclass = tptr; 3868 mutex_exit(&vlun->svl_mutex); 3869 } 3870 } 3871 } 3872 } else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) && 3873 !(MDI_PI_IS_STANDBY(pip))) { 3874 VHCI_DEBUG(1, (CE_NOTE, NULL, 3875 "!vhci_update_pathstates: marking path" 3876 " 0x%p as STANDBY\n", (void *)pip)); 3877 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3878 vhci_log(CE_NOTE, 
ddi_get_parent(dip), "!%s " 3879 "(%s%d): path %s " 3880 "is now STANDBY because of " 3881 "an externally initiated failover", 3882 ddi_pathname(dip, cpath), 3883 ddi_driver_name(dip), 3884 ddi_get_instance(dip), 3885 mdi_pi_spathname(pip)); 3886 kmem_free(cpath, MAXPATHLEN); 3887 mdi_pi_set_state(pip, 3888 MDI_PATHINFO_STATE_STANDBY); 3889 mdi_pi_set_preferred(pip, 3890 opinfo.opinfo_preferred); 3891 mutex_enter(&vlun->svl_mutex); 3892 if (vlun->svl_active_pclass != NULL) { 3893 if (strcmp(vlun->svl_active_pclass, 3894 opinfo.opinfo_path_attr) == 0) { 3895 kmem_free(vlun-> 3896 svl_active_pclass, 3897 strlen(vlun-> 3898 svl_active_pclass)+1); 3899 vlun->svl_active_pclass = NULL; 3900 } 3901 } 3902 mutex_exit(&vlun->svl_mutex); 3903 } 3904 (void) mdi_prop_free(pclass); 3905 sps = mdi_select_path(dip, NULL, 3906 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH | 3907 MDI_SELECT_NO_PREFERRED), pip, &npip); 3908 mdi_rele_path(pip); 3909 3910 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 3911 3912 /* 3913 * Check to see if this vlun has an active SCSI-II RESERVE. If so 3914 * clear the reservation by sending a reset, so the host doesn't 3915 * receive a reservation conflict. 3916 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd 3917 * of the reset, explicitly. 3918 */ 3919 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 3920 if (reserve_conflict && (vlun->svl_xlf_capable == 0)) { 3921 (void) vhci_recovery_reset(vlun, 3922 &svp->svp_psd->sd_address, FALSE, 3923 VHCI_DEPTH_TARGET); 3924 } 3925 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 3926 mutex_enter(&vhci->vhci_mutex); 3927 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 3928 &vhci->vhci_reset_notify_listf); 3929 mutex_exit(&vhci->vhci_mutex); 3930 } 3931 if (vlun->svl_flags & VLUN_UPDATE_TPG) { 3932 /* 3933 * Update the AccessState of related MP-API TPGs 3934 */ 3935 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 3936 vlun->svl_flags &= ~VLUN_UPDATE_TPG; 3937 } 3938 done: 3939 if (vlun->svl_efo_update_path) { 3940 vlun->svl_efo_update_path = 0; 3941 vhci_efo_done(vlun->svl_swarg); 3942 vlun->svl_swarg = 0; 3943 } 3944 VHCI_RELEASE_LUN(vlun); 3945 } 3946 3947 /* ARGSUSED */ 3948 static int 3949 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 3950 { 3951 scsi_hba_tran_t *hba = NULL; 3952 struct scsi_device *psd = NULL; 3953 scsi_vhci_lun_t *vlun = NULL; 3954 dev_info_t *pdip = NULL; 3955 dev_info_t *tgt_dip; 3956 struct scsi_vhci *vhci; 3957 char *guid; 3958 scsi_vhci_priv_t *svp = NULL; 3959 int rval = MDI_FAILURE; 3960 int vlun_alloced = 0; 3961 3962 ASSERT(vdip != NULL); 3963 ASSERT(pip != NULL); 3964 3965 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 3966 ASSERT(vhci != NULL); 3967 3968 pdip = mdi_pi_get_phci(pip); 3969 ASSERT(pdip != NULL); 3970 3971 hba = ddi_get_driver_private(pdip); 3972 ASSERT(hba != NULL); 3973 3974 tgt_dip = mdi_pi_get_client(pip); 3975 ASSERT(tgt_dip != NULL); 3976 3977 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS, 3978 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 3979 VHCI_DEBUG(1, (CE_WARN, NULL, 3980 "vhci_pathinfo_init: lun guid property failed")); 3981 goto failure; 3982 } 3983 3984 vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced); 3985 ddi_prop_free(guid); 3986 3987 vlun->svl_dip = tgt_dip; 3988 3989 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP); 3990 svp->svp_svl = vlun; 3991 3992 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip); 3993 mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL); 3994 
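	/*
	 * Per-path bookkeeping note: each mdi_pathinfo node gets its own
	 * scsi_vhci_priv_t (svp, above) and its own scsi_device copy
	 * (psd, allocated below); both are torn down again in
	 * vhci_pathinfo_uninit().
	 */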
cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL); 3995 3996 psd = kmem_zalloc(sizeof (*psd), KM_SLEEP); 3997 mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL); 3998 3999 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) { 4000 /* 4001 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to 4002 * scsi_device in the scsi_address structure. This allows an 4003 * HBA driver to find its scsi_device(9S) and 4004 * per-scsi_device(9S) HBA private data given a 4005 * scsi_address(9S) by using scsi_address_device(9F) and 4006 * scsi_device_hba_private_get(9F). 4007 */ 4008 psd->sd_address.a.a_sd = psd; 4009 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4010 /* 4011 * Clone the transport structure if requested; 4012 * self-enumerating HBAs always need to use cloning. 4013 */ 4014 scsi_hba_tran_t *clone = 4015 kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP); 4016 bcopy(hba, clone, sizeof (scsi_hba_tran_t)); 4017 hba = clone; 4018 hba->tran_sd = psd; 4019 } else { 4020 /* 4021 * SPI pHCI unit-address. If we ever need to support this 4022 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo 4023 * node unit-address properties. For now we fail... 4024 */ 4025 goto failure; 4026 } 4027 4028 psd->sd_dev = tgt_dip; 4029 psd->sd_address.a_hba_tran = hba; 4030 4031 /* 4032 * Mark scsi_device as being associated with a pathinfo node. For 4033 * a scsi_device structure associated with a devinfo node, 4034 * scsi_ctlops_initchild sets this field to NULL. 4035 */ 4036 psd->sd_pathinfo = pip; 4037 4038 /* 4039 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with 4040 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all 4041 * mpxio-capable pHCI drivers use SCSA enumeration services (or at 4042 * least have been changed to use sd_pathinfo instead). 
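 *
 * As an aside, the non-legacy way for a SCSI_HBA_ADDR_COMPLEX pHCI to
 * get back to this scsi_device(9S) and its HBA private data is through
 * the 9F interfaces mentioned above; an illustrative (hypothetical)
 * sketch, not code from this driver:
 *
 *	struct scsi_device *sd = scsi_address_device(&pkt->pkt_address);
 *	void *hba_priv = scsi_device_hba_private_get(sd);
 *
 * ('pkt' and 'hba_priv' are placeholder names.)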
4043 */ 4044 psd->sd_private = (caddr_t)pip; 4045 4046 /* See scsi_hba.c for info on sd_tran_safe kludge */ 4047 psd->sd_tran_safe = hba; 4048 4049 svp->svp_psd = psd; 4050 mdi_pi_set_vhci_private(pip, (caddr_t)svp); 4051 4052 /* 4053 * call hba's target init entry point if it exists 4054 */ 4055 if (hba->tran_tgt_init != NULL) { 4056 psd->sd_tran_tgt_free_done = 0; 4057 if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip, 4058 hba, psd)) != DDI_SUCCESS) { 4059 VHCI_DEBUG(1, (CE_WARN, pdip, 4060 "!vhci_pathinfo_init: tran_tgt_init failed for " 4061 "path=0x%p rval=%x", (void *)pip, rval)); 4062 goto failure; 4063 } 4064 } 4065 4066 svp->svp_new_path = 1; 4067 4068 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n", 4069 (void *)pip)); 4070 return (MDI_SUCCESS); 4071 4072 failure: 4073 if (psd) { 4074 mutex_destroy(&psd->sd_mutex); 4075 kmem_free(psd, sizeof (*psd)); 4076 } 4077 if (svp) { 4078 mdi_pi_set_vhci_private(pip, NULL); 4079 mutex_destroy(&svp->svp_mutex); 4080 cv_destroy(&svp->svp_cv); 4081 kmem_free(svp, sizeof (*svp)); 4082 } 4083 if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE)) 4084 kmem_free(hba, sizeof (scsi_hba_tran_t)); 4085 4086 if (vlun_alloced) 4087 vhci_lun_free(tgt_dip); 4088 4089 return (rval); 4090 } 4091 4092 /* ARGSUSED */ 4093 static int 4094 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 4095 { 4096 scsi_hba_tran_t *hba = NULL; 4097 struct scsi_device *psd = NULL; 4098 dev_info_t *pdip = NULL; 4099 dev_info_t *cdip = NULL; 4100 scsi_vhci_priv_t *svp = NULL; 4101 4102 ASSERT(vdip != NULL); 4103 ASSERT(pip != NULL); 4104 4105 pdip = mdi_pi_get_phci(pip); 4106 ASSERT(pdip != NULL); 4107 4108 cdip = mdi_pi_get_client(pip); 4109 ASSERT(cdip != NULL); 4110 4111 hba = ddi_get_driver_private(pdip); 4112 ASSERT(hba != NULL); 4113 4114 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT); 4115 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4116 if (svp == NULL) { 4117 /* path already freed. Nothing to do. */ 4118 return (MDI_SUCCESS); 4119 } 4120 4121 psd = svp->svp_psd; 4122 ASSERT(psd != NULL); 4123 4124 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) { 4125 /* Verify plumbing */ 4126 ASSERT(psd->sd_address.a_hba_tran == hba); 4127 ASSERT(psd->sd_address.a.a_sd == psd); 4128 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4129 /* Switch to cloned scsi_hba_tran(9S) structure */ 4130 hba = psd->sd_address.a_hba_tran; 4131 ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE); 4132 ASSERT(hba->tran_sd == psd); 4133 } 4134 4135 if ((hba->tran_tgt_free != NULL) && !psd->sd_tran_tgt_free_done) { 4136 (*hba->tran_tgt_free) (pdip, cdip, hba, psd); 4137 psd->sd_tran_tgt_free_done = 1; 4138 } 4139 mutex_destroy(&psd->sd_mutex); 4140 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4141 kmem_free(hba, sizeof (*hba)); 4142 } 4143 4144 mdi_pi_set_vhci_private(pip, NULL); 4145 4146 /* 4147 * Free the pathinfo related scsi_device inquiry data. Note that this 4148 * matches what happens for scsi_hba.c devinfo case at uninitchild time. 4149 */ 4150 if (psd->sd_inq) 4151 kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry)); 4152 kmem_free((caddr_t)psd, sizeof (*psd)); 4153 4154 mutex_destroy(&svp->svp_mutex); 4155 cv_destroy(&svp->svp_cv); 4156 kmem_free((caddr_t)svp, sizeof (*svp)); 4157 4158 /* 4159 * If this is the last path to the client, 4160 * then free up the vlun as well. 
4161 */ 4162 if (mdi_client_get_path_count(cdip) == 1) { 4163 vhci_lun_free(cdip); 4164 } 4165 4166 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n", 4167 (void *)pip)); 4168 return (MDI_SUCCESS); 4169 } 4170 4171 /* ARGSUSED */ 4172 static int 4173 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip, 4174 mdi_pathinfo_state_t state, uint32_t ext_state, int flags) 4175 { 4176 int rval = MDI_SUCCESS; 4177 scsi_vhci_priv_t *svp; 4178 scsi_vhci_lun_t *vlun; 4179 int held; 4180 int op = (flags & 0xf00) >> 8; 4181 struct scsi_vhci *vhci; 4182 4183 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4184 4185 if (flags & MDI_EXT_STATE_CHANGE) { 4186 /* 4187 * We do not want to issue any commands down the path in case 4188 * sync flag is set. Lower layers might not be ready to accept 4189 * any I/O commands. 4190 */ 4191 if (op == DRIVER_DISABLE) 4192 return (MDI_SUCCESS); 4193 4194 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4195 if (svp == NULL) { 4196 return (MDI_FAILURE); 4197 } 4198 vlun = svp->svp_svl; 4199 4200 if (flags & MDI_BEFORE_STATE_CHANGE) { 4201 /* 4202 * Hold the LUN. 4203 */ 4204 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 4205 if (flags & MDI_DISABLE_OP) { 4206 /* 4207 * Issue scsi reset if it happens to be 4208 * reserved path. 4209 */ 4210 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 4211 /* 4212 * if reservation pending on 4213 * this path, dont' mark the 4214 * path busy 4215 */ 4216 if (op == DRIVER_DISABLE_TRANSIENT) { 4217 VHCI_DEBUG(1, (CE_NOTE, NULL, 4218 "!vhci_pathinfo" 4219 "_state_change (pip:%p): " 4220 " reservation: fail busy\n", 4221 (void *)pip)); 4222 return (MDI_FAILURE); 4223 } 4224 if (pip == vlun->svl_resrv_pip) { 4225 if (vhci_recovery_reset( 4226 svp->svp_svl, 4227 &svp->svp_psd->sd_address, 4228 TRUE, 4229 VHCI_DEPTH_TARGET) == 0) { 4230 VHCI_DEBUG(1, 4231 (CE_NOTE, NULL, 4232 "!vhci_pathinfo" 4233 "_state_change " 4234 " (pip:%p): " 4235 "reset failed, " 4236 "give up!\n", 4237 (void *)pip)); 4238 } 4239 vlun->svl_flags &= 4240 ~VLUN_RESERVE_ACTIVE_FLG; 4241 } 4242 } 4243 } else if (flags & MDI_ENABLE_OP) { 4244 if (((vhci->vhci_conf_flags & 4245 VHCI_CONF_FLAGS_AUTO_FAILBACK) == 4246 VHCI_CONF_FLAGS_AUTO_FAILBACK) && 4247 MDI_PI_IS_USER_DISABLE(pip) && 4248 MDI_PI_IS_STANDBY(pip)) { 4249 struct scsi_failover_ops *fo; 4250 char *best_pclass, *pclass = NULL; 4251 int best_class, rv; 4252 /* 4253 * Failback if enabling a standby path 4254 * and it is the primary class or 4255 * preferred class 4256 */ 4257 best_class = mdi_pi_get_preferred(pip); 4258 if (best_class == 0) { 4259 /* 4260 * if not preferred - compare 4261 * path-class with class 4262 */ 4263 fo = vlun->svl_fops; 4264 (void) fo->sfo_pathclass_next( 4265 NULL, &best_pclass, 4266 vlun->svl_fops_ctpriv); 4267 pclass = NULL; 4268 rv = mdi_prop_lookup_string(pip, 4269 "path-class", &pclass); 4270 if (rv != MDI_SUCCESS || 4271 pclass == NULL) { 4272 vhci_log(CE_NOTE, vdip, 4273 "!path-class " 4274 " lookup " 4275 "failed. 
rv: %d" 4276 "class: %p", rv, 4277 (void *)pclass); 4278 } else if (strncmp(pclass, 4279 best_pclass, 4280 strlen(best_pclass)) == 0) { 4281 best_class = 1; 4282 } 4283 if (rv == MDI_SUCCESS && 4284 pclass != NULL) { 4285 rv = mdi_prop_free( 4286 pclass); 4287 if (rv != 4288 DDI_PROP_SUCCESS) { 4289 vhci_log( 4290 CE_NOTE, 4291 vdip, 4292 "!path-" 4293 "class" 4294 " free" 4295 " failed" 4296 " rv: %d" 4297 " class: " 4298 "%p", 4299 rv, 4300 (void *) 4301 pclass); 4302 } 4303 } 4304 } 4305 if (best_class == 1) { 4306 VHCI_DEBUG(1, (CE_NOTE, NULL, 4307 "preferred path: %p " 4308 "USER_DISABLE->USER_ENABLE " 4309 "transition for lun %s\n", 4310 (void *)pip, 4311 vlun->svl_lun_wwn)); 4312 (void) taskq_dispatch( 4313 vhci->vhci_taskq, 4314 vhci_initiate_auto_failback, 4315 (void *) vlun, KM_SLEEP); 4316 } 4317 } 4318 /* 4319 * if PGR is active, revalidate key and 4320 * register on this path also, if key is 4321 * still valid 4322 */ 4323 sema_p(&vlun->svl_pgr_sema); 4324 if (vlun->svl_pgr_active) 4325 (void) 4326 vhci_pgr_validate_and_register(svp); 4327 sema_v(&vlun->svl_pgr_sema); 4328 /* 4329 * Inform target driver about any 4330 * reservations to be reinstated if target 4331 * has dropped reservation during the busy 4332 * period. 4333 */ 4334 mutex_enter(&vhci->vhci_mutex); 4335 scsi_hba_reset_notify_callback( 4336 &vhci->vhci_mutex, 4337 &vhci->vhci_reset_notify_listf); 4338 mutex_exit(&vhci->vhci_mutex); 4339 } 4340 } 4341 if (flags & MDI_AFTER_STATE_CHANGE) { 4342 if (flags & MDI_ENABLE_OP) { 4343 mutex_enter(&vhci_global_mutex); 4344 cv_broadcast(&vhci_cv); 4345 mutex_exit(&vhci_global_mutex); 4346 } 4347 if (vlun->svl_setcap_done) { 4348 (void) vhci_pHCI_cap(&svp->svp_psd->sd_address, 4349 "sector-size", vlun->svl_sector_size, 4350 1, pip); 4351 } 4352 4353 /* 4354 * Release the LUN 4355 */ 4356 VHCI_RELEASE_LUN(vlun); 4357 4358 /* 4359 * Path transition is complete. 4360 * Run callback to indicate target driver to 4361 * retry to prevent IO starvation. 4362 */ 4363 if (scsi_callback_id != 0) { 4364 ddi_run_callback(&scsi_callback_id); 4365 } 4366 } 4367 } else { 4368 switch (state) { 4369 case MDI_PATHINFO_STATE_ONLINE: 4370 rval = vhci_pathinfo_online(vdip, pip, flags); 4371 break; 4372 4373 case MDI_PATHINFO_STATE_OFFLINE: 4374 rval = vhci_pathinfo_offline(vdip, pip, flags); 4375 break; 4376 4377 default: 4378 break; 4379 } 4380 /* 4381 * Path transition is complete. 4382 * Run callback to indicate target driver to 4383 * retry to prevent IO starvation. 4384 */ 4385 if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) { 4386 ddi_run_callback(&scsi_callback_id); 4387 } 4388 return (rval); 4389 } 4390 4391 return (MDI_SUCCESS); 4392 } 4393 4394 /* 4395 * Parse the mpxio load balancing options. The datanameptr 4396 * will point to a string containing the load-balance-options value. 4397 * The load-balance-options value will be a property that 4398 * defines the load-balance algorithm and any arguments to that 4399 * algorithm. 4400 * For example: 4401 * device-type-mpxio-options-list= 4402 * "device-type=SUN SENA", "load-balance-options=logical-block-options" 4403 * "device-type=SUN SE6920", "round-robin-options"; 4404 * logical-block-options="load-balance=logical-block", "region-size=15"; 4405 * round-robin-options="load-balance=round-robin"; 4406 * 4407 * If the load-balance is not defined the load balance algorithm will 4408 * default to the global setting. 
There will be default values assigned 4409 * to the arguments (region-size=18) and if an argument is one 4410 * that is not known, it will be ignored. 4411 */ 4412 static void 4413 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip, 4414 caddr_t datanameptr) 4415 { 4416 char *dataptr, *next_entry; 4417 caddr_t config_list = NULL; 4418 int config_list_len = 0, list_len = 0; 4419 int region_size = -1; 4420 client_lb_t load_balance; 4421 4422 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr, 4423 (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) { 4424 return; 4425 } 4426 4427 list_len = config_list_len; 4428 next_entry = config_list; 4429 while (config_list_len > 0) { 4430 dataptr = next_entry; 4431 4432 if (strncmp(mdi_load_balance, dataptr, 4433 strlen(mdi_load_balance)) == 0) { 4434 /* get the load-balance scheme */ 4435 dataptr += strlen(mdi_load_balance) + 1; 4436 if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) { 4437 (void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR); 4438 load_balance = LOAD_BALANCE_RR; 4439 } else if (strcmp(dataptr, 4440 LOAD_BALANCE_PROP_LBA) == 0) { 4441 (void) mdi_set_lb_policy(cdip, 4442 LOAD_BALANCE_LBA); 4443 load_balance = LOAD_BALANCE_LBA; 4444 } else if (strcmp(dataptr, 4445 LOAD_BALANCE_PROP_NONE) == 0) { 4446 (void) mdi_set_lb_policy(cdip, 4447 LOAD_BALANCE_NONE); 4448 load_balance = LOAD_BALANCE_NONE; 4449 } 4450 } else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE, 4451 strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) { 4452 int i = 0; 4453 char *ptr; 4454 char *tmp; 4455 4456 tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1); 4457 /* check for numeric value */ 4458 for (ptr = tmp; i < strlen(tmp); i++, ptr++) { 4459 if (!isdigit(*ptr)) { 4460 cmn_err(CE_WARN, 4461 "Illegal region size: %s." 4462 " Setting to default value: %d", 4463 tmp, 4464 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4465 region_size = 4466 LOAD_BALANCE_DEFAULT_REGION_SIZE; 4467 break; 4468 } 4469 } 4470 if (i >= strlen(tmp)) { 4471 region_size = stoi(&tmp); 4472 } 4473 (void) mdi_set_lb_region_size(cdip, region_size); 4474 } 4475 config_list_len -= (strlen(next_entry) + 1); 4476 next_entry += strlen(next_entry) + 1; 4477 } 4478 #ifdef DEBUG 4479 if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) { 4480 VHCI_DEBUG(1, (CE_NOTE, dip, 4481 "!vhci_parse_mpxio_lb_options: region-size: %d" 4482 "only valid for load-balance=logical-block\n", 4483 region_size)); 4484 } 4485 #endif 4486 if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) { 4487 VHCI_DEBUG(1, (CE_NOTE, dip, 4488 "!vhci_parse_mpxio_lb_options: No region-size" 4489 " defined load-balance=logical-block." 4490 " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE)); 4491 (void) mdi_set_lb_region_size(cdip, 4492 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4493 } 4494 if (list_len > 0) { 4495 kmem_free(config_list, list_len); 4496 } 4497 } 4498 4499 /* 4500 * Parse the device-type-mpxio-options-list looking for the key of 4501 * "load-balance-options". If found, parse the load balancing options. 4502 * Check the comment of the vhci_get_device_type_mpxio_options() 4503 * for the device-type-mpxio-options-list. 
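 *
 * For illustration only (hypothetical conf values): given
 * "load-balance-options=logical-block-options" and
 * logical-block-options="load-balance=logical-block", "region-size=15";
 * the value names the property that vhci_parse_mpxio_lb_options() then
 * reads, and the net effect is roughly mdi_set_lb_policy(cdip,
 * LOAD_BALANCE_LBA) followed by mdi_set_lb_region_size(cdip, 15).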
4504 */ 4505 static void 4506 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip, 4507 caddr_t datanameptr, int list_len) 4508 { 4509 char *dataptr; 4510 int len; 4511 4512 /* 4513 * get the data list 4514 */ 4515 dataptr = datanameptr; 4516 len = 0; 4517 while (len < list_len && 4518 strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR)) 4519 != 0) { 4520 if (strncmp(dataptr, LOAD_BALANCE_OPTIONS, 4521 strlen(LOAD_BALANCE_OPTIONS)) == 0) { 4522 len += strlen(LOAD_BALANCE_OPTIONS) + 1; 4523 dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1; 4524 vhci_parse_mpxio_lb_options(dip, cdip, dataptr); 4525 } 4526 len += strlen(dataptr) + 1; 4527 dataptr += strlen(dataptr) + 1; 4528 } 4529 } 4530 4531 /* 4532 * Check the inquiry string returned from the device against the device-type. 4533 * Check for the existence of the device-type-mpxio-options-list and 4534 * if found, parse the list, checking for a match with the device-type 4535 * value and the inquiry string returned from the device. If a match 4536 * is found, parse the mpxio options list. The format of the 4537 * device-type-mpxio-options-list is: 4538 * device-type-mpxio-options-list= 4539 * "device-type=SUN SENA", "load-balance-options=logical-block-options" 4540 * "device-type=SUN SE6920", "round-robin-options"; 4541 * logical-block-options="load-balance=logical-block", "region-size=15"; 4542 * round-robin-options="load-balance=round-robin"; 4543 */ 4544 void 4545 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip, 4546 struct scsi_device *devp) 4547 { 4548 4549 caddr_t config_list = NULL; 4550 caddr_t vidptr, datanameptr; 4551 int vidlen, dupletlen = 0; 4552 int config_list_len = 0, len; 4553 struct scsi_inquiry *inq = devp->sd_inq; 4554 4555 /* 4556 * look up the device-type-mpxio-options-list and walk through 4557 * the list, comparing the vendor id from the earlier inquiry command 4558 * with the vids in the list; if there is a match, look up 4559 * the mpxio-options value 4560 */ 4561 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 4562 MPXIO_OPTIONS_LIST, 4563 (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) { 4564 4565 /* 4566 * Compare vids in each duplet - if it matches, 4567 * parse the mpxio options list. 
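 *
 * The property data is a sequence of NUL-separated strings, so a
 * duplet is a device-type entry followed by its options entry, e.g.
 * (from the example above):
 *	"device-type=SUN SENA", "load-balance-options=logical-block-options"
 * and inq_vid is compared against the text following "device-type=".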
4568 */ 4569 for (len = config_list_len, vidptr = config_list; len > 0; 4570 len -= dupletlen) { 4571 4572 dupletlen = 0; 4573 4574 if (strlen(vidptr) != 0 && 4575 strncmp(vidptr, DEVICE_TYPE_STR, 4576 strlen(DEVICE_TYPE_STR)) == 0) { 4577 /* point to next duplet */ 4578 datanameptr = vidptr + strlen(vidptr) + 1; 4579 /* add len of this duplet */ 4580 dupletlen += strlen(vidptr) + 1; 4581 /* get to device type */ 4582 vidptr += strlen(DEVICE_TYPE_STR) + 1; 4583 vidlen = strlen(vidptr); 4584 if ((vidlen != 0) && 4585 bcmp(inq->inq_vid, vidptr, vidlen) == 0) { 4586 vhci_parse_mpxio_options(dip, cdip, 4587 datanameptr, len - dupletlen); 4588 break; 4589 } 4590 /* get to next duplet */ 4591 vidptr += strlen(vidptr) + 1; 4592 } 4593 /* get to the next device-type */ 4594 while (len - dupletlen > 0 && 4595 strlen(vidptr) != 0 && 4596 strncmp(vidptr, DEVICE_TYPE_STR, 4597 strlen(DEVICE_TYPE_STR)) != 0) { 4598 dupletlen += strlen(vidptr) + 1; 4599 vidptr += strlen(vidptr) + 1; 4600 } 4601 } 4602 if (config_list_len > 0) { 4603 kmem_free(config_list, config_list_len); 4604 } 4605 } 4606 } 4607 4608 static int 4609 vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip, 4610 struct scsi_failover_ops *fo, 4611 scsi_vhci_lun_t *vlun, 4612 struct scsi_vhci *vhci) 4613 { 4614 struct scsi_path_opinfo opinfo; 4615 char *pclass, *best_pclass; 4616 4617 if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) { 4618 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: " 4619 "Failed to get operation info for path:%p\n", (void *)pip)); 4620 return (MDI_FAILURE); 4621 } 4622 /* set the xlf capable flag in the vlun for future use */ 4623 vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable; 4624 (void) mdi_prop_update_string(pip, "path-class", 4625 opinfo.opinfo_path_attr); 4626 4627 pclass = opinfo.opinfo_path_attr; 4628 if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) { 4629 mutex_enter(&vlun->svl_mutex); 4630 if (vlun->svl_active_pclass != NULL) { 4631 if (strcmp(vlun->svl_active_pclass, pclass) != 0) { 4632 mutex_exit(&vlun->svl_mutex); 4633 /* 4634 * Externally initiated failover has happened; 4635 * force the path state to be STANDBY/ONLINE, 4636 * next IO will trigger failover and thus 4637 * sync-up the pathstates. Reason we don't 4638 * sync-up immediately by invoking 4639 * vhci_update_pathstates() is because it 4640 * needs a VHCI_HOLD_LUN() and we don't 4641 * want to block here. 4642 * 4643 * Further, if the device is an ALUA device, 4644 * then failure to exactly match 'pclass' and 4645 * 'svl_active_pclass'(as is the case here) 4646 * indicates that the currently active path 4647 * is a 'non-optimized' path - which means 4648 * that 'svl_active_pclass' needs to be 4649 * replaced with opinfo.opinfo_path_state 4650 * value. 4651 */ 4652 4653 if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) { 4654 char *tptr; 4655 4656 /* 4657 * The device is ALUA compliant. The 4658 * state need to be changed to online 4659 * rather than standby state which is 4660 * done typically for a asymmetric 4661 * device that is non ALUA compliant. 
4662 */ 4663 mdi_pi_set_state(pip, 4664 MDI_PATHINFO_STATE_ONLINE); 4665 tptr = kmem_alloc(strlen 4666 (opinfo.opinfo_path_attr)+1, 4667 KM_SLEEP); 4668 (void) strlcpy(tptr, 4669 opinfo.opinfo_path_attr, 4670 (strlen(opinfo.opinfo_path_attr) 4671 +1)); 4672 mutex_enter(&vlun->svl_mutex); 4673 kmem_free(vlun->svl_active_pclass, 4674 strlen(vlun->svl_active_pclass)+1); 4675 vlun->svl_active_pclass = tptr; 4676 mutex_exit(&vlun->svl_mutex); 4677 } else { 4678 /* 4679 * Non ALUA device case. 4680 */ 4681 mdi_pi_set_state(pip, 4682 MDI_PATHINFO_STATE_STANDBY); 4683 } 4684 vlun->svl_fo_support = opinfo.opinfo_mode; 4685 mdi_pi_set_preferred(pip, 4686 opinfo.opinfo_preferred); 4687 return (MDI_SUCCESS); 4688 } 4689 } else { 4690 char *tptr; 4691 4692 /* 4693 * lets release the mutex before we try to 4694 * allocate since the potential to sleep is 4695 * possible. 4696 */ 4697 mutex_exit(&vlun->svl_mutex); 4698 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP); 4699 (void) strlcpy(tptr, pclass, (strlen(pclass)+1)); 4700 mutex_enter(&vlun->svl_mutex); 4701 vlun->svl_active_pclass = tptr; 4702 } 4703 mutex_exit(&vlun->svl_mutex); 4704 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE); 4705 vlun->svl_waiting_for_activepath = 0; 4706 } else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) { 4707 mutex_enter(&vlun->svl_mutex); 4708 if (vlun->svl_active_pclass == NULL) { 4709 char *tptr; 4710 4711 mutex_exit(&vlun->svl_mutex); 4712 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP); 4713 (void) strlcpy(tptr, pclass, (strlen(pclass)+1)); 4714 mutex_enter(&vlun->svl_mutex); 4715 vlun->svl_active_pclass = tptr; 4716 } 4717 mutex_exit(&vlun->svl_mutex); 4718 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE); 4719 vlun->svl_waiting_for_activepath = 0; 4720 } else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) { 4721 mutex_enter(&vlun->svl_mutex); 4722 if (vlun->svl_active_pclass != NULL) { 4723 if (strcmp(vlun->svl_active_pclass, pclass) == 0) { 4724 mutex_exit(&vlun->svl_mutex); 4725 /* 4726 * externally initiated failover has happened; 4727 * force state to ONLINE (see comment above) 4728 */ 4729 mdi_pi_set_state(pip, 4730 MDI_PATHINFO_STATE_ONLINE); 4731 vlun->svl_fo_support = opinfo.opinfo_mode; 4732 mdi_pi_set_preferred(pip, 4733 opinfo.opinfo_preferred); 4734 return (MDI_SUCCESS); 4735 } 4736 } 4737 mutex_exit(&vlun->svl_mutex); 4738 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY); 4739 4740 /* 4741 * Initiate auto-failback, if enabled, for path if path-state 4742 * is transitioning from OFFLINE->STANDBY and pathclass is the 4743 * preferred pathclass for this storage. 4744 * NOTE: In case where opinfo_path_state is SCSI_PATH_ACTIVE 4745 * (above), where the pi state is set to STANDBY, we don't 4746 * initiate auto-failback as the next IO shall take care of. 4747 * this. See comment above. 
4748 */ 4749 (void) fo->sfo_pathclass_next(NULL, &best_pclass, 4750 vlun->svl_fops_ctpriv); 4751 if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) == 4752 VHCI_CONF_FLAGS_AUTO_FAILBACK) && 4753 (strcmp(pclass, best_pclass) == 0) && 4754 ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)|| 4755 (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) { 4756 VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p" 4757 " OFFLINE->STANDBY transition for lun %s\n", 4758 best_pclass, (void *)pip, vlun->svl_lun_wwn)); 4759 (void) taskq_dispatch(vhci->vhci_taskq, 4760 vhci_initiate_auto_failback, (void *) vlun, 4761 KM_SLEEP); 4762 } 4763 } 4764 vlun->svl_fo_support = opinfo.opinfo_mode; 4765 mdi_pi_set_preferred(pip, opinfo.opinfo_preferred); 4766 4767 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x," 4768 " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n", 4769 opinfo.opinfo_rev, opinfo.opinfo_path_state, 4770 opinfo.opinfo_preferred, opinfo.opinfo_mode)); 4771 4772 return (MDI_SUCCESS); 4773 } 4774 4775 /* 4776 * Form the kstat name and and call mdi_pi_kstat_create() 4777 */ 4778 void 4779 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip) 4780 { 4781 dev_info_t *tgt_dip; 4782 dev_info_t *pdip; 4783 char *guid; 4784 char *target_port, *target_port_dup; 4785 char ks_name[KSTAT_STRLEN]; 4786 uint_t pid; 4787 int by_id; 4788 mod_hash_val_t hv; 4789 4790 4791 /* return if we have already allocated kstats */ 4792 if (mdi_pi_kstat_exists(pip)) 4793 return; 4794 4795 /* 4796 * We need instance numbers to create a kstat name, return if we don't 4797 * have instance numbers assigned yet. 4798 */ 4799 tgt_dip = mdi_pi_get_client(pip); 4800 pdip = mdi_pi_get_phci(pip); 4801 if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1)) 4802 return; 4803 4804 /* 4805 * A path oriented kstat has a ks_name of the form: 4806 * 4807 * <client-driver><instance>.t<pid>.<pHCI-driver><instance> 4808 * 4809 * We maintain a bidirectional 'target-port' to <pid> map, 4810 * called targetmap. All pathinfo nodes with the same 4811 * 'target-port' map to the same <pid>. The iostat(1M) code, 4812 * when parsing a path oriented kstat name, uses the <pid> as 4813 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order 4814 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats, 4815 * this ioctl needs to translate a <pid> to a 'target-port' 4816 * even after all pathinfo nodes associated with the 4817 * 'target-port' have been destroyed. This is needed to support 4818 * consistent first-iteration activity-since-boot iostat(1M) 4819 * output. Because of this requirement, the mapping can't be 4820 * based on pathinfo information in a devinfo snapshot. 4821 */ 4822 4823 /* determine 'target-port' */ 4824 if (mdi_prop_lookup_string(pip, 4825 SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) { 4826 target_port_dup = i_ddi_strdup(target_port, KM_SLEEP); 4827 (void) mdi_prop_free(target_port); 4828 by_id = 1; 4829 } else { 4830 /* 4831 * If the pHCI did not set up 'target-port' on this 4832 * pathinfo node, assume that our client is the only 4833 * one with paths to the device by using the guid 4834 * value as the 'target-port'. Since no other client 4835 * will have the same guid, no other client will use 4836 * the same <pid>. NOTE: a client with an instance 4837 * number always has a guid. 
4838 */ 4839 (void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, 4840 PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid); 4841 target_port_dup = i_ddi_strdup(guid, KM_SLEEP); 4842 ddi_prop_free(guid); 4843 4844 /* 4845 * For this type of mapping we don't want the 4846 * <id> -> 'target-port' mapping to be made. This 4847 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl 4848 * to fail, and the iostat(1M) long '-n' output will 4849 * still use the <pid>. We do this because we just 4850 * made up the 'target-port' using the guid, and we 4851 * don't want to expose that fact in iostat output. 4852 */ 4853 by_id = 0; 4854 } 4855 4856 /* find/establish <pid> given 'target-port' */ 4857 mutex_enter(&vhci_targetmap_mutex); 4858 if (mod_hash_find(vhci_targetmap_byport, 4859 (mod_hash_key_t)target_port_dup, &hv) == 0) { 4860 pid = (int)(intptr_t)hv; /* mapping exists */ 4861 } else { 4862 pid = vhci_targetmap_pid++; /* new mapping */ 4863 4864 (void) mod_hash_insert(vhci_targetmap_byport, 4865 (mod_hash_key_t)target_port_dup, 4866 (mod_hash_val_t)(intptr_t)pid); 4867 if (by_id) { 4868 (void) mod_hash_insert(vhci_targetmap_bypid, 4869 (mod_hash_key_t)(uintptr_t)pid, 4870 (mod_hash_val_t)(uintptr_t)target_port_dup); 4871 } 4872 target_port_dup = NULL; /* owned by hash */ 4873 } 4874 mutex_exit(&vhci_targetmap_mutex); 4875 4876 /* form kstat name */ 4877 (void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d", 4878 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip), 4879 pid, ddi_driver_name(pdip), ddi_get_instance(pdip)); 4880 4881 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p " 4882 "kstat %s: pid %x <-> port %s\n", (void *)pip, 4883 ks_name, pid, target_port_dup)); 4884 if (target_port_dup) 4885 kmem_free(target_port_dup, strlen(target_port_dup) + 1); 4886 4887 /* call mdi to create kstats with the name we built */ 4888 (void) mdi_pi_kstat_create(pip, ks_name); 4889 } 4890 4891 /* ARGSUSED */ 4892 static int 4893 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 4894 { 4895 scsi_hba_tran_t *hba = NULL; 4896 struct scsi_device *psd = NULL; 4897 scsi_vhci_lun_t *vlun = NULL; 4898 dev_info_t *pdip = NULL; 4899 dev_info_t *cdip; 4900 dev_info_t *tgt_dip; 4901 struct scsi_vhci *vhci; 4902 char *guid; 4903 struct scsi_failover_ops *sfo; 4904 scsi_vhci_priv_t *svp = NULL; 4905 struct scsi_address *ap; 4906 struct scsi_pkt *pkt; 4907 int rval = MDI_FAILURE; 4908 mpapi_item_list_t *list_ptr; 4909 mpapi_lu_data_t *ld; 4910 4911 ASSERT(vdip != NULL); 4912 ASSERT(pip != NULL); 4913 4914 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4915 ASSERT(vhci != NULL); 4916 4917 pdip = mdi_pi_get_phci(pip); 4918 hba = ddi_get_driver_private(pdip); 4919 ASSERT(hba != NULL); 4920 4921 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4922 ASSERT(svp != NULL); 4923 4924 cdip = mdi_pi_get_client(pip); 4925 ASSERT(cdip != NULL); 4926 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS, 4927 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 4928 VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid " 4929 "property failed")); 4930 goto failure; 4931 } 4932 4933 vlun = vhci_lun_lookup(cdip); 4934 ASSERT(vlun != NULL); 4935 4936 ddi_prop_free(guid); 4937 4938 vlun->svl_dip = mdi_pi_get_client(pip); 4939 ASSERT(vlun->svl_dip != NULL); 4940 4941 psd = svp->svp_psd; 4942 ASSERT(psd != NULL); 4943 4944 /* 4945 * Get inquiry data into pathinfo related scsi_device structure. 
4946 * Free sq_inq when pathinfo related scsi_device structure is destroyed 4947 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own 4948 * copy of scsi_device and scsi_inquiry data on a per-path basis. 4949 */ 4950 if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 4951 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: " 4952 "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval)); 4953 rval = MDI_FAILURE; 4954 goto failure; 4955 } 4956 4957 /* 4958 * See if we have a failover module to support the device. 4959 * 4960 * We re-probe to determine the failover ops for each path. This 4961 * is done in case there are any path-specific side-effects associated 4962 * with the sfo_device_probe implementation. 4963 * 4964 * Give the first successfull sfo_device_probe the opportunity to 4965 * establish 'ctpriv', vlun/client private data. The ctpriv will 4966 * then be passed into the failover module on all other sfo_device_*() 4967 * operations (and must be freed by sfo_device_unprobe implementation). 4968 * 4969 * NOTE: While sfo_device_probe is done once per path, 4970 * sfo_device_unprobe only occurs once - when the vlun is destroyed. 4971 * 4972 * NOTE: We don't currently support per-path fops private data 4973 * mechanism. 4974 */ 4975 sfo = vhci_dev_fo(vdip, psd, 4976 &vlun->svl_fops_ctpriv, &vlun->svl_fops_name); 4977 4978 /* check path configuration result with current vlun state */ 4979 if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) || 4980 (sfo && vlun->svl_not_supported) || 4981 ((sfo == NULL) && vlun->svl_fops)) { 4982 /* Getting different results for different paths. */ 4983 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, 4984 "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n", 4985 (void *)pip)); 4986 cmn_err(CE_WARN, "scsi_vhci: failover contradiction: " 4987 "'%s'.vs.'%s': path %s\n", 4988 vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL", 4989 sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip)); 4990 vlun->svl_not_supported = 1; 4991 rval = MDI_NOT_SUPPORTED; 4992 goto done; 4993 } else if (sfo == NULL) { 4994 /* No failover module - device not supported under vHCI. */ 4995 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, 4996 "!vhci_pathinfo_online: dev (path 0x%p) not " 4997 "supported\n", (void *)pip)); 4998 4999 /* XXX does this contradict vhci_is_dev_supported ? */ 5000 vlun->svl_not_supported = 1; 5001 rval = MDI_NOT_SUPPORTED; 5002 goto done; 5003 } 5004 5005 /* failover supported for device - save failover_ops in vlun */ 5006 vlun->svl_fops = sfo; 5007 ASSERT(vlun->svl_fops_name != NULL); 5008 5009 /* 5010 * Obtain the device-type based mpxio options as specified in 5011 * scsi_vhci.conf file. 5012 * 5013 * NOTE: currently, the end result is a call to 5014 * mdi_set_lb_region_size(). 5015 */ 5016 tgt_dip = psd->sd_dev; 5017 ASSERT(tgt_dip != NULL); 5018 vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd); 5019 5020 /* 5021 * The device probe or options in conf file may have set/changed the 5022 * lb policy, save the current value. 
5023 */ 5024 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip); 5025 5026 /* 5027 * if PGR is active, revalidate key and register on this path also, 5028 * if key is still valid 5029 */ 5030 sema_p(&vlun->svl_pgr_sema); 5031 if (vlun->svl_pgr_active) { 5032 rval = vhci_pgr_validate_and_register(svp); 5033 if (rval != 1) { 5034 rval = MDI_FAILURE; 5035 sema_v(&vlun->svl_pgr_sema); 5036 goto failure; 5037 } 5038 } 5039 sema_v(&vlun->svl_pgr_sema); 5040 5041 if (svp->svp_new_path) { 5042 /* 5043 * Last chance to perform any cleanup operations on this 5044 * new path before making this path completely online. 5045 */ 5046 svp->svp_new_path = 0; 5047 5048 /* 5049 * If scsi_vhci knows the lun is alread RESERVE'd, 5050 * then skip the issue of RELEASE on new path. 5051 */ 5052 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) { 5053 /* 5054 * Issue SCSI-2 RELEASE only for the first time on 5055 * a new path just in case the host rebooted and 5056 * a reservation is still pending on this path. 5057 * IBM Shark storage does not clear RESERVE upon 5058 * host reboot. 5059 */ 5060 ap = &psd->sd_address; 5061 pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0, 5062 sizeof (struct scsi_arq_status), 0, 0, 5063 SLEEP_FUNC, NULL); 5064 if (pkt == NULL) { 5065 VHCI_DEBUG(1, (CE_NOTE, NULL, 5066 "!vhci_pathinfo_online: " 5067 "Release init_pkt failed :%p\n", 5068 (void *)pip)); 5069 rval = MDI_FAILURE; 5070 goto failure; 5071 } 5072 pkt->pkt_cdbp[0] = SCMD_RELEASE; 5073 pkt->pkt_time = 60; 5074 5075 VHCI_DEBUG(1, (CE_NOTE, NULL, 5076 "!vhci_path_online: path:%p " 5077 "Issued SCSI-2 RELEASE\n", (void *)pip)); 5078 5079 /* Ignore the return value */ 5080 (void) vhci_do_scsi_cmd(pkt); 5081 scsi_destroy_pkt(pkt); 5082 } 5083 } 5084 5085 rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci); 5086 if (rval == MDI_FAILURE) { 5087 goto failure; 5088 } 5089 5090 /* Initialize MP-API data */ 5091 vhci_update_mpapi_data(vhci, vlun, pip); 5092 5093 /* 5094 * MP-API also needs the Inquiry data to be maintained in the 5095 * mp_vendor_prop_t structure, so find the lun and update its 5096 * structure with this data. 5097 */ 5098 list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL, 5099 MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun); 5100 ld = (mpapi_lu_data_t *)list_ptr->item->idata; 5101 if (ld != NULL) { 5102 bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8); 5103 bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16); 5104 bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4); 5105 } else { 5106 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: " 5107 "mpapi_lu_data_t is NULL")); 5108 } 5109 5110 /* create kstats for path */ 5111 vhci_kstat_create_pathinfo(pip); 5112 5113 done: 5114 mutex_enter(&vhci_global_mutex); 5115 cv_broadcast(&vhci_cv); 5116 mutex_exit(&vhci_global_mutex); 5117 5118 if (vlun->svl_setcap_done) { 5119 (void) vhci_pHCI_cap(ap, "sector-size", 5120 vlun->svl_sector_size, 1, pip); 5121 } 5122 5123 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n", 5124 (void *)pip)); 5125 5126 failure: 5127 return (rval); 5128 } 5129 5130 /* 5131 * path offline handler. Release all bindings that will not be 5132 * released by the normal packet transport/completion code path. 5133 * Since we don't (presently) keep any bindings alive outside of 5134 * the in-transport packets (which will be released on completion) 5135 * there is not much to do here. 
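 *
 * What the handler does do: wait (up to vhci_path_quiesce_timeout
 * seconds) for commands still pending on the path to drain, clear a
 * SCSI-II reservation held through this path with a recovery reset if
 * necessary, and then mark the pathinfo node OFFLINE.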
5136 */ 5137 /* ARGSUSED */ 5138 static int 5139 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 5140 { 5141 scsi_hba_tran_t *hba = NULL; 5142 struct scsi_device *psd = NULL; 5143 dev_info_t *pdip = NULL; 5144 dev_info_t *cdip = NULL; 5145 scsi_vhci_priv_t *svp = NULL; 5146 5147 ASSERT(vdip != NULL); 5148 ASSERT(pip != NULL); 5149 5150 pdip = mdi_pi_get_phci(pip); 5151 ASSERT(pdip != NULL); 5152 if (pdip == NULL) { 5153 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5154 "phci dip", (void *)pip)); 5155 return (MDI_FAILURE); 5156 } 5157 5158 cdip = mdi_pi_get_client(pip); 5159 ASSERT(cdip != NULL); 5160 if (cdip == NULL) { 5161 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5162 "client dip", (void *)pip)); 5163 return (MDI_FAILURE); 5164 } 5165 5166 hba = ddi_get_driver_private(pdip); 5167 ASSERT(hba != NULL); 5168 5169 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 5170 if (svp == NULL) { 5171 /* 5172 * mdi_pathinfo node in INIT state can have vHCI private 5173 * information set to null 5174 */ 5175 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5176 "svp is NULL for pip 0x%p\n", (void *)pip)); 5177 return (MDI_SUCCESS); 5178 } 5179 5180 psd = svp->svp_psd; 5181 ASSERT(psd != NULL); 5182 5183 mutex_enter(&svp->svp_mutex); 5184 5185 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5186 "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip)); 5187 while (svp->svp_cmds != 0) { 5188 if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex, 5189 drv_usectohz(vhci_path_quiesce_timeout * 1000000), 5190 TR_CLOCK_TICK) == -1) { 5191 /* 5192 * The timeout time reached without the condition 5193 * being signaled. 5194 */ 5195 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5196 "Timeout reached on path 0x%p without the cond\n", 5197 (void *)pip)); 5198 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5199 "%d cmds still pending on path: 0x%p\n", 5200 svp->svp_cmds, (void *)pip)); 5201 break; 5202 } 5203 } 5204 mutex_exit(&svp->svp_mutex); 5205 5206 /* 5207 * Check to see if this vlun has an active SCSI-II RESERVE. And this 5208 * is the pip for the path that has been reserved. 5209 * If so clear the reservation by sending a reset, so the host will not 5210 * get a reservation conflict. Reset the flag VLUN_RESERVE_ACTIVE_FLG 5211 * for this lun. Also a reset notify is sent to the target driver 5212 * just in case the POR check condition is cleared by some other layer 5213 * in the stack. 
5214 */ 5215 if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 5216 if (pip == svp->svp_svl->svl_resrv_pip) { 5217 if (vhci_recovery_reset(svp->svp_svl, 5218 &svp->svp_psd->sd_address, TRUE, 5219 VHCI_DEPTH_TARGET) == 0) { 5220 VHCI_DEBUG(1, (CE_NOTE, NULL, 5221 "!vhci_pathinfo_offline (pip:%p):" 5222 "reset failed, retrying\n", (void *)pip)); 5223 delay(1*drv_usectohz(1000000)); 5224 if (vhci_recovery_reset(svp->svp_svl, 5225 &svp->svp_psd->sd_address, TRUE, 5226 VHCI_DEPTH_TARGET) == 0) { 5227 VHCI_DEBUG(1, (CE_NOTE, NULL, 5228 "!vhci_pathinfo_offline " 5229 "(pip:%p): reset failed, " 5230 "giving up!\n", (void *)pip)); 5231 } 5232 } 5233 svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 5234 } 5235 } 5236 5237 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE); 5238 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED); 5239 5240 VHCI_DEBUG(1, (CE_NOTE, NULL, 5241 "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip)); 5242 return (MDI_SUCCESS); 5243 } 5244 5245 5246 /* 5247 * routine for SCSI VHCI IOCTL implementation. 5248 */ 5249 /* ARGSUSED */ 5250 static int 5251 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 5252 { 5253 struct scsi_vhci *vhci; 5254 dev_info_t *vdip; 5255 mdi_pathinfo_t *pip; 5256 int instance, held; 5257 int retval = 0; 5258 caddr_t phci_path = NULL, client_path = NULL; 5259 caddr_t paddr = NULL; 5260 sv_iocdata_t ioc; 5261 sv_iocdata_t *pioc = &ioc; 5262 sv_switch_to_cntlr_iocdata_t iocsc; 5263 sv_switch_to_cntlr_iocdata_t *piocsc = &iocsc; 5264 caddr_t s; 5265 scsi_vhci_lun_t *vlun; 5266 struct scsi_failover_ops *fo; 5267 char *pclass; 5268 5269 /* Check for validity of vhci structure */ 5270 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev))); 5271 if (vhci == NULL) { 5272 return (ENXIO); 5273 } 5274 5275 mutex_enter(&vhci->vhci_mutex); 5276 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5277 mutex_exit(&vhci->vhci_mutex); 5278 return (ENXIO); 5279 } 5280 mutex_exit(&vhci->vhci_mutex); 5281 5282 /* Get the vhci dip */ 5283 vdip = vhci->vhci_dip; 5284 ASSERT(vdip != NULL); 5285 instance = ddi_get_instance(vdip); 5286 5287 /* Allocate memory for getting parameters from userland */ 5288 phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5289 client_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5290 paddr = kmem_zalloc(MAXNAMELEN, KM_SLEEP); 5291 5292 /* 5293 * Set a local variable indicating the ioctl name. Used for 5294 * printing debug strings. 
5295 */ 5296 switch (cmd) { 5297 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5298 s = "GET_CLIENT_MULTIPATH_INFO"; 5299 break; 5300 5301 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5302 s = "GET_PHCI_MULTIPATH_INFO"; 5303 break; 5304 5305 case SCSI_VHCI_GET_CLIENT_NAME: 5306 s = "GET_CLIENT_NAME"; 5307 break; 5308 5309 case SCSI_VHCI_PATH_ONLINE: 5310 s = "PATH_ONLINE"; 5311 break; 5312 5313 case SCSI_VHCI_PATH_OFFLINE: 5314 s = "PATH_OFFLINE"; 5315 break; 5316 5317 case SCSI_VHCI_PATH_STANDBY: 5318 s = "PATH_STANDBY"; 5319 break; 5320 5321 case SCSI_VHCI_PATH_TEST: 5322 s = "PATH_TEST"; 5323 break; 5324 5325 case SCSI_VHCI_SWITCH_TO_CNTLR: 5326 s = "SWITCH_TO_CNTLR"; 5327 break; 5328 case SCSI_VHCI_PATH_DISABLE: 5329 s = "PATH_DISABLE"; 5330 break; 5331 case SCSI_VHCI_PATH_ENABLE: 5332 s = "PATH_ENABLE"; 5333 break; 5334 5335 case SCSI_VHCI_GET_TARGET_LONGNAME: 5336 s = "GET_TARGET_LONGNAME"; 5337 break; 5338 5339 #ifdef DEBUG 5340 case SCSI_VHCI_CONFIGURE_PHCI: 5341 s = "CONFIGURE_PHCI"; 5342 break; 5343 5344 case SCSI_VHCI_UNCONFIGURE_PHCI: 5345 s = "UNCONFIGURE_PHCI"; 5346 break; 5347 #endif 5348 5349 default: 5350 s = "Unknown"; 5351 vhci_log(CE_NOTE, vdip, 5352 "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd); 5353 retval = ENOTSUP; 5354 break; 5355 } 5356 if (retval != 0) { 5357 goto end; 5358 } 5359 5360 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s)); 5361 5362 /* 5363 * Get IOCTL parameters from userland 5364 */ 5365 switch (cmd) { 5366 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5367 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5368 case SCSI_VHCI_GET_CLIENT_NAME: 5369 case SCSI_VHCI_PATH_ONLINE: 5370 case SCSI_VHCI_PATH_OFFLINE: 5371 case SCSI_VHCI_PATH_STANDBY: 5372 case SCSI_VHCI_PATH_TEST: 5373 case SCSI_VHCI_PATH_DISABLE: 5374 case SCSI_VHCI_PATH_ENABLE: 5375 case SCSI_VHCI_GET_TARGET_LONGNAME: 5376 #ifdef DEBUG 5377 case SCSI_VHCI_CONFIGURE_PHCI: 5378 case SCSI_VHCI_UNCONFIGURE_PHCI: 5379 #endif 5380 retval = vhci_get_iocdata((const void *)data, pioc, mode, s); 5381 break; 5382 5383 case SCSI_VHCI_SWITCH_TO_CNTLR: 5384 retval = vhci_get_iocswitchdata((const void *)data, piocsc, 5385 mode, s); 5386 break; 5387 } 5388 if (retval != 0) { 5389 goto end; 5390 } 5391 5392 5393 /* 5394 * Process the IOCTL 5395 */ 5396 switch (cmd) { 5397 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5398 { 5399 uint_t num_paths; /* Num paths to client dev */ 5400 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5401 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5402 dev_info_t *cdip; /* Client device dip */ 5403 5404 if (pioc->ret_elem == NULL) { 5405 retval = EINVAL; 5406 break; 5407 } 5408 5409 /* Get client device path from user land */ 5410 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5411 retval = EFAULT; 5412 break; 5413 } 5414 5415 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5416 "client <%s>", s, client_path)); 5417 5418 /* Get number of paths to this client device */ 5419 if ((cdip = mdi_client_path2devinfo(vdip, client_path)) 5420 == NULL) { 5421 retval = ENXIO; 5422 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5423 "client dip doesn't exist. 
invalid path <%s>", 5424 s, client_path)); 5425 break; 5426 } 5427 num_paths = mdi_client_get_path_count(cdip); 5428 5429 if (ddi_copyout(&num_paths, pioc->ret_elem, 5430 sizeof (num_paths), mode)) { 5431 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5432 "num_paths copyout failed", s)); 5433 retval = EFAULT; 5434 break; 5435 } 5436 5437 /* If user just wanted num_paths, then return */ 5438 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5439 num_paths == 0) { 5440 break; 5441 } 5442 5443 /* Set num_paths to value as much as can be sent to userland */ 5444 if (num_paths > pioc->buf_elem) { 5445 num_paths = pioc->buf_elem; 5446 } 5447 5448 /* Allocate memory and get userland pointers */ 5449 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5450 pioc, mode, s) != 0) { 5451 retval = EFAULT; 5452 break; 5453 } 5454 ASSERT(upibuf != NULL); 5455 ASSERT(kpibuf != NULL); 5456 5457 /* 5458 * Get the path information and send it to userland. 5459 */ 5460 if (vhci_get_client_path_list(cdip, kpibuf, num_paths) 5461 != MDI_SUCCESS) { 5462 retval = ENXIO; 5463 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5464 break; 5465 } 5466 5467 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5468 pioc, mode, s)) { 5469 retval = EFAULT; 5470 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5471 break; 5472 } 5473 5474 /* Free the memory allocated for path information */ 5475 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5476 break; 5477 } 5478 5479 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5480 { 5481 uint_t num_paths; /* Num paths to client dev */ 5482 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5483 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5484 dev_info_t *pdip; /* PHCI device dip */ 5485 5486 if (pioc->ret_elem == NULL) { 5487 retval = EINVAL; 5488 break; 5489 } 5490 5491 /* Get PHCI device path from user land */ 5492 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5493 retval = EFAULT; 5494 break; 5495 } 5496 5497 VHCI_DEBUG(6, (CE_WARN, vdip, 5498 "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path)); 5499 5500 /* Get number of devices associated with this PHCI device */ 5501 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5502 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5503 "phci dip doesn't exist. invalid path <%s>", 5504 s, phci_path)); 5505 retval = ENXIO; 5506 break; 5507 } 5508 5509 num_paths = mdi_phci_get_path_count(pdip); 5510 5511 if (ddi_copyout(&num_paths, pioc->ret_elem, 5512 sizeof (num_paths), mode)) { 5513 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5514 "num_paths copyout failed", s)); 5515 retval = EFAULT; 5516 break; 5517 } 5518 5519 /* If user just wanted num_paths, then return */ 5520 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5521 num_paths == 0) { 5522 break; 5523 } 5524 5525 /* Set num_paths to value as much as can be sent to userland */ 5526 if (num_paths > pioc->buf_elem) { 5527 num_paths = pioc->buf_elem; 5528 } 5529 5530 /* Allocate memory and get userland pointers */ 5531 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5532 pioc, mode, s) != 0) { 5533 retval = EFAULT; 5534 break; 5535 } 5536 ASSERT(upibuf != NULL); 5537 ASSERT(kpibuf != NULL); 5538 5539 /* 5540 * Get the path information and send it to userland. 
5541 */ 5542 if (vhci_get_phci_path_list(pdip, kpibuf, num_paths) 5543 != MDI_SUCCESS) { 5544 retval = ENXIO; 5545 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5546 break; 5547 } 5548 5549 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5550 pioc, mode, s)) { 5551 retval = EFAULT; 5552 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5553 break; 5554 } 5555 5556 /* Free the memory allocated for path information */ 5557 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5558 break; 5559 } 5560 5561 case SCSI_VHCI_GET_CLIENT_NAME: 5562 { 5563 dev_info_t *cdip, *pdip; 5564 5565 /* Get PHCI path and device address from user land */ 5566 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5567 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5568 retval = EFAULT; 5569 break; 5570 } 5571 5572 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5573 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5574 5575 /* Get the PHCI dip */ 5576 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5577 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5578 "phci dip doesn't exist. invalid path <%s>", 5579 s, phci_path)); 5580 retval = ENXIO; 5581 break; 5582 } 5583 5584 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5585 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5586 "pathinfo doesn't exist. invalid device addr", s)); 5587 retval = ENXIO; 5588 break; 5589 } 5590 5591 /* Get the client device pathname and send to userland */ 5592 cdip = mdi_pi_get_client(pip); 5593 vhci_ioc_devi_to_path(cdip, client_path); 5594 5595 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5596 "client <%s>", s, client_path)); 5597 5598 if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) { 5599 retval = EFAULT; 5600 break; 5601 } 5602 break; 5603 } 5604 5605 case SCSI_VHCI_PATH_ONLINE: 5606 case SCSI_VHCI_PATH_OFFLINE: 5607 case SCSI_VHCI_PATH_STANDBY: 5608 case SCSI_VHCI_PATH_TEST: 5609 { 5610 dev_info_t *pdip; /* PHCI dip */ 5611 5612 /* Get PHCI path and device address from user land */ 5613 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5614 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5615 retval = EFAULT; 5616 break; 5617 } 5618 5619 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5620 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5621 5622 /* Get the PHCI dip */ 5623 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5624 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5625 "phci dip doesn't exist. invalid path <%s>", 5626 s, phci_path)); 5627 retval = ENXIO; 5628 break; 5629 } 5630 5631 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5632 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5633 "pathinfo doesn't exist. 
invalid device addr", s)); 5634 retval = ENXIO; 5635 break; 5636 } 5637 5638 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5639 "Calling MDI function to change device state", s)); 5640 5641 switch (cmd) { 5642 case SCSI_VHCI_PATH_ONLINE: 5643 retval = mdi_pi_online(pip, 0); 5644 break; 5645 5646 case SCSI_VHCI_PATH_OFFLINE: 5647 retval = mdi_pi_offline(pip, 0); 5648 break; 5649 5650 case SCSI_VHCI_PATH_STANDBY: 5651 retval = mdi_pi_standby(pip, 0); 5652 break; 5653 5654 case SCSI_VHCI_PATH_TEST: 5655 break; 5656 } 5657 break; 5658 } 5659 5660 case SCSI_VHCI_SWITCH_TO_CNTLR: 5661 { 5662 dev_info_t *cdip; 5663 struct scsi_device *devp; 5664 5665 /* Get the client device pathname */ 5666 if (ddi_copyin(piocsc->client, client_path, 5667 MAXPATHLEN, mode)) { 5668 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5669 "client_path copyin failed", s)); 5670 retval = EFAULT; 5671 break; 5672 } 5673 5674 /* Get the path class to which user wants to switch */ 5675 if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) { 5676 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5677 "controller_class copyin failed", s)); 5678 retval = EFAULT; 5679 break; 5680 } 5681 5682 /* Perform validity checks */ 5683 if ((cdip = mdi_client_path2devinfo(vdip, 5684 client_path)) == NULL) { 5685 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5686 "client dip doesn't exist. invalid path <%s>", 5687 s, client_path)); 5688 retval = ENXIO; 5689 break; 5690 } 5691 5692 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func " 5693 "to switch controller")); 5694 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> " 5695 "class <%s>", client_path, paddr)); 5696 5697 if (strcmp(paddr, PCLASS_PRIMARY) && 5698 strcmp(paddr, PCLASS_SECONDARY)) { 5699 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5700 "invalid path class <%s>", s, paddr)); 5701 retval = ENXIO; 5702 break; 5703 } 5704 5705 devp = ddi_get_driver_private(cdip); 5706 if (devp == NULL) { 5707 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5708 "invalid scsi device <%s>", s, client_path)); 5709 retval = ENXIO; 5710 break; 5711 } 5712 vlun = ADDR2VLUN(&devp->sd_address); 5713 ASSERT(vlun); 5714 5715 /* 5716 * Checking to see if device has only one pclass, PRIMARY. 5717 * If so this device doesn't support failovers. Assumed 5718 * that the devices with one pclass is PRIMARY, as thats the 5719 * case today. If this is not true and in future other 5720 * symmetric devices are supported with other pclass, this 5721 * IOCTL shall have to be overhauled anyways as now the only 5722 * arguments it accepts are PRIMARY and SECONDARY. 
5723 */ 5724 fo = vlun->svl_fops; 5725 if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass, 5726 vlun->svl_fops_ctpriv)) { 5727 retval = ENOTSUP; 5728 break; 5729 } 5730 5731 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 5732 mutex_enter(&vlun->svl_mutex); 5733 if (vlun->svl_active_pclass != NULL) { 5734 if (strcmp(vlun->svl_active_pclass, paddr) == 0) { 5735 mutex_exit(&vlun->svl_mutex); 5736 retval = EALREADY; 5737 VHCI_RELEASE_LUN(vlun); 5738 break; 5739 } 5740 } 5741 mutex_exit(&vlun->svl_mutex); 5742 /* Call mdi function to cause a switch over */ 5743 retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC); 5744 if (retval == MDI_SUCCESS) { 5745 retval = 0; 5746 } else if (retval == MDI_BUSY) { 5747 retval = EBUSY; 5748 } else { 5749 retval = EIO; 5750 } 5751 VHCI_RELEASE_LUN(vlun); 5752 break; 5753 } 5754 5755 case SCSI_VHCI_PATH_ENABLE: 5756 case SCSI_VHCI_PATH_DISABLE: 5757 { 5758 dev_info_t *cdip, *pdip; 5759 5760 /* 5761 * Get client device path from user land 5762 */ 5763 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5764 retval = EFAULT; 5765 break; 5766 } 5767 5768 /* 5769 * Get Phci device path from user land 5770 */ 5771 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5772 retval = EFAULT; 5773 break; 5774 } 5775 5776 /* 5777 * Get the devinfo for the Phci. 5778 */ 5779 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5780 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5781 "phci dip doesn't exist. invalid path <%s>", 5782 s, phci_path)); 5783 retval = ENXIO; 5784 break; 5785 } 5786 5787 /* 5788 * If the client path is set to /scsi_vhci then we need 5789 * to do the operation on all the clients so set cdip to NULL. 5790 * Else, try to get the client dip. 5791 */ 5792 if (strcmp(client_path, "/scsi_vhci") == 0) { 5793 cdip = NULL; 5794 } else { 5795 if ((cdip = mdi_client_path2devinfo(vdip, 5796 client_path)) == NULL) { 5797 retval = ENXIO; 5798 VHCI_DEBUG(1, (CE_WARN, NULL, 5799 "!vhci_ioctl: ioctl <%s> client dip " 5800 "doesn't exist. invalid path <%s>", 5801 s, client_path)); 5802 break; 5803 } 5804 } 5805 5806 if (cmd == SCSI_VHCI_PATH_ENABLE) 5807 retval = mdi_pi_enable(cdip, pdip, USER_DISABLE); 5808 else 5809 retval = mdi_pi_disable(cdip, pdip, USER_DISABLE); 5810 5811 break; 5812 } 5813 5814 case SCSI_VHCI_GET_TARGET_LONGNAME: 5815 { 5816 uint_t pid = pioc->buf_elem; 5817 char *target_port; 5818 mod_hash_val_t hv; 5819 5820 /* targetmap lookup of 'target-port' by <pid> */ 5821 if (mod_hash_find(vhci_targetmap_bypid, 5822 (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) { 5823 /* 5824 * NOTE: failure to find the mapping is OK for guid 5825 * based 'target-port' values. 
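 *
 * (Guid-derived 'target-port' values are intentionally never inserted
 * into vhci_targetmap_bypid, see the by_id handling in
 * vhci_kstat_create_pathinfo(), so the lookup failing for them here is
 * by design.)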
5826 */ 5827 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5828 "targetport mapping doesn't exist: pid %d", 5829 s, pid)); 5830 retval = ENXIO; 5831 break; 5832 } 5833 5834 /* copyout 'target-port' result */ 5835 target_port = (char *)hv; 5836 if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) { 5837 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5838 "targetport copyout failed: len: %d", 5839 s, (int)strlen(target_port))); 5840 retval = EFAULT; 5841 } 5842 break; 5843 } 5844 5845 #ifdef DEBUG 5846 case SCSI_VHCI_CONFIGURE_PHCI: 5847 { 5848 dev_info_t *pdip; 5849 5850 /* Get PHCI path and device address from user land */ 5851 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5852 retval = EFAULT; 5853 break; 5854 } 5855 5856 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5857 "phci <%s>", s, phci_path)); 5858 5859 /* Get the PHCI dip */ 5860 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5861 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5862 "phci dip doesn't exist. invalid path <%s>", 5863 s, phci_path)); 5864 retval = ENXIO; 5865 break; 5866 } 5867 5868 if (ndi_devi_config(pdip, 5869 NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) { 5870 retval = EIO; 5871 } 5872 5873 ddi_release_devi(pdip); 5874 break; 5875 } 5876 5877 case SCSI_VHCI_UNCONFIGURE_PHCI: 5878 { 5879 dev_info_t *pdip; 5880 5881 /* Get PHCI path and device address from user land */ 5882 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5883 retval = EFAULT; 5884 break; 5885 } 5886 5887 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5888 "phci <%s>", s, phci_path)); 5889 5890 /* Get the PHCI dip */ 5891 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5892 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5893 "phci dip doesn't exist. 
invalid path <%s>", 5894 s, phci_path)); 5895 retval = ENXIO; 5896 break; 5897 } 5898 5899 if (ndi_devi_unconfig(pdip, 5900 NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) { 5901 retval = EBUSY; 5902 } 5903 5904 ddi_release_devi(pdip); 5905 break; 5906 } 5907 #endif 5908 } 5909 5910 end: 5911 /* Free the memory allocated above */ 5912 if (phci_path != NULL) { 5913 kmem_free(phci_path, MAXPATHLEN); 5914 } 5915 if (client_path != NULL) { 5916 kmem_free(client_path, MAXPATHLEN); 5917 } 5918 if (paddr != NULL) { 5919 kmem_free(paddr, MAXNAMELEN); 5920 } 5921 return (retval); 5922 } 5923 5924 /* 5925 * devctl IOCTL support for client device DR 5926 */ 5927 /* ARGSUSED */ 5928 int 5929 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 5930 int *rvalp) 5931 { 5932 dev_info_t *self; 5933 dev_info_t *child; 5934 scsi_hba_tran_t *hba; 5935 struct devctl_iocdata *dcp; 5936 struct scsi_vhci *vhci; 5937 int rv = 0; 5938 int retval = 0; 5939 scsi_vhci_priv_t *svp; 5940 mdi_pathinfo_t *pip; 5941 5942 if ((vhci = ddi_get_soft_state(vhci_softstate, 5943 MINOR2INST(getminor(dev)))) == NULL) 5944 return (ENXIO); 5945 5946 /* 5947 * check if :devctl minor device has been opened 5948 */ 5949 mutex_enter(&vhci->vhci_mutex); 5950 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5951 mutex_exit(&vhci->vhci_mutex); 5952 return (ENXIO); 5953 } 5954 mutex_exit(&vhci->vhci_mutex); 5955 5956 self = vhci->vhci_dip; 5957 hba = ddi_get_driver_private(self); 5958 if (hba == NULL) 5959 return (ENXIO); 5960 5961 /* 5962 * We can use the generic implementation for these ioctls 5963 */ 5964 switch (cmd) { 5965 case DEVCTL_DEVICE_GETSTATE: 5966 case DEVCTL_DEVICE_ONLINE: 5967 case DEVCTL_DEVICE_OFFLINE: 5968 case DEVCTL_DEVICE_REMOVE: 5969 case DEVCTL_BUS_GETSTATE: 5970 return (ndi_devctl_ioctl(self, cmd, arg, mode, 0)); 5971 } 5972 5973 /* 5974 * read devctl ioctl data 5975 */ 5976 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) 5977 return (EFAULT); 5978 5979 switch (cmd) { 5980 5981 case DEVCTL_DEVICE_RESET: 5982 /* 5983 * lookup and hold child device 5984 */ 5985 if ((child = ndi_devi_find(self, ndi_dc_getname(dcp), 5986 ndi_dc_getaddr(dcp))) == NULL) { 5987 rv = ENXIO; 5988 break; 5989 } 5990 retval = mdi_select_path(child, NULL, 5991 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), 5992 NULL, &pip); 5993 if ((retval != MDI_SUCCESS) || (pip == NULL)) { 5994 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:" 5995 "Unable to get a path, dip 0x%p", (void *)child)); 5996 rv = ENXIO; 5997 break; 5998 } 5999 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 6000 if (vhci_recovery_reset(svp->svp_svl, 6001 &svp->svp_psd->sd_address, TRUE, 6002 VHCI_DEPTH_TARGET) == 0) { 6003 VHCI_DEBUG(1, (CE_NOTE, NULL, 6004 "!vhci_ioctl(pip:%p): " 6005 "reset failed\n", (void *)pip)); 6006 rv = ENXIO; 6007 } 6008 mdi_rele_path(pip); 6009 break; 6010 6011 case DEVCTL_BUS_QUIESCE: 6012 case DEVCTL_BUS_UNQUIESCE: 6013 case DEVCTL_BUS_RESET: 6014 case DEVCTL_BUS_RESETALL: 6015 #ifdef DEBUG 6016 case DEVCTL_BUS_CONFIGURE: 6017 case DEVCTL_BUS_UNCONFIGURE: 6018 #endif 6019 rv = ENOTSUP; 6020 break; 6021 6022 default: 6023 rv = ENOTTY; 6024 } /* end of outer switch */ 6025 6026 ndi_dc_freehdl(dcp); 6027 return (rv); 6028 } 6029 6030 /* 6031 * Routine to get the PHCI pathname from ioctl structures in userland 6032 */ 6033 /* ARGSUSED */ 6034 static int 6035 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path, 6036 int mode, caddr_t s) 6037 { 6038 int retval = 0; 6039 6040 if (ddi_copyin(pioc->phci, phci_path, 
MAXPATHLEN, mode)) { 6041 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> " 6042 "phci_path copyin failed", s)); 6043 retval = EFAULT; 6044 } 6045 return (retval); 6046 6047 } 6048 6049 6050 /* 6051 * Routine to get the Client device pathname from ioctl structures in userland 6052 */ 6053 /* ARGSUSED */ 6054 static int 6055 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path, 6056 int mode, caddr_t s) 6057 { 6058 int retval = 0; 6059 6060 if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) { 6061 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: " 6062 "ioctl <%s> client_path copyin failed", s)); 6063 retval = EFAULT; 6064 } 6065 return (retval); 6066 } 6067 6068 6069 /* 6070 * Routine to get physical device address from ioctl structure in userland 6071 */ 6072 /* ARGSUSED */ 6073 static int 6074 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s) 6075 { 6076 int retval = 0; 6077 6078 if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) { 6079 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: " 6080 "ioctl <%s> device addr copyin failed", s)); 6081 retval = EFAULT; 6082 } 6083 return (retval); 6084 } 6085 6086 6087 /* 6088 * Routine to send client device pathname to userland. 6089 */ 6090 /* ARGSUSED */ 6091 static int 6092 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc, 6093 int mode, caddr_t s) 6094 { 6095 int retval = 0; 6096 6097 if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) { 6098 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: " 6099 "ioctl <%s> client_path copyout failed", s)); 6100 retval = EFAULT; 6101 } 6102 return (retval); 6103 } 6104 6105 6106 /* 6107 * Routine to translated dev_info pointer (dip) to device pathname. 6108 */ 6109 static void 6110 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path) 6111 { 6112 (void) ddi_pathname(dip, path); 6113 } 6114 6115 6116 /* 6117 * vhci_get_phci_path_list: 6118 * get information about devices associated with a 6119 * given PHCI device. 6120 * 6121 * Return Values: 6122 * path information elements 6123 */ 6124 int 6125 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf, 6126 uint_t num_elems) 6127 { 6128 uint_t count, done; 6129 mdi_pathinfo_t *pip; 6130 sv_path_info_t *ret_pip; 6131 int status; 6132 size_t prop_size; 6133 int circular; 6134 6135 /* 6136 * Get the PHCI structure and retrieve the path information 6137 * from the GUID hash table. 
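 * Each client pathinfo node under the pHCI is visited in turn (at most
 * num_elems entries are filled in): the pHCI pathname, unit address and
 * (extended) state are recorded, the property size is returned through
 * ret_buf_size when the caller asked for it, and the properties are
 * packed with mdi_prop_pack() whenever the caller's buffer is large
 * enough to hold them.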
6138 */ 6139 6140 ret_pip = pibuf; 6141 count = 0; 6142 6143 ndi_devi_enter(pdip, &circular); 6144 6145 done = (count >= num_elems); 6146 pip = mdi_get_next_client_path(pdip, NULL); 6147 while (pip && !done) { 6148 mdi_pi_lock(pip); 6149 (void) ddi_pathname(mdi_pi_get_phci(pip), 6150 ret_pip->device.ret_phci); 6151 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6152 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6153 &ret_pip->ret_ext_state); 6154 6155 status = mdi_prop_size(pip, &prop_size); 6156 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6157 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6158 } 6159 6160 #ifdef DEBUG 6161 if (status != MDI_SUCCESS) { 6162 VHCI_DEBUG(2, (CE_WARN, NULL, 6163 "!vhci_get_phci_path_list: " 6164 "phci <%s>, prop size failure 0x%x", 6165 ret_pip->device.ret_phci, status)); 6166 } 6167 #endif /* DEBUG */ 6168 6169 6170 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6171 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6172 status = mdi_prop_pack(pip, 6173 &ret_pip->ret_prop.buf, 6174 ret_pip->ret_prop.buf_size); 6175 6176 #ifdef DEBUG 6177 if (status != MDI_SUCCESS) { 6178 VHCI_DEBUG(2, (CE_WARN, NULL, 6179 "!vhci_get_phci_path_list: " 6180 "phci <%s>, prop pack failure 0x%x", 6181 ret_pip->device.ret_phci, status)); 6182 } 6183 #endif /* DEBUG */ 6184 } 6185 6186 mdi_pi_unlock(pip); 6187 pip = mdi_get_next_client_path(pdip, pip); 6188 ret_pip++; 6189 count++; 6190 done = (count >= num_elems); 6191 } 6192 6193 ndi_devi_exit(pdip, circular); 6194 6195 return (MDI_SUCCESS); 6196 } 6197 6198 6199 /* 6200 * vhci_get_client_path_list: 6201 * get information about various paths associated with a 6202 * given client device. 6203 * 6204 * Return Values: 6205 * path information elements 6206 */ 6207 int 6208 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf, 6209 uint_t num_elems) 6210 { 6211 uint_t count, done; 6212 mdi_pathinfo_t *pip; 6213 sv_path_info_t *ret_pip; 6214 int status; 6215 size_t prop_size; 6216 int circular; 6217 6218 ret_pip = pibuf; 6219 count = 0; 6220 6221 ndi_devi_enter(cdip, &circular); 6222 6223 done = (count >= num_elems); 6224 pip = mdi_get_next_phci_path(cdip, NULL); 6225 while (pip && !done) { 6226 mdi_pi_lock(pip); 6227 (void) ddi_pathname(mdi_pi_get_phci(pip), 6228 ret_pip->device.ret_phci); 6229 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6230 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6231 &ret_pip->ret_ext_state); 6232 6233 status = mdi_prop_size(pip, &prop_size); 6234 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6235 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6236 } 6237 6238 #ifdef DEBUG 6239 if (status != MDI_SUCCESS) { 6240 VHCI_DEBUG(2, (CE_WARN, NULL, 6241 "!vhci_get_client_path_list: " 6242 "phci <%s>, prop size failure 0x%x", 6243 ret_pip->device.ret_phci, status)); 6244 } 6245 #endif /* DEBUG */ 6246 6247 6248 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6249 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6250 status = mdi_prop_pack(pip, 6251 &ret_pip->ret_prop.buf, 6252 ret_pip->ret_prop.buf_size); 6253 6254 #ifdef DEBUG 6255 if (status != MDI_SUCCESS) { 6256 VHCI_DEBUG(2, (CE_WARN, NULL, 6257 "!vhci_get_client_path_list: " 6258 "phci <%s>, prop pack failure 0x%x", 6259 ret_pip->device.ret_phci, status)); 6260 } 6261 #endif /* DEBUG */ 6262 } 6263 6264 mdi_pi_unlock(pip); 6265 pip = mdi_get_next_phci_path(cdip, pip); 6266 ret_pip++; 6267 count++; 6268 done = (count >= num_elems); 6269 } 6270 
6271 ndi_devi_exit(cdip, circular); 6272 6273 return (MDI_SUCCESS); 6274 } 6275 6276 6277 /* 6278 * Routine to get ioctl argument structure from userland. 6279 */ 6280 /* ARGSUSED */ 6281 static int 6282 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s) 6283 { 6284 int retval = 0; 6285 6286 #ifdef _MULTI_DATAMODEL 6287 switch (ddi_model_convert_from(mode & FMODELS)) { 6288 case DDI_MODEL_ILP32: 6289 { 6290 sv_iocdata32_t ioc32; 6291 6292 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6293 retval = EFAULT; 6294 break; 6295 } 6296 pioc->client = (caddr_t)(uintptr_t)ioc32.client; 6297 pioc->phci = (caddr_t)(uintptr_t)ioc32.phci; 6298 pioc->addr = (caddr_t)(uintptr_t)ioc32.addr; 6299 pioc->buf_elem = (uint_t)ioc32.buf_elem; 6300 pioc->ret_buf = (sv_path_info_t *)(uintptr_t)ioc32.ret_buf; 6301 pioc->ret_elem = (uint_t *)(uintptr_t)ioc32.ret_elem; 6302 break; 6303 } 6304 6305 case DDI_MODEL_NONE: 6306 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6307 retval = EFAULT; 6308 break; 6309 } 6310 break; 6311 } 6312 #else /* _MULTI_DATAMODEL */ 6313 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6314 retval = EFAULT; 6315 } 6316 #endif /* _MULTI_DATAMODEL */ 6317 6318 #ifdef DEBUG 6319 if (retval) { 6320 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6321 "iocdata copyin failed", s)); 6322 } 6323 #endif 6324 6325 return (retval); 6326 } 6327 6328 6329 /* 6330 * Routine to get the ioctl argument for ioctl causing controller switchover. 6331 */ 6332 /* ARGSUSED */ 6333 static int 6334 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc, 6335 int mode, caddr_t s) 6336 { 6337 int retval = 0; 6338 6339 #ifdef _MULTI_DATAMODEL 6340 switch (ddi_model_convert_from(mode & FMODELS)) { 6341 case DDI_MODEL_ILP32: 6342 { 6343 sv_switch_to_cntlr_iocdata32_t ioc32; 6344 6345 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6346 retval = EFAULT; 6347 break; 6348 } 6349 piocsc->client = (caddr_t)(uintptr_t)ioc32.client; 6350 piocsc->class = (caddr_t)(uintptr_t)ioc32.class; 6351 break; 6352 } 6353 6354 case DDI_MODEL_NONE: 6355 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6356 retval = EFAULT; 6357 } 6358 break; 6359 } 6360 #else /* _MULTI_DATAMODEL */ 6361 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6362 retval = EFAULT; 6363 } 6364 #endif /* _MULTI_DATAMODEL */ 6365 6366 #ifdef DEBUG 6367 if (retval) { 6368 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6369 "switch_to_cntlr_iocdata copyin failed", s)); 6370 } 6371 #endif 6372 6373 return (retval); 6374 } 6375 6376 6377 /* 6378 * Routine to allocate memory for the path information structures. 6379 * It allocates two chunks of memory - one for keeping userland 6380 * pointers/values for path information and path properties, second for 6381 * keeping allocating kernel memory for path properties. These path 6382 * properties are finally copied to userland. 
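 * 'upibuf' keeps the userland pointers and buffer sizes copied in from
 * pioc->ret_buf, while 'kpibuf' receives kernel buffers of matching
 * size (only when that size does not exceed SV_PROP_MAX_BUF_SIZE);
 * vhci_ioc_send_pathinfo() later copies the kernel results back out
 * through the saved userland pointers.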
6383 */ 6384 /* ARGSUSED */ 6385 static int 6386 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf, 6387 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6388 { 6389 sv_path_info_t *pi; 6390 uint_t bufsize; 6391 int retval = 0; 6392 int index; 6393 6394 /* Allocate memory */ 6395 *upibuf = (sv_path_info_t *) 6396 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6397 ASSERT(*upibuf != NULL); 6398 *kpibuf = (sv_path_info_t *) 6399 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6400 ASSERT(*kpibuf != NULL); 6401 6402 /* 6403 * Get the path info structure from the user space. 6404 * We are interested in the following fields: 6405 * - user size of buffer for per path properties. 6406 * - user address of buffer for path info properties. 6407 * - user pointer for returning actual buffer size 6408 * Keep these fields in the 'upibuf' structures. 6409 * Allocate buffer for per path info properties in kernel 6410 * structure ('kpibuf'). 6411 * Size of these buffers will be equal to the size of buffers 6412 * in the user space. 6413 */ 6414 #ifdef _MULTI_DATAMODEL 6415 switch (ddi_model_convert_from(mode & FMODELS)) { 6416 case DDI_MODEL_ILP32: 6417 { 6418 sv_path_info32_t *src; 6419 sv_path_info32_t pi32; 6420 6421 src = (sv_path_info32_t *)pioc->ret_buf; 6422 pi = (sv_path_info_t *)*upibuf; 6423 for (index = 0; index < num_paths; index++, src++, pi++) { 6424 if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) { 6425 retval = EFAULT; 6426 break; 6427 } 6428 6429 pi->ret_prop.buf_size = 6430 (uint_t)pi32.ret_prop.buf_size; 6431 pi->ret_prop.ret_buf_size = 6432 (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size; 6433 pi->ret_prop.buf = 6434 (caddr_t)(uintptr_t)pi32.ret_prop.buf; 6435 } 6436 break; 6437 } 6438 6439 case DDI_MODEL_NONE: 6440 if (ddi_copyin(pioc->ret_buf, *upibuf, 6441 sizeof (sv_path_info_t) * num_paths, mode)) { 6442 retval = EFAULT; 6443 } 6444 break; 6445 } 6446 #else /* _MULTI_DATAMODEL */ 6447 if (ddi_copyin(pioc->ret_buf, *upibuf, 6448 sizeof (sv_path_info_t) * num_paths, mode)) { 6449 retval = EFAULT; 6450 } 6451 #endif /* _MULTI_DATAMODEL */ 6452 6453 if (retval != 0) { 6454 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: " 6455 "ioctl <%s> normal: path_info copyin failed", s)); 6456 kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths); 6457 kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths); 6458 *upibuf = NULL; 6459 *kpibuf = NULL; 6460 return (retval); 6461 } 6462 6463 /* 6464 * Allocate memory for per path properties. 6465 */ 6466 for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) { 6467 bufsize = (*upibuf)[index].ret_prop.buf_size; 6468 6469 if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) { 6470 pi->ret_prop.buf_size = bufsize; 6471 pi->ret_prop.buf = (caddr_t) 6472 kmem_zalloc(bufsize, KM_SLEEP); 6473 ASSERT(pi->ret_prop.buf != NULL); 6474 } else { 6475 pi->ret_prop.buf_size = 0; 6476 pi->ret_prop.buf = NULL; 6477 } 6478 6479 if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) { 6480 pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc( 6481 sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP); 6482 ASSERT(pi->ret_prop.ret_buf_size != NULL); 6483 } else { 6484 pi->ret_prop.ret_buf_size = NULL; 6485 } 6486 } 6487 6488 return (0); 6489 } 6490 6491 6492 /* 6493 * Routine to free memory for the path information structures. 6494 * This is the memory which was allocated earlier. 
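 * This covers the per-path property buffers and ret_buf_size cells
 * hanging off 'kpibuf' as well as the 'upibuf' and 'kpibuf' arrays
 * themselves.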
6495 */ 6496 /* ARGSUSED */ 6497 static void 6498 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6499 uint_t num_paths) 6500 { 6501 sv_path_info_t *pi; 6502 int index; 6503 6504 /* Free memory for per path properties */ 6505 for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) { 6506 if (pi->ret_prop.ret_buf_size != NULL) { 6507 kmem_free(pi->ret_prop.ret_buf_size, 6508 sizeof (*pi->ret_prop.ret_buf_size)); 6509 } 6510 6511 if (pi->ret_prop.buf != NULL) { 6512 kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size); 6513 } 6514 } 6515 6516 /* Free memory for path info structures */ 6517 kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths); 6518 kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths); 6519 } 6520 6521 6522 /* 6523 * Routine to copy path information and path properties to userland. 6524 */ 6525 /* ARGSUSED */ 6526 static int 6527 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6528 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6529 { 6530 int retval = 0, index; 6531 sv_path_info_t *upi_ptr; 6532 sv_path_info32_t *upi32_ptr; 6533 6534 #ifdef _MULTI_DATAMODEL 6535 switch (ddi_model_convert_from(mode & FMODELS)) { 6536 case DDI_MODEL_ILP32: 6537 goto copy_32bit; 6538 6539 case DDI_MODEL_NONE: 6540 goto copy_normal; 6541 } 6542 #else /* _MULTI_DATAMODEL */ 6543 6544 goto copy_normal; 6545 6546 #endif /* _MULTI_DATAMODEL */ 6547 6548 copy_normal: 6549 6550 /* 6551 * Copy path information and path properties to user land. 6552 * Pointer fields inside the path property structure were 6553 * saved in the 'upibuf' structure earlier. 6554 */ 6555 upi_ptr = pioc->ret_buf; 6556 for (index = 0; index < num_paths; index++) { 6557 if (ddi_copyout(kpibuf[index].device.ret_ct, 6558 upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6559 retval = EFAULT; 6560 break; 6561 } 6562 6563 if (ddi_copyout(kpibuf[index].ret_addr, 6564 upi_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6565 retval = EFAULT; 6566 break; 6567 } 6568 6569 if (ddi_copyout(&kpibuf[index].ret_state, 6570 &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state), 6571 mode)) { 6572 retval = EFAULT; 6573 break; 6574 } 6575 6576 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6577 &upi_ptr[index].ret_ext_state, 6578 sizeof (kpibuf[index].ret_ext_state), mode)) { 6579 retval = EFAULT; 6580 break; 6581 } 6582 6583 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6584 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6585 upibuf[index].ret_prop.ret_buf_size, 6586 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6587 retval = EFAULT; 6588 break; 6589 } 6590 6591 if ((kpibuf[index].ret_prop.buf != NULL) && 6592 ddi_copyout(kpibuf[index].ret_prop.buf, 6593 upibuf[index].ret_prop.buf, 6594 upibuf[index].ret_prop.buf_size, mode)) { 6595 retval = EFAULT; 6596 break; 6597 } 6598 } 6599 6600 #ifdef DEBUG 6601 if (retval) { 6602 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6603 "normal: path_info copyout failed", s)); 6604 } 6605 #endif 6606 6607 return (retval); 6608 6609 copy_32bit: 6610 /* 6611 * Copy path information and path properties to user land. 6612 * Pointer fields inside the path property structure were 6613 * saved in the 'upibuf' structure earlier. 
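 * The ILP32 variant below differs from copy_normal only in that the
 * user buffer is addressed as an array of sv_path_info32_t.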
6614 */ 6615 upi32_ptr = (sv_path_info32_t *)pioc->ret_buf; 6616 for (index = 0; index < num_paths; index++) { 6617 if (ddi_copyout(kpibuf[index].device.ret_ct, 6618 upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6619 retval = EFAULT; 6620 break; 6621 } 6622 6623 if (ddi_copyout(kpibuf[index].ret_addr, 6624 upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6625 retval = EFAULT; 6626 break; 6627 } 6628 6629 if (ddi_copyout(&kpibuf[index].ret_state, 6630 &upi32_ptr[index].ret_state, 6631 sizeof (kpibuf[index].ret_state), mode)) { 6632 retval = EFAULT; 6633 break; 6634 } 6635 6636 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6637 &upi32_ptr[index].ret_ext_state, 6638 sizeof (kpibuf[index].ret_ext_state), mode)) { 6639 retval = EFAULT; 6640 break; 6641 } 6642 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6643 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6644 upibuf[index].ret_prop.ret_buf_size, 6645 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6646 retval = EFAULT; 6647 break; 6648 } 6649 6650 if ((kpibuf[index].ret_prop.buf != NULL) && 6651 ddi_copyout(kpibuf[index].ret_prop.buf, 6652 upibuf[index].ret_prop.buf, 6653 upibuf[index].ret_prop.buf_size, mode)) { 6654 retval = EFAULT; 6655 break; 6656 } 6657 } 6658 6659 #ifdef DEBUG 6660 if (retval) { 6661 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6662 "normal: path_info copyout failed", s)); 6663 } 6664 #endif 6665 6666 return (retval); 6667 } 6668 6669 6670 /* 6671 * vhci_failover() 6672 * This routine expects VHCI_HOLD_LUN before being invoked. It can be invoked 6673 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC. For Asynchronous failovers 6674 * this routine shall VHCI_RELEASE_LUN on exiting. For synchronous failovers 6675 * it is the callers responsibility to release lun. 6676 */ 6677 6678 /* ARGSUSED */ 6679 static int 6680 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags) 6681 { 6682 char *guid; 6683 scsi_vhci_lun_t *vlun = NULL; 6684 struct scsi_vhci *vhci; 6685 mdi_pathinfo_t *pip, *npip; 6686 char *s_pclass, *pclass1, *pclass2, *pclass; 6687 char active_pclass_copy[255], *active_pclass_ptr; 6688 char *ptr1, *ptr2; 6689 mdi_pathinfo_state_t pi_state; 6690 uint32_t pi_ext_state; 6691 scsi_vhci_priv_t *svp; 6692 struct scsi_device *sd; 6693 struct scsi_failover_ops *sfo; 6694 int sps; /* mdi_select_path() status */ 6695 int activation_done = 0; 6696 int rval, retval = MDI_FAILURE; 6697 int reserve_pending, check_condition, UA_condition; 6698 struct scsi_pkt *pkt; 6699 struct buf *bp; 6700 6701 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 6702 sd = ddi_get_driver_private(cdip); 6703 vlun = ADDR2VLUN(&sd->sd_address); 6704 ASSERT(vlun != 0); 6705 ASSERT(VHCI_LUN_IS_HELD(vlun)); 6706 guid = vlun->svl_lun_wwn; 6707 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid)); 6708 vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s " 6709 "(GUID %s)", ddi_node_name(cdip), guid); 6710 6711 /* 6712 * Lets maintain a local copy of the vlun->svl_active_pclass 6713 * for the rest of the processing. Accessing the field 6714 * directly in the loop below causes loop logic to break 6715 * especially when the field gets updated by other threads 6716 * update path status etc and causes 'paths are not currently 6717 * available' condition to be declared prematurely. 
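 * The copy is taken under svl_mutex and then used after the mutex is
 * dropped, e.g. by vhci_quiesce_paths() below.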
6718 */
6719 mutex_enter(&vlun->svl_mutex);
6720 if (vlun->svl_active_pclass != NULL) {
6721 (void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6722 sizeof (active_pclass_copy));
6723 active_pclass_ptr = &active_pclass_copy[0];
6724 mutex_exit(&vlun->svl_mutex);
6725 if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6726 active_pclass_ptr) != 0) {
6727 retval = MDI_FAILURE;
6728 }
6729 } else {
6730 /*
6731 * This can happen only when the path discovered to the
6732 * device is a STANDBY path.
6733 */
6734 mutex_exit(&vlun->svl_mutex);
6735 active_pclass_copy[0] = '\0';
6736 active_pclass_ptr = NULL;
6737 }
6738 
6739 sfo = vlun->svl_fops;
6740 ASSERT(sfo != NULL);
6741 pclass1 = s_pclass = active_pclass_ptr;
6742 VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6743 (s_pclass == NULL ? "<none>" : s_pclass)));
6744 
6745 next_pathclass:
6746 
6747 rval = sfo->sfo_pathclass_next(pclass1, &pclass2,
6748 vlun->svl_fops_ctpriv);
6749 if (rval == ENOENT) {
6750 if (s_pclass == NULL) {
6751 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6752 "failed, no more pathclasses\n", guid));
6753 goto done;
6754 } else {
6755 (void) sfo->sfo_pathclass_next(NULL, &pclass2,
6756 vlun->svl_fops_ctpriv);
6757 }
6758 } else if (rval == EINVAL) {
6759 vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6760 "device %s (GUID %s): Invalid path-class %s",
6761 ddi_node_name(cdip), guid,
6762 ((pclass1 == NULL) ? "<none>" : pclass1));
6763 goto done;
6764 }
6765 if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6766 /*
6767 * paths are not currently available
6768 */
6769 vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6770 " for device %s (GUID %s)",
6771 ddi_node_name(cdip), guid);
6772 goto done;
6773 }
6774 pip = npip = NULL;
6775 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6776 "%s as failover destination\n", guid, pclass2));
6777 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6778 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6779 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6780 "STANDBY paths found (status:%x)!\n", guid, sps));
6781 pclass1 = pclass2;
6782 goto next_pathclass;
6783 }
6784 do {
6785 pclass = NULL;
6786 if ((mdi_prop_lookup_string(npip, "path-class",
6787 &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6788 pclass) != 0)) {
6789 VHCI_DEBUG(1, (CE_NOTE, NULL,
6790 "!vhci_failover(5.5)(%s): skipping path "
6791 "%p(%s)...\n", guid, (void *)npip, pclass));
6792 pip = npip;
6793 sps = mdi_select_path(cdip, NULL,
6794 MDI_SELECT_STANDBY_PATH, pip, &npip);
6795 mdi_rele_path(pip);
6796 (void) mdi_prop_free(pclass);
6797 continue;
6798 }
6799 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6800 
6801 /*
6802 * Issue a READ at a non-zero block on this STANDBY path.
6803 * Purple returns
6804 * 1. RESERVATION_CONFLICT if a reservation is pending
6805 * 2. POR check condition if a reset happened.
6806 * 3. failover Check Conditions if one is already in progress.
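 * The probe is sent FLAG_NOINTR on the candidate path: STATUS_GOOD
 * means the LUN has already failed over, a reservation conflict is
 * noted for the recovery reset further below, and a check condition
 * is decoded from the ARQ sense data.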
6807 */ 6808 reserve_pending = 0; 6809 check_condition = 0; 6810 UA_condition = 0; 6811 6812 bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address, 6813 (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL); 6814 if (!bp) { 6815 VHCI_DEBUG(1, (CE_NOTE, NULL, 6816 "vhci_failover !No resources (buf)\n")); 6817 mdi_rele_path(npip); 6818 goto done; 6819 } 6820 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, 6821 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 6822 PKT_CONSISTENT, NULL, NULL); 6823 if (pkt) { 6824 (void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t) 6825 pkt->pkt_cdbp, SCMD_READ, 1, 1, 0); 6826 pkt->pkt_flags = FLAG_NOINTR; 6827 check_path_again: 6828 pkt->pkt_path_instance = mdi_pi_get_path_instance(npip); 6829 pkt->pkt_time = 3*30; 6830 6831 if (scsi_transport(pkt) == TRAN_ACCEPT) { 6832 switch (pkt->pkt_reason) { 6833 case CMD_CMPLT: 6834 switch (SCBP_C(pkt)) { 6835 case STATUS_GOOD: 6836 /* Already failed over */ 6837 activation_done = 1; 6838 break; 6839 case STATUS_RESERVATION_CONFLICT: 6840 reserve_pending = 1; 6841 break; 6842 case STATUS_CHECK: 6843 check_condition = 1; 6844 break; 6845 } 6846 } 6847 } 6848 if (check_condition && 6849 (pkt->pkt_state & STATE_ARQ_DONE)) { 6850 uint8_t *sns, skey, asc, ascq; 6851 sns = (uint8_t *) 6852 &(((struct scsi_arq_status *)(uintptr_t) 6853 (pkt->pkt_scbp))->sts_sensedata); 6854 skey = scsi_sense_key(sns); 6855 asc = scsi_sense_asc(sns); 6856 ascq = scsi_sense_ascq(sns); 6857 if (skey == KEY_UNIT_ATTENTION && 6858 asc == 0x29) { 6859 /* Already failed over */ 6860 VHCI_DEBUG(1, (CE_NOTE, NULL, 6861 "!vhci_failover(7)(%s): " 6862 "path 0x%p POR UA condition\n", 6863 guid, (void *)npip)); 6864 if (UA_condition == 0) { 6865 UA_condition = 1; 6866 goto check_path_again; 6867 } 6868 } else { 6869 activation_done = 0; 6870 VHCI_DEBUG(1, (CE_NOTE, NULL, 6871 "!vhci_failover(%s): path 0x%p " 6872 "unhandled chkcond %x %x %x\n", 6873 guid, (void *)npip, skey, 6874 asc, ascq)); 6875 } 6876 } 6877 scsi_destroy_pkt(pkt); 6878 } 6879 scsi_free_consistent_buf(bp); 6880 6881 if (activation_done) { 6882 mdi_rele_path(npip); 6883 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6884 "path 0x%p already failedover\n", guid, 6885 (void *)npip)); 6886 break; 6887 } 6888 if (reserve_pending && (vlun->svl_xlf_capable == 0)) { 6889 (void) vhci_recovery_reset(vlun, 6890 &svp->svp_psd->sd_address, 6891 FALSE, VHCI_DEPTH_ALL); 6892 } 6893 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): " 6894 "activating path 0x%p(psd:%p)\n", guid, (void *)npip, 6895 (void *)svp->svp_psd)); 6896 if (sfo->sfo_path_activate(svp->svp_psd, pclass2, 6897 vlun->svl_fops_ctpriv) == 0) { 6898 activation_done = 1; 6899 mdi_rele_path(npip); 6900 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6901 "path 0x%p successfully activated\n", guid, 6902 (void *)npip)); 6903 break; 6904 } 6905 pip = npip; 6906 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, 6907 pip, &npip); 6908 mdi_rele_path(pip); 6909 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 6910 if (activation_done == 0) { 6911 pclass1 = pclass2; 6912 goto next_pathclass; 6913 } 6914 6915 /* 6916 * if we are here, we have succeeded in activating path npip of 6917 * pathclass pclass2; let us validate all paths of pclass2 by 6918 * "ping"-ing each one and mark the good ones ONLINE 6919 * Also, set the state of the paths belonging to the previously 6920 * active pathclass to STANDBY 6921 */ 6922 pip = npip = NULL; 6923 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 6924 
MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH), 6925 NULL, &npip); 6926 if (npip == NULL || sps != MDI_SUCCESS) { 6927 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for " 6928 "device %s (GUID %s): paths may be busy\n", 6929 ddi_node_name(cdip), guid)); 6930 goto done; 6931 } 6932 do { 6933 (void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state); 6934 if (mdi_prop_lookup_string(npip, "path-class", &pclass) 6935 != MDI_SUCCESS) { 6936 pip = npip; 6937 sps = mdi_select_path(cdip, NULL, 6938 (MDI_SELECT_ONLINE_PATH | 6939 MDI_SELECT_STANDBY_PATH | 6940 MDI_SELECT_USER_DISABLE_PATH), 6941 pip, &npip); 6942 mdi_rele_path(pip); 6943 continue; 6944 } 6945 if (strcmp(pclass, pclass2) == 0) { 6946 if (pi_state == MDI_PATHINFO_STATE_STANDBY) { 6947 svp = (scsi_vhci_priv_t *) 6948 mdi_pi_get_vhci_private(npip); 6949 VHCI_DEBUG(1, (CE_NOTE, NULL, 6950 "!vhci_failover(8)(%s): " 6951 "pinging path 0x%p\n", 6952 guid, (void *)npip)); 6953 if (sfo->sfo_path_ping(svp->svp_psd, 6954 vlun->svl_fops_ctpriv) == 1) { 6955 mdi_pi_set_state(npip, 6956 MDI_PATHINFO_STATE_ONLINE); 6957 VHCI_DEBUG(1, (CE_NOTE, NULL, 6958 "!vhci_failover(9)(%s): " 6959 "path 0x%p ping successful, " 6960 "marked online\n", guid, 6961 (void *)npip)); 6962 MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO); 6963 } 6964 } 6965 } else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass) 6966 == 0)) { 6967 if (pi_state == MDI_PATHINFO_STATE_ONLINE) { 6968 mdi_pi_set_state(npip, 6969 MDI_PATHINFO_STATE_STANDBY); 6970 VHCI_DEBUG(1, (CE_NOTE, NULL, 6971 "!vhci_failover(10)(%s): path 0x%p marked " 6972 "STANDBY\n", guid, (void *)npip)); 6973 MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM); 6974 } 6975 } 6976 (void) mdi_prop_free(pclass); 6977 pip = npip; 6978 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 6979 MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH), 6980 pip, &npip); 6981 mdi_rele_path(pip); 6982 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 6983 6984 /* 6985 * Update the AccessState of related MP-API TPGs 6986 */ 6987 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 6988 6989 vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully " 6990 "for device %s (GUID %s): failed over from %s to %s", 6991 ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" : 6992 s_pclass), pclass2); 6993 ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP); 6994 (void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1)); 6995 mutex_enter(&vlun->svl_mutex); 6996 ptr2 = vlun->svl_active_pclass; 6997 vlun->svl_active_pclass = ptr1; 6998 mutex_exit(&vlun->svl_mutex); 6999 if (ptr2) { 7000 kmem_free(ptr2, strlen(ptr2)+1); 7001 } 7002 mutex_enter(&vhci->vhci_mutex); 7003 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 7004 &vhci->vhci_reset_notify_listf); 7005 /* All reservations are cleared upon these resets. */ 7006 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 7007 mutex_exit(&vhci->vhci_mutex); 7008 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active " 7009 "pathclass for %s is now %s\n", guid, pclass2)); 7010 retval = MDI_SUCCESS; 7011 7012 done: 7013 if (flags == MDI_FAILOVER_ASYNC) { 7014 VHCI_RELEASE_LUN(vlun); 7015 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! " 7016 "releasing lun, as failover was ASYNC\n")); 7017 } else { 7018 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! " 7019 "NOT releasing lun, as failover was SYNC\n")); 7020 } 7021 return (retval); 7022 } 7023 7024 /* 7025 * vhci_client_attached is called after the successful attach of a 7026 * client devinfo node. 
7027 */
7028 static void
7029 vhci_client_attached(dev_info_t *cdip)
7030 {
7031 mdi_pathinfo_t *pip;
7032 int circular;
7033 
7034 /*
7035 * At this point the client has attached and its instance number is
7036 * valid, so we can set up kstats. We need to do this here because it
7037 * is possible for paths to go online prior to client attach, in which
7038 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
7039 * was a noop.
7040 */
7041 ndi_devi_enter(cdip, &circular);
7042 for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
7043 pip = mdi_get_next_phci_path(cdip, pip))
7044 vhci_kstat_create_pathinfo(pip);
7045 ndi_devi_exit(cdip, circular);
7046 }
7047 
7048 /*
7049 * quiesce all of the online paths
7050 */
7051 static int
7052 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
7053 char *guid, char *active_pclass_ptr)
7054 {
7055 scsi_vhci_priv_t *svp;
7056 char *s_pclass = NULL;
7057 mdi_pathinfo_t *npip, *pip;
7058 int sps;
7059 
7060 /* quiesce currently active paths */
7061 s_pclass = NULL;
7062 pip = npip = NULL;
7063 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
7064 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
7065 return (1);
7066 }
7067 do {
7068 if (mdi_prop_lookup_string(npip, "path-class",
7069 &s_pclass) != MDI_SUCCESS) {
7070 mdi_rele_path(npip);
7071 vhci_log(CE_NOTE, vdip, "!Failover operation failed "
7072 "for device %s (GUID %s) due to an internal "
7073 "error", ddi_node_name(cdip), guid);
7074 return (1);
7075 }
7076 if (strcmp(s_pclass, active_pclass_ptr) == 0) {
7077 /*
7078 * quiesce path. Free s_pclass since
7079 * we don't need it anymore
7080 */
7081 VHCI_DEBUG(1, (CE_NOTE, NULL,
7082 "!vhci_failover(2)(%s): failing over "
7083 "from %s; quiescing path %p\n",
7084 guid, s_pclass, (void *)npip));
7085 (void) mdi_prop_free(s_pclass);
7086 svp = (scsi_vhci_priv_t *)
7087 mdi_pi_get_vhci_private(npip);
7088 if (svp == NULL) {
7089 VHCI_DEBUG(1, (CE_NOTE, NULL,
7090 "!vhci_failover(2.5)(%s): no "
7091 "client priv!
%p offlined?\n", 7092 guid, (void *)npip)); 7093 pip = npip; 7094 sps = mdi_select_path(cdip, NULL, 7095 MDI_SELECT_ONLINE_PATH, pip, &npip); 7096 mdi_rele_path(pip); 7097 continue; 7098 } 7099 if (scsi_abort(&svp->svp_psd->sd_address, NULL) 7100 == 0) { 7101 (void) vhci_recovery_reset(vlun, 7102 &svp->svp_psd->sd_address, FALSE, 7103 VHCI_DEPTH_TARGET); 7104 } 7105 mutex_enter(&svp->svp_mutex); 7106 if (svp->svp_cmds == 0) { 7107 VHCI_DEBUG(1, (CE_NOTE, NULL, 7108 "!vhci_failover(3)(%s):" 7109 "quiesced path %p\n", guid, (void *)npip)); 7110 } else { 7111 while (svp->svp_cmds != 0) { 7112 cv_wait(&svp->svp_cv, &svp->svp_mutex); 7113 VHCI_DEBUG(1, (CE_NOTE, NULL, 7114 "!vhci_failover(3.cv)(%s):" 7115 "quiesced path %p\n", guid, 7116 (void *)npip)); 7117 } 7118 } 7119 mutex_exit(&svp->svp_mutex); 7120 } else { 7121 /* 7122 * make sure we freeup the memory 7123 */ 7124 (void) mdi_prop_free(s_pclass); 7125 } 7126 pip = npip; 7127 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, 7128 pip, &npip); 7129 mdi_rele_path(pip); 7130 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 7131 return (0); 7132 } 7133 7134 static struct scsi_vhci_lun * 7135 vhci_lun_lookup(dev_info_t *tgt_dip) 7136 { 7137 return ((struct scsi_vhci_lun *) 7138 mdi_client_get_vhci_private(tgt_dip)); 7139 } 7140 7141 static struct scsi_vhci_lun * 7142 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc) 7143 { 7144 struct scsi_vhci_lun *svl; 7145 7146 if (svl = vhci_lun_lookup(tgt_dip)) { 7147 return (svl); 7148 } 7149 7150 svl = kmem_zalloc(sizeof (*svl), KM_SLEEP); 7151 svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP); 7152 (void) strcpy(svl->svl_lun_wwn, guid); 7153 mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL); 7154 cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL); 7155 sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL); 7156 svl->svl_waiting_for_activepath = 1; 7157 svl->svl_sector_size = 1; 7158 mdi_client_set_vhci_private(tgt_dip, svl); 7159 *didalloc = 1; 7160 VHCI_DEBUG(1, (CE_NOTE, NULL, 7161 "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n", 7162 guid, (void *)svl)); 7163 return (svl); 7164 } 7165 7166 static void 7167 vhci_lun_free(dev_info_t *tgt_dip) 7168 { 7169 struct scsi_vhci_lun *dvlp; 7170 char *guid; 7171 struct scsi_device *sd; 7172 7173 /* 7174 * The scsi_device was set to driver private during child node 7175 * initialization in the scsi_hba_bus_ctl(). 
7176 */ 7177 sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip); 7178 7179 dvlp = (struct scsi_vhci_lun *) 7180 mdi_client_get_vhci_private(tgt_dip); 7181 ASSERT(dvlp != NULL); 7182 7183 mdi_client_set_vhci_private(tgt_dip, NULL); 7184 7185 guid = dvlp->svl_lun_wwn; 7186 ASSERT(guid != NULL); 7187 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid)); 7188 7189 mutex_enter(&dvlp->svl_mutex); 7190 if (dvlp->svl_active_pclass != NULL) { 7191 kmem_free(dvlp->svl_active_pclass, 7192 strlen(dvlp->svl_active_pclass)+1); 7193 } 7194 dvlp->svl_active_pclass = NULL; 7195 mutex_exit(&dvlp->svl_mutex); 7196 7197 if (dvlp->svl_lun_wwn != NULL) { 7198 kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1); 7199 } 7200 dvlp->svl_lun_wwn = NULL; 7201 7202 if (dvlp->svl_fops_name) { 7203 kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1); 7204 } 7205 dvlp->svl_fops_name = NULL; 7206 7207 if (dvlp->svl_fops_ctpriv != NULL && 7208 dvlp->svl_fops != NULL) { 7209 dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv); 7210 } 7211 7212 if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG) 7213 taskq_destroy(dvlp->svl_taskq); 7214 7215 mutex_destroy(&dvlp->svl_mutex); 7216 cv_destroy(&dvlp->svl_cv); 7217 sema_destroy(&dvlp->svl_pgr_sema); 7218 kmem_free(dvlp, sizeof (*dvlp)); 7219 /* 7220 * vhci_lun_free may be called before the tgt_dip 7221 * initialization so check if the sd is NULL. 7222 */ 7223 if (sd != NULL) 7224 scsi_device_hba_private_set(sd, NULL); 7225 } 7226 7227 int 7228 vhci_do_scsi_cmd(struct scsi_pkt *pkt) 7229 { 7230 int err = 0; 7231 int retry_cnt = 0; 7232 uint8_t *sns, skey; 7233 7234 #ifdef DEBUG 7235 if (vhci_debug > 5) { 7236 vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip, 7237 CE_WARN, "Vhci command", pkt->pkt_cdbp); 7238 } 7239 #endif 7240 7241 retry: 7242 err = scsi_poll(pkt); 7243 if (err) { 7244 if (pkt->pkt_cdbp[0] == SCMD_RELEASE) { 7245 if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) { 7246 VHCI_DEBUG(1, (CE_NOTE, NULL, 7247 "!v_s_do_s_c: RELEASE conflict\n")); 7248 return (0); 7249 } 7250 } 7251 if (retry_cnt++ < 6) { 7252 VHCI_DEBUG(1, (CE_WARN, NULL, 7253 "!v_s_do_s_c:retry packet 0x%p " 7254 "status 0x%x reason %s", 7255 (void *)pkt, SCBP_C(pkt), 7256 scsi_rname(pkt->pkt_reason))); 7257 if ((pkt->pkt_reason == CMD_CMPLT) && 7258 (SCBP_C(pkt) == STATUS_CHECK) && 7259 (pkt->pkt_state & STATE_ARQ_DONE)) { 7260 sns = (uint8_t *) 7261 &(((struct scsi_arq_status *)(uintptr_t) 7262 (pkt->pkt_scbp))->sts_sensedata); 7263 skey = scsi_sense_key(sns); 7264 VHCI_DEBUG(1, (CE_WARN, NULL, 7265 "!v_s_do_s_c:retry " 7266 "packet 0x%p sense data %s", (void *)pkt, 7267 scsi_sname(skey))); 7268 } 7269 goto retry; 7270 } 7271 VHCI_DEBUG(1, (CE_WARN, NULL, 7272 "!v_s_do_s_c: failed transport 0x%p 0x%x", 7273 (void *)pkt, SCBP_C(pkt))); 7274 return (0); 7275 } 7276 7277 switch (pkt->pkt_reason) { 7278 case CMD_TIMEOUT: 7279 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed " 7280 "out (pkt 0x%p)", (void *)pkt)); 7281 return (0); 7282 case CMD_CMPLT: 7283 switch (SCBP_C(pkt)) { 7284 case STATUS_GOOD: 7285 break; 7286 case STATUS_CHECK: 7287 if (pkt->pkt_state & STATE_ARQ_DONE) { 7288 sns = (uint8_t *)&((( 7289 struct scsi_arq_status *) 7290 (uintptr_t) 7291 (pkt->pkt_scbp))-> 7292 sts_sensedata); 7293 skey = scsi_sense_key(sns); 7294 if ((skey == 7295 KEY_UNIT_ATTENTION) || 7296 (skey == 7297 KEY_NOT_READY)) { 7298 /* 7299 * clear unit attn. 
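 * or not-ready condition by re-issuing the command through
 * scsi_poll(); any other sense key fails the command below.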
7300 */ 7301 7302 VHCI_DEBUG(1, 7303 (CE_WARN, NULL, 7304 "!v_s_do_s_c: " 7305 "retry " 7306 "packet 0x%p sense " 7307 "data %s", 7308 (void *)pkt, 7309 scsi_sname 7310 (skey))); 7311 goto retry; 7312 } 7313 VHCI_DEBUG(4, (CE_WARN, NULL, 7314 "!ARQ while " 7315 "transporting " 7316 "(pkt 0x%p)", 7317 (void *)pkt)); 7318 return (0); 7319 } 7320 return (0); 7321 default: 7322 VHCI_DEBUG(1, (CE_WARN, NULL, 7323 "!Bad status returned " 7324 "(pkt 0x%p, status %x)", 7325 (void *)pkt, SCBP_C(pkt))); 7326 return (0); 7327 } 7328 break; 7329 case CMD_INCOMPLETE: 7330 case CMD_RESET: 7331 case CMD_ABORTED: 7332 case CMD_TRAN_ERR: 7333 if (retry_cnt++ < 1) { 7334 VHCI_DEBUG(1, (CE_WARN, NULL, 7335 "!v_s_do_s_c: retry packet 0x%p %s", 7336 (void *)pkt, scsi_rname(pkt->pkt_reason))); 7337 goto retry; 7338 } 7339 /* FALLTHROUGH */ 7340 default: 7341 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not " 7342 "complete successfully (pkt 0x%p," 7343 "reason %x)", (void *)pkt, pkt->pkt_reason)); 7344 return (0); 7345 } 7346 return (1); 7347 } 7348 7349 static int 7350 vhci_quiesce_lun(struct scsi_vhci_lun *vlun) 7351 { 7352 mdi_pathinfo_t *pip, *spip; 7353 dev_info_t *cdip; 7354 struct scsi_vhci_priv *svp; 7355 mdi_pathinfo_state_t pstate; 7356 uint32_t p_ext_state; 7357 int circular; 7358 7359 cdip = vlun->svl_dip; 7360 pip = spip = NULL; 7361 ndi_devi_enter(cdip, &circular); 7362 pip = mdi_get_next_phci_path(cdip, NULL); 7363 while (pip != NULL) { 7364 (void) mdi_pi_get_state2(pip, &pstate, &p_ext_state); 7365 if (pstate != MDI_PATHINFO_STATE_ONLINE) { 7366 spip = pip; 7367 pip = mdi_get_next_phci_path(cdip, spip); 7368 continue; 7369 } 7370 mdi_hold_path(pip); 7371 ndi_devi_exit(cdip, circular); 7372 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 7373 mutex_enter(&svp->svp_mutex); 7374 while (svp->svp_cmds != 0) { 7375 if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex, 7376 drv_usectohz(vhci_path_quiesce_timeout * 1000000), 7377 TR_CLOCK_TICK) == -1) { 7378 mutex_exit(&svp->svp_mutex); 7379 mdi_rele_path(pip); 7380 VHCI_DEBUG(1, (CE_WARN, NULL, 7381 "Quiesce of lun is not successful " 7382 "vlun: 0x%p.", (void *)vlun)); 7383 return (0); 7384 } 7385 } 7386 mutex_exit(&svp->svp_mutex); 7387 ndi_devi_enter(cdip, &circular); 7388 spip = pip; 7389 pip = mdi_get_next_phci_path(cdip, spip); 7390 mdi_rele_path(spip); 7391 } 7392 ndi_devi_exit(cdip, circular); 7393 return (1); 7394 } 7395 7396 static int 7397 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp) 7398 { 7399 scsi_vhci_lun_t *vlun; 7400 vhci_prout_t *prout; 7401 int rval, success; 7402 mdi_pathinfo_t *pip, *npip; 7403 scsi_vhci_priv_t *osvp; 7404 dev_info_t *cdip; 7405 uchar_t cdb_1; 7406 uchar_t temp_res_key[MHIOC_RESV_KEY_SIZE]; 7407 7408 7409 /* 7410 * see if there are any other paths available; if none, 7411 * then there is nothing to do. 7412 */ 7413 cdip = svp->svp_svl->svl_dip; 7414 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7415 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7416 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7417 VHCI_DEBUG(4, (CE_NOTE, NULL, 7418 "%s%d: vhci_pgr_validate_and_register: first path\n", 7419 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7420 return (1); 7421 } 7422 7423 vlun = svp->svp_svl; 7424 prout = &vlun->svl_prout; 7425 ASSERT(vlun->svl_pgr_active != 0); 7426 7427 /* 7428 * When the path was busy/offlined, some other host might have 7429 * cleared this key. Validate key on some other path first. 7430 * If it fails, return failure. 
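 * The loop below re-issues the PROUT register through each alternate
 * online/standby path until one of them accepts the key.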
7431 */ 7432 7433 npip = pip; 7434 pip = NULL; 7435 success = 0; 7436 7437 /* Save the res key */ 7438 bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE); 7439 7440 /* 7441 * Sometimes CDB from application can be a Register_And_Ignore. 7442 * Instead of validation, this cdb would result in force registration. 7443 * Convert it to normal cdb for validation. 7444 * After that be sure to restore the cdb. 7445 */ 7446 cdb_1 = vlun->svl_cdb[1]; 7447 vlun->svl_cdb[1] &= 0xe0; 7448 7449 do { 7450 osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 7451 if (osvp == NULL) { 7452 VHCI_DEBUG(4, (CE_NOTE, NULL, 7453 "vhci_pgr_validate_and_register: no " 7454 "client priv! 0x%p offlined?\n", 7455 (void *)npip)); 7456 goto next_path_1; 7457 } 7458 7459 if (osvp == svp) { 7460 VHCI_DEBUG(4, (CE_NOTE, NULL, 7461 "vhci_pgr_validate_and_register: same svp 0x%p" 7462 " npip 0x%p vlun 0x%p\n", 7463 (void *)svp, (void *)npip, (void *)vlun)); 7464 goto next_path_1; 7465 } 7466 7467 VHCI_DEBUG(4, (CE_NOTE, NULL, 7468 "vhci_pgr_validate_and_register: First validate on" 7469 " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy" 7470 " cdb1 %x\n", (void *)osvp, (void *)vlun, 7471 (void *)curthread, vlun->svl_cdb[1])); 7472 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:"); 7473 7474 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7475 7476 VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy", 7477 (void *)vlun)); 7478 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7479 7480 rval = vhci_do_prout(osvp); 7481 if (rval == 1) { 7482 VHCI_DEBUG(4, (CE_NOTE, NULL, 7483 "%s%d: vhci_pgr_validate_and_register: key" 7484 " validated thread 0x%p\n", ddi_driver_name(cdip), 7485 ddi_get_instance(cdip), (void *)curthread)); 7486 pip = npip; 7487 success = 1; 7488 break; 7489 } else { 7490 VHCI_DEBUG(4, (CE_NOTE, NULL, 7491 "vhci_pgr_validate_and_register: First validation" 7492 " on osvp 0x%p failed %x\n", (void *)osvp, rval)); 7493 vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:"); 7494 } 7495 7496 /* 7497 * Try other paths 7498 */ 7499 next_path_1: 7500 pip = npip; 7501 rval = mdi_select_path(cdip, NULL, 7502 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7503 pip, &npip); 7504 mdi_rele_path(pip); 7505 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7506 7507 7508 /* Be sure to restore original cdb */ 7509 vlun->svl_cdb[1] = cdb_1; 7510 7511 /* Restore the res_key */ 7512 bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7513 7514 /* 7515 * If key could not be registered on any path for the first time, 7516 * return success as online should still continue. 
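 * When a path did validate the key, 'pip' is left held on that path
 * and the key is then force-registered through the caller's path
 * (svp) using Register_And_Ignore below.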
7517 */ 7518 if (success == 0) { 7519 return (1); 7520 } 7521 7522 ASSERT(pip != NULL); 7523 7524 /* 7525 * Force register on new path 7526 */ 7527 cdb_1 = vlun->svl_cdb[1]; /* store the cdb */ 7528 7529 vlun->svl_cdb[1] &= 0xe0; 7530 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 7531 7532 vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: "); 7533 7534 bcopy(prout->active_service_key, prout->service_key, 7535 MHIOC_RESV_KEY_SIZE); 7536 bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7537 7538 vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: "); 7539 7540 rval = vhci_do_prout(svp); 7541 vlun->svl_cdb[1] = cdb_1; /* restore the cdb */ 7542 if (rval != 1) { 7543 VHCI_DEBUG(4, (CE_NOTE, NULL, 7544 "vhci_pgr_validate_and_register: register on new" 7545 " path 0x%p svp 0x%p failed %x\n", 7546 (void *)pip, (void *)svp, rval)); 7547 vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: "); 7548 mdi_rele_path(pip); 7549 return (0); 7550 } 7551 7552 if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) { 7553 VHCI_DEBUG(4, (CE_NOTE, NULL, 7554 "vhci_pgr_validate_and_register: zero service key\n")); 7555 mdi_rele_path(pip); 7556 return (rval); 7557 } 7558 7559 /* 7560 * While the key was force registered, some other host might have 7561 * cleared the key. Re-validate key on another pre-existing path 7562 * before declaring success. 7563 */ 7564 npip = pip; 7565 pip = NULL; 7566 7567 /* 7568 * Sometimes CDB from application can be Register and Ignore. 7569 * Instead of validation, it would result in force registration. 7570 * Convert it to normal cdb for validation. 7571 * After that be sure to restore the cdb. 7572 */ 7573 cdb_1 = vlun->svl_cdb[1]; 7574 vlun->svl_cdb[1] &= 0xe0; 7575 success = 0; 7576 7577 do { 7578 osvp = (scsi_vhci_priv_t *) 7579 mdi_pi_get_vhci_private(npip); 7580 if (osvp == NULL) { 7581 VHCI_DEBUG(4, (CE_NOTE, NULL, 7582 "vhci_pgr_validate_and_register: no " 7583 "client priv! 0x%p offlined?\n", 7584 (void *)npip)); 7585 goto next_path_2; 7586 } 7587 7588 if (osvp == svp) { 7589 VHCI_DEBUG(4, (CE_NOTE, NULL, 7590 "vhci_pgr_validate_and_register: same osvp 0x%p" 7591 " npip 0x%p vlun 0x%p\n", 7592 (void *)svp, (void *)npip, (void *)vlun)); 7593 goto next_path_2; 7594 } 7595 7596 VHCI_DEBUG(4, (CE_NOTE, NULL, 7597 "vhci_pgr_validate_and_register: Re-validation on" 7598 " osvp 0x%p being done. 
vlun 0x%p Before bcopy cdb1 %x\n", 7599 (void *)osvp, (void *)vlun, vlun->svl_cdb[1])); 7600 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7601 7602 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7603 7604 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7605 7606 rval = vhci_do_prout(osvp); 7607 if (rval == 1) { 7608 VHCI_DEBUG(4, (CE_NOTE, NULL, 7609 "%s%d: vhci_pgr_validate_and_register: key" 7610 " validated thread 0x%p\n", ddi_driver_name(cdip), 7611 ddi_get_instance(cdip), (void *)curthread)); 7612 pip = npip; 7613 success = 1; 7614 break; 7615 } else { 7616 VHCI_DEBUG(4, (CE_NOTE, NULL, 7617 "vhci_pgr_validate_and_register: Re-validation on" 7618 " osvp 0x%p failed %x\n", (void *)osvp, rval)); 7619 vhci_print_prout_keys(vlun, 7620 "v_pgr_val_reg: reval failed: "); 7621 } 7622 7623 /* 7624 * Try other paths 7625 */ 7626 next_path_2: 7627 pip = npip; 7628 rval = mdi_select_path(cdip, NULL, 7629 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7630 pip, &npip); 7631 mdi_rele_path(pip); 7632 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7633 7634 /* Be sure to restore original cdb */ 7635 vlun->svl_cdb[1] = cdb_1; 7636 7637 if (success == 1) { 7638 /* Successfully validated registration */ 7639 mdi_rele_path(pip); 7640 return (1); 7641 } 7642 7643 VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed")); 7644 7645 /* 7646 * key invalid, back out by registering key value of 0 7647 */ 7648 VHCI_DEBUG(4, (CE_NOTE, NULL, 7649 "vhci_pgr_validate_and_register: backout on" 7650 " svp 0x%p being done\n", (void *)svp)); 7651 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7652 7653 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7654 bzero(prout->service_key, MHIOC_RESV_KEY_SIZE); 7655 7656 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7657 7658 /* 7659 * Get a new path 7660 */ 7661 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7662 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7663 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7664 VHCI_DEBUG(4, (CE_NOTE, NULL, 7665 "%s%d: vhci_pgr_validate_and_register: no valid pip\n", 7666 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7667 return (0); 7668 } 7669 7670 if ((rval = vhci_do_prout(svp)) != 1) { 7671 VHCI_DEBUG(4, (CE_NOTE, NULL, 7672 "vhci_pgr_validate_and_register: backout on" 7673 " svp 0x%p failed\n", (void *)svp)); 7674 vhci_print_prout_keys(vlun, "backout failed"); 7675 7676 VHCI_DEBUG(4, (CE_WARN, NULL, 7677 "%s%d: vhci_pgr_validate_and_register: key" 7678 " validation and backout failed", ddi_driver_name(cdip), 7679 ddi_get_instance(cdip))); 7680 if (rval == VHCI_PGR_ILLEGALOP) { 7681 VHCI_DEBUG(4, (CE_WARN, NULL, 7682 "%s%d: vhci_pgr_validate_and_register: key" 7683 " already cleared", ddi_driver_name(cdip), 7684 ddi_get_instance(cdip))); 7685 rval = 1; 7686 } else 7687 rval = 0; 7688 } else { 7689 VHCI_DEBUG(4, (CE_NOTE, NULL, 7690 "%s%d: vhci_pgr_validate_and_register: key" 7691 " validation failed, key backed out\n", 7692 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7693 vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: "); 7694 } 7695 mdi_rele_path(pip); 7696 7697 return (rval); 7698 } 7699 7700 /* 7701 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures 7702 * that vhci_scsi_start is not called in interrupt context. 7703 * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we 7704 * need to complete the command if something goes wrong. 
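 * On any status other than TRAN_ACCEPT the target packet is marked
 * CMD_ABORTED/STAT_ABORTED and completed with scsi_hba_pkt_comp() so
 * the target driver can retry it.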
7705 */ 7706 static void 7707 vhci_dispatch_scsi_start(void *arg) 7708 { 7709 struct vhci_pkt *vpkt = (struct vhci_pkt *)arg; 7710 struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt; 7711 int rval = TRAN_BUSY; 7712 7713 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending" 7714 " scsi-2 reserve for 0x%p\n", 7715 (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address)))); 7716 7717 /* 7718 * To prevent the taskq from being called recursively we set the 7719 * the VHCI_PKT_THRU_TASKQ bit in the vhci_pkt_states. 7720 */ 7721 vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ; 7722 7723 /* 7724 * Wait for the transport to get ready to send packets 7725 * and if it times out, it will return something other than 7726 * TRAN_BUSY. The vhci_reserve_delay may want to 7727 * get tuned for other transports and is therefore a global. 7728 * Using delay since this routine is called by taskq dispatch 7729 * and not called during interrupt context. 7730 */ 7731 while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address), 7732 vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) { 7733 delay(drv_usectohz(vhci_reserve_delay)); 7734 } 7735 7736 switch (rval) { 7737 case TRAN_ACCEPT: 7738 return; 7739 7740 default: 7741 /* 7742 * This pkt shall be retried, and to ensure another taskq 7743 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ 7744 * flag. 7745 */ 7746 vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ; 7747 7748 /* Ensure that the pkt is retried without a reset */ 7749 tpkt->pkt_reason = CMD_ABORTED; 7750 tpkt->pkt_statistics |= STAT_ABORTED; 7751 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: " 7752 "TRAN_rval %d returned for dip 0x%p", rval, 7753 (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address)))); 7754 break; 7755 } 7756 7757 /* 7758 * vpkt_org_vpkt should always be NULL here if the retry command 7759 * has been successfully dispatched. If vpkt_org_vpkt != NULL at 7760 * this point, it is an error so restore the original vpkt and 7761 * return an error to the target driver so it can retry the 7762 * command as appropriate. 7763 */ 7764 if (vpkt->vpkt_org_vpkt != NULL) { 7765 struct vhci_pkt *new_vpkt = vpkt; 7766 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *) 7767 mdi_pi_get_vhci_private(vpkt->vpkt_path); 7768 7769 vpkt = vpkt->vpkt_org_vpkt; 7770 7771 vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason; 7772 vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics; 7773 7774 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 7775 new_vpkt->vpkt_tgt_pkt); 7776 7777 tpkt = vpkt->vpkt_tgt_pkt; 7778 } 7779 7780 scsi_hba_pkt_comp(tpkt); 7781 } 7782 7783 static void 7784 vhci_initiate_auto_failback(void *arg) 7785 { 7786 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg; 7787 dev_info_t *vdip, *cdip; 7788 int held; 7789 7790 cdip = vlun->svl_dip; 7791 vdip = ddi_get_parent(cdip); 7792 7793 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 7794 7795 /* 7796 * Perform a final check to see if the active path class is indeed 7797 * not the preferred path class. As in the time the auto failback 7798 * was dispatched, an external failover could have been detected. 7799 * [Some other host could have detected this condition and triggered 7800 * the auto failback before]. 7801 * In such a case if we go ahead with failover we will be negating the 7802 * whole purpose of auto failback. 
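 * If the preferred (best) path class reported by the failover module
 * is already active, the LUN is released and no failover is initiated.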
7803 */ 7804 mutex_enter(&vlun->svl_mutex); 7805 if (vlun->svl_active_pclass != NULL) { 7806 char *best_pclass; 7807 struct scsi_failover_ops *fo; 7808 7809 fo = vlun->svl_fops; 7810 7811 (void) fo->sfo_pathclass_next(NULL, &best_pclass, 7812 vlun->svl_fops_ctpriv); 7813 if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) { 7814 mutex_exit(&vlun->svl_mutex); 7815 VHCI_RELEASE_LUN(vlun); 7816 VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating " 7817 "auto failback for %s as %s pathclass already " 7818 "active.\n", vlun->svl_lun_wwn, best_pclass)); 7819 return; 7820 } 7821 } 7822 mutex_exit(&vlun->svl_mutex); 7823 if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC) 7824 == MDI_SUCCESS) { 7825 vhci_log(CE_NOTE, vdip, "!Auto failback operation " 7826 "succeeded for device %s (GUID %s)", 7827 ddi_node_name(cdip), vlun->svl_lun_wwn); 7828 } else { 7829 vhci_log(CE_NOTE, vdip, "!Auto failback operation " 7830 "failed for device %s (GUID %s)", 7831 ddi_node_name(cdip), vlun->svl_lun_wwn); 7832 } 7833 VHCI_RELEASE_LUN(vlun); 7834 } 7835 7836 #ifdef DEBUG 7837 static void 7838 vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys) 7839 { 7840 vhci_clean_print(NULL, 5, "Current PGR Keys", 7841 (uchar_t *)prin, numkeys * 8); 7842 } 7843 #endif 7844 7845 static void 7846 vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg) 7847 { 7848 int i; 7849 vhci_prout_t *prout; 7850 char buf1[4*MHIOC_RESV_KEY_SIZE + 1]; 7851 char buf2[4*MHIOC_RESV_KEY_SIZE + 1]; 7852 char buf3[4*MHIOC_RESV_KEY_SIZE + 1]; 7853 char buf4[4*MHIOC_RESV_KEY_SIZE + 1]; 7854 7855 prout = &vlun->svl_prout; 7856 7857 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++) 7858 (void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]); 7859 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++) 7860 (void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]); 7861 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++) 7862 (void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]); 7863 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++) 7864 (void) sprintf(&buf4[4*i], "[%02x]", 7865 prout->active_service_key[i]); 7866 7867 /* Printing all in one go. Otherwise it will jumble up */ 7868 VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n" 7869 "res_key: : %s\n" 7870 "service_key : %s\n" 7871 "active_res_key : %s\n" 7872 "active_service_key: %s\n", 7873 msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4)); 7874 } 7875 7876 /* 7877 * Called from vhci_scsi_start to update the pHCI pkt with target packet. 
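 *
 * Expected calling pattern, sketched for illustration only (simplified;
 * the real caller in vhci_scsi_start also deals with path selection, DMA
 * binding and reservation handling, which are omitted here):
 *
 *	vhci_update_pHCI_pkt(vpkt, pkt);
 *	rval = scsi_transport(vpkt->vpkt_hba_pkt);
 *
 * with TRAN_BUSY and other failures handled by the caller.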
7878 */ 7879 static void 7880 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt) 7881 { 7882 7883 ASSERT(vpkt->vpkt_hba_pkt); 7884 7885 vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags; 7886 vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE; 7887 7888 if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) || 7889 MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) { 7890 /* 7891 * Polled Command is requested or HBA is in 7892 * suspended state 7893 */ 7894 vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR; 7895 vpkt->vpkt_hba_pkt->pkt_comp = NULL; 7896 } else { 7897 vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr; 7898 } 7899 vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time; 7900 bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp, 7901 vpkt->vpkt_tgt_init_cdblen); 7902 vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid; 7903 7904 /* Re-initialize the following pHCI packet state information */ 7905 vpkt->vpkt_hba_pkt->pkt_state = 0; 7906 vpkt->vpkt_hba_pkt->pkt_statistics = 0; 7907 vpkt->vpkt_hba_pkt->pkt_reason = 0; 7908 } 7909 7910 static int 7911 vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op, 7912 void *arg, void *result) 7913 { 7914 int ret = DDI_SUCCESS; 7915 7916 /* 7917 * Generic processing in MPxIO framework 7918 */ 7919 ret = mdi_bus_power(parent, impl_arg, op, arg, result); 7920 7921 switch (ret) { 7922 case MDI_SUCCESS: 7923 ret = DDI_SUCCESS; 7924 break; 7925 case MDI_FAILURE: 7926 ret = DDI_FAILURE; 7927 break; 7928 default: 7929 break; 7930 } 7931 7932 return (ret); 7933 } 7934 7935 static int 7936 vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom, 7937 mdi_pathinfo_t *pip) 7938 { 7939 dev_info_t *cdip; 7940 mdi_pathinfo_t *npip = NULL; 7941 scsi_vhci_priv_t *svp = NULL; 7942 struct scsi_address *pap = NULL; 7943 scsi_hba_tran_t *hba = NULL; 7944 int sps; 7945 int mps_flag; 7946 int rval = 0; 7947 7948 mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH); 7949 if (pip) { 7950 /* 7951 * If the call is from vhci_pathinfo_state_change, 7952 * then this path was busy and is becoming ready to accept IO. 7953 */ 7954 ASSERT(ap != NULL); 7955 hba = ap->a_hba_tran; 7956 ASSERT(hba != NULL); 7957 rval = scsi_ifsetcap(ap, cap, val, whom); 7958 7959 VHCI_DEBUG(2, (CE_NOTE, NULL, 7960 "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n", 7961 (void *)pip, (void *)ap, rval)); 7962 7963 return (rval); 7964 } 7965 7966 /* 7967 * Set capability on all the pHCIs. 7968 * If any path is busy, then the capability would be set by 7969 * vhci_pathinfo_state_change. 
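 *
 * The walk over the pHCI paths below is written with a label and goto;
 * for illustration, the same cursor-style iteration reads as follows
 * (sketch only, NULL checks and error handling omitted):
 *
 *	rv = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
 *	while ((rv == MDI_SUCCESS) && (pip != NULL)) {
 *		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
 *		(void) scsi_ifsetcap(&svp->svp_psd->sd_address,
 *		    cap, val, whom);
 *		rv = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
 *		mdi_rele_path(pip);
 *		pip = npip;
 *	}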
7970 */ 7971 7972 cdip = ADDR2DIP(ap); 7973 ASSERT(cdip != NULL); 7974 sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip); 7975 if ((sps != MDI_SUCCESS) || (pip == NULL)) { 7976 VHCI_DEBUG(2, (CE_WARN, NULL, 7977 "!vhci_pHCI_cap: Unable to get a path, dip 0x%p", 7978 (void *)cdip)); 7979 return (0); 7980 } 7981 7982 again: 7983 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 7984 if (svp == NULL) { 7985 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: " 7986 "priv is NULL, pip 0x%p", (void *)pip)); 7987 mdi_rele_path(pip); 7988 return (rval); 7989 } 7990 7991 if (svp->svp_psd == NULL) { 7992 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: " 7993 "psd is NULL, pip 0x%p, svp 0x%p", 7994 (void *)pip, (void *)svp)); 7995 mdi_rele_path(pip); 7996 return (rval); 7997 } 7998 7999 pap = &svp->svp_psd->sd_address; 8000 ASSERT(pap != NULL); 8001 hba = pap->a_hba_tran; 8002 ASSERT(hba != NULL); 8003 8004 if (hba->tran_setcap != NULL) { 8005 rval = scsi_ifsetcap(pap, cap, val, whom); 8006 8007 VHCI_DEBUG(2, (CE_NOTE, NULL, 8008 "!vhci_pHCI_cap: path %p, ap %p, rval %x\n", 8009 (void *)pip, (void *)ap, rval)); 8010 8011 /* 8012 * Select next path and issue the setcap, repeat 8013 * until all paths are exhausted 8014 */ 8015 sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip); 8016 if ((sps != MDI_SUCCESS) || (npip == NULL)) { 8017 mdi_rele_path(pip); 8018 return (1); 8019 } 8020 mdi_rele_path(pip); 8021 pip = npip; 8022 goto again; 8023 } 8024 mdi_rele_path(pip); 8025 return (rval); 8026 } 8027 8028 static int 8029 vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op, 8030 void *arg, dev_info_t **child) 8031 { 8032 char *guid; 8033 8034 if (vhci_bus_config_debug) 8035 flags |= NDI_DEVI_DEBUG; 8036 8037 if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE) 8038 guid = vhci_devnm_to_guid((char *)arg); 8039 else 8040 guid = NULL; 8041 8042 if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid) 8043 == MDI_SUCCESS) 8044 return (NDI_SUCCESS); 8045 else 8046 return (NDI_FAILURE); 8047 } 8048 8049 static int 8050 vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op, 8051 void *arg) 8052 { 8053 if (vhci_bus_config_debug) 8054 flags |= NDI_DEVI_DEBUG; 8055 8056 return (ndi_busop_bus_unconfig(pdip, flags, op, arg)); 8057 } 8058 8059 /* 8060 * Take the original vhci_pkt, create a duplicate of the pkt for resending 8061 * as though it originated in ssd. 
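 *
 * Rough life cycle of such a retry, for illustration only (the actual
 * call sites are in the sense/interrupt handling paths, not shown here):
 * the duplicate is created and linked back to the original, redispatched
 * (for example through the taskq routine above), and on success
 * vhci_sync_retry_pkt() below copies the status back and destroys it:
 *
 *	pkt = vhci_create_retry_pkt(vpkt);
 *	new_vpkt = TGTPKT2VHCIPKT(pkt);
 *	... redispatch pkt ...
 *	orig_vpkt = vhci_sync_retry_pkt(new_vpkt);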
8062 */ 8063 static struct scsi_pkt * 8064 vhci_create_retry_pkt(struct vhci_pkt *vpkt) 8065 { 8066 struct vhci_pkt *new_vpkt = NULL; 8067 struct scsi_pkt *pkt = NULL; 8068 8069 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *) 8070 mdi_pi_get_vhci_private(vpkt->vpkt_path); 8071 8072 /* 8073 * Ensure consistent data at completion time by setting PKT_CONSISTENT 8074 */ 8075 pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt, 8076 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen, 8077 vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL); 8078 if (pkt != NULL) { 8079 new_vpkt = TGTPKT2VHCIPKT(pkt); 8080 8081 pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address; 8082 pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags; 8083 pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time; 8084 pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp; 8085 8086 pkt->pkt_resid = 0; 8087 pkt->pkt_statistics = 0; 8088 pkt->pkt_reason = 0; 8089 8090 bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp, 8091 pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen); 8092 8093 /* 8094 * Save a pointer to the original vhci_pkt 8095 */ 8096 new_vpkt->vpkt_org_vpkt = vpkt; 8097 } 8098 8099 return (pkt); 8100 } 8101 8102 /* 8103 * Copy the successful completion information from the hba packet into 8104 * the original target pkt from the upper layer. Returns the original 8105 * vpkt and destroys the new vpkt from the internal retry. 8106 */ 8107 static struct vhci_pkt * 8108 vhci_sync_retry_pkt(struct vhci_pkt *vpkt) 8109 { 8110 struct vhci_pkt *ret_vpkt = NULL; 8111 struct scsi_pkt *tpkt = NULL; 8112 struct scsi_pkt *hba_pkt = NULL; 8113 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *) 8114 mdi_pi_get_vhci_private(vpkt->vpkt_path); 8115 8116 ASSERT(vpkt->vpkt_org_vpkt != NULL); 8117 VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt " 8118 "completed successfully!\n")); 8119 8120 ret_vpkt = vpkt->vpkt_org_vpkt; 8121 tpkt = ret_vpkt->vpkt_tgt_pkt; 8122 hba_pkt = vpkt->vpkt_hba_pkt; 8123 8124 /* 8125 * Copy the good status into the target driver's packet 8126 */ 8127 *(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp); 8128 tpkt->pkt_resid = hba_pkt->pkt_resid; 8129 tpkt->pkt_state = hba_pkt->pkt_state; 8130 tpkt->pkt_statistics = hba_pkt->pkt_statistics; 8131 tpkt->pkt_reason = hba_pkt->pkt_reason; 8132 8133 /* 8134 * Destroy the internally created vpkt for the retry 8135 */ 8136 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 8137 vpkt->vpkt_tgt_pkt); 8138 8139 return (ret_vpkt); 8140 } 8141 8142 /* restart the request sense request */ 8143 static void 8144 vhci_uscsi_restart_sense(void *arg) 8145 { 8146 struct buf *rqbp; 8147 struct buf *bp; 8148 struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg; 8149 mp_uscsi_cmd_t *mp_uscmdp; 8150 8151 VHCI_DEBUG(4, (CE_WARN, NULL, 8152 "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt)); 8153 8154 if (scsi_transport(rqpkt) != TRAN_ACCEPT) { 8155 /* if it fails - need to wakeup the original command */ 8156 mp_uscmdp = rqpkt->pkt_private; 8157 bp = mp_uscmdp->cmdbp; 8158 rqbp = mp_uscmdp->rqbp; 8159 ASSERT(mp_uscmdp && bp && rqbp); 8160 scsi_free_consistent_buf(rqbp); 8161 scsi_destroy_pkt(rqpkt); 8162 bp->b_resid = bp->b_bcount; 8163 bioerror(bp, EIO); 8164 biodone(bp); 8165 } 8166 } 8167 8168 /* 8169 * auto-rqsense is not enabled so we have to retrieve the request sense 8170 * manually. 
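 *
 * When the HBA does support automatic request sense, the sense data
 * arrives piggy-backed in a struct scsi_arq_status (see the ARQ branch
 * of vhci_uscsi_iodone() below) and no separate command is needed; the
 * capability is typically enabled once per device with a call like the
 * following (illustrative, not taken from this file):
 *
 *	(void) scsi_ifsetcap(ap, "auto-rqsense", 1, 1);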
8171 */ 8172 static int 8173 vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp) 8174 { 8175 struct buf *rqbp, *cmdbp; 8176 struct scsi_pkt *rqpkt; 8177 int rval = 0; 8178 8179 cmdbp = mp_uscmdp->cmdbp; 8180 ASSERT(cmdbp != NULL); 8181 8182 VHCI_DEBUG(4, (CE_WARN, NULL, 8183 "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p", 8184 (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp)); 8185 /* set up the packet information and cdb */ 8186 if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL, 8187 SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) { 8188 return (-1); 8189 } 8190 8191 if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp, 8192 CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) { 8193 scsi_free_consistent_buf(rqbp); 8194 return (-1); 8195 } 8196 8197 (void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp, 8198 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0); 8199 8200 mp_uscmdp->rqbp = rqbp; 8201 rqbp->b_private = mp_uscmdp; 8202 rqpkt->pkt_flags |= FLAG_SENSING; 8203 rqpkt->pkt_time = 60; 8204 rqpkt->pkt_comp = vhci_uscsi_iodone; 8205 rqpkt->pkt_private = mp_uscmdp; 8206 8207 /* 8208 * NOTE: This code path is related to MPAPI uscsi(7I), so path 8209 * selection is not based on path_instance. 8210 */ 8211 if (scsi_pkt_allocated_correctly(rqpkt)) 8212 rqpkt->pkt_path_instance = 0; 8213 8214 /* get her done */ 8215 switch (scsi_transport(rqpkt)) { 8216 case TRAN_ACCEPT: 8217 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: " 8218 "transport accepted.")); 8219 break; 8220 case TRAN_BUSY: 8221 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: " 8222 "transport busy, setting timeout.")); 8223 vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt, 8224 (drv_usectohz(5 * 1000000))); 8225 break; 8226 default: 8227 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: " 8228 "transport failed")); 8229 scsi_free_consistent_buf(rqbp); 8230 scsi_destroy_pkt(rqpkt); 8231 rval = -1; 8232 } 8233 8234 return (rval); 8235 } 8236 8237 /* 8238 * done routine for the mpapi uscsi command - this is behaving as though 8239 * FLAG_DIAGNOSE is set meaning there are no retries except for a manual 8240 * request sense. 8241 */ 8242 void 8243 vhci_uscsi_iodone(struct scsi_pkt *pkt) 8244 { 8245 struct buf *bp; 8246 mp_uscsi_cmd_t *mp_uscmdp; 8247 struct uscsi_cmd *uscmdp; 8248 struct scsi_arq_status *arqstat; 8249 int err; 8250 8251 mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private; 8252 uscmdp = mp_uscmdp->uscmdp; 8253 bp = mp_uscmdp->cmdbp; 8254 ASSERT(bp != NULL); 8255 VHCI_DEBUG(4, (CE_WARN, NULL, 8256 "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p", 8257 (void *)bp, (void *)pkt, (void *)mp_uscmdp)); 8258 /* Save the status and the residual into the uscsi_cmd struct */ 8259 uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK); 8260 uscmdp->uscsi_resid = bp->b_resid; 8261 8262 /* return on a very successful command */ 8263 if (pkt->pkt_reason == CMD_CMPLT && 8264 SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) && 8265 pkt->pkt_resid == 0) { 8266 mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp); 8267 scsi_destroy_pkt(pkt); 8268 biodone(bp); 8269 return; 8270 } 8271 VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x " 8272 " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld", 8273 pkt->pkt_reason, pkt->pkt_resid, 8274 pkt->pkt_state, bp->b_bcount, bp->b_resid)); 8275 8276 err = EIO; 8277 8278 arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp); 8279 if (pkt->pkt_reason != CMD_CMPLT) { 8280 /* 8281 * The command did not complete. 
8282 */ 8283 VHCI_DEBUG(4, (CE_NOTE, NULL, 8284 "vhci_uscsi_iodone: command did not complete." 8285 " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags)); 8286 if (pkt->pkt_flags & FLAG_SENSING) { 8287 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR); 8288 } else if (pkt->pkt_reason == CMD_TIMEOUT) { 8289 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR); 8290 err = ETIMEDOUT; 8291 } 8292 } else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) { 8293 /* 8294 * The auto-rqsense happened, and the packet has a filled-in 8295 * scsi_arq_status structure, pointed to by pkt_scbp. 8296 */ 8297 VHCI_DEBUG(4, (CE_NOTE, NULL, 8298 "vhci_uscsi_iodone: received auto-requested sense")); 8299 if (uscmdp->uscsi_flags & USCSI_RQENABLE) { 8300 /* get the amount of data to copy into rqbuf */ 8301 int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid; 8302 rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen); 8303 uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen; 8304 uscmdp->uscsi_rqstatus = 8305 *((char *)&arqstat->sts_rqpkt_status); 8306 if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen && 8307 rqlen != 0) { 8308 bcopy(&(arqstat->sts_sensedata), 8309 uscmdp->uscsi_rqbuf, rqlen); 8310 } 8311 mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp); 8312 VHCI_DEBUG(4, (CE_NOTE, NULL, 8313 "vhci_uscsi_iodone: ARQ " 8314 "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d " 8315 "xfer: %d rqpkt_resid: %d\n", 8316 uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid, 8317 uscmdp->uscsi_rqlen, rqlen, 8318 arqstat->sts_rqpkt_resid)); 8319 } 8320 } else if (pkt->pkt_flags & FLAG_SENSING) { 8321 struct buf *rqbp; 8322 struct scsi_status *rqstatus; 8323 8324 rqstatus = (struct scsi_status *)pkt->pkt_scbp; 8325 /* a manual request sense was done - get the information */ 8326 if (uscmdp->uscsi_flags & USCSI_RQENABLE) { 8327 int rqlen = SENSE_LENGTH - pkt->pkt_resid; 8328 8329 rqbp = mp_uscmdp->rqbp; 8330 /* get the amount of data to copy into rqbuf */ 8331 rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen); 8332 uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen; 8333 uscmdp->uscsi_rqstatus = *((char *)rqstatus); 8334 if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) { 8335 bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf, 8336 rqlen); 8337 } 8338 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR); 8339 scsi_free_consistent_buf(rqbp); 8340 } 8341 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING" 8342 "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n", 8343 uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid)); 8344 } else { 8345 struct scsi_status *status = 8346 (struct scsi_status *)pkt->pkt_scbp; 8347 /* 8348 * Command completed and we're not getting sense. Check for 8349 * errors and decide what to do next. 
8350 */ 8351 VHCI_DEBUG(4, (CE_NOTE, NULL, 8352 "vhci_uscsi_iodone: command appears complete: reason: %x", 8353 pkt->pkt_reason)); 8354 if (status->sts_chk) { 8355 /* need to manually get the request sense */ 8356 if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) { 8357 scsi_destroy_pkt(pkt); 8358 return; 8359 } 8360 } else { 8361 VHCI_DEBUG(4, (CE_NOTE, NULL, 8362 "vhci_chk_err: appears complete")); 8363 err = 0; 8364 mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp); 8365 if (pkt->pkt_resid) { 8366 bp->b_resid += pkt->pkt_resid; 8367 } 8368 } 8369 } 8370 8371 if (err) { 8372 if (bp->b_resid == 0) 8373 bp->b_resid = bp->b_bcount; 8374 bioerror(bp, err); 8375 bp->b_flags |= B_ERROR; 8376 } 8377 8378 scsi_destroy_pkt(pkt); 8379 biodone(bp); 8380 8381 VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit")); 8382 } 8383 8384 /* 8385 * start routine for the mpapi uscsi command 8386 */ 8387 int 8388 vhci_uscsi_iostart(struct buf *bp) 8389 { 8390 struct scsi_pkt *pkt; 8391 struct uscsi_cmd *uscmdp; 8392 mp_uscsi_cmd_t *mp_uscmdp; 8393 int stat_size, rval; 8394 int retry = 0; 8395 8396 ASSERT(bp->b_private != NULL); 8397 8398 mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private; 8399 uscmdp = mp_uscmdp->uscmdp; 8400 if (uscmdp->uscsi_flags & USCSI_RQENABLE) { 8401 stat_size = SENSE_LENGTH; 8402 } else { 8403 stat_size = 1; 8404 } 8405 8406 pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen, 8407 stat_size, 0, 0, SLEEP_FUNC, NULL); 8408 if (pkt == NULL) { 8409 VHCI_DEBUG(4, (CE_NOTE, NULL, 8410 "vhci_uscsi_iostart: rval: EINVAL")); 8411 bp->b_resid = bp->b_bcount; 8412 uscmdp->uscsi_resid = bp->b_bcount; 8413 bioerror(bp, EINVAL); 8414 biodone(bp); 8415 return (EINVAL); 8416 } 8417 8418 pkt->pkt_time = uscmdp->uscsi_timeout; 8419 bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen); 8420 pkt->pkt_comp = vhci_uscsi_iodone; 8421 pkt->pkt_private = mp_uscmdp; 8422 if (uscmdp->uscsi_flags & USCSI_SILENT) 8423 pkt->pkt_flags |= FLAG_SILENT; 8424 if (uscmdp->uscsi_flags & USCSI_ISOLATE) 8425 pkt->pkt_flags |= FLAG_ISOLATE; 8426 if (uscmdp->uscsi_flags & USCSI_DIAGNOSE) 8427 pkt->pkt_flags |= FLAG_DIAGNOSE; 8428 if (uscmdp->uscsi_flags & USCSI_RENEGOT) { 8429 pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 8430 } 8431 VHCI_DEBUG(4, (CE_WARN, NULL, 8432 "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p" 8433 " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p" 8434 " stat_size: %d", 8435 (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp, 8436 (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen, 8437 (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size)); 8438 8439 /* 8440 * NOTE: This code path is related to MPAPI uscsi(7I), so path 8441 * selection is not based on path_instance. 
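 *
 * For contrast, an illustrative sketch (not from this file) of a caller
 * that does want to pin a command to one specific path by instance;
 * mdi_pi_get_path_instance() is assumed here to be the way to obtain
 * the instance for a given mdi_pathinfo_t:
 *
 *	if (scsi_pkt_allocated_correctly(pkt))
 *		pkt->pkt_path_instance = mdi_pi_get_path_instance(pip);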
8442 */ 8443 if (scsi_pkt_allocated_correctly(pkt)) 8444 pkt->pkt_path_instance = 0; 8445 8446 while (((rval = scsi_transport(pkt)) == TRAN_BUSY) && 8447 retry < vhci_uscsi_retry_count) { 8448 delay(drv_usectohz(vhci_uscsi_delay)); 8449 retry++; 8450 } 8451 if (retry >= vhci_uscsi_retry_count) { 8452 VHCI_DEBUG(4, (CE_NOTE, NULL, 8453 "vhci_uscsi_iostart: tran_busy - retry: %d", retry)); 8454 } 8455 switch (rval) { 8456 case TRAN_ACCEPT: 8457 rval = 0; 8458 break; 8459 8460 default: 8461 VHCI_DEBUG(4, (CE_NOTE, NULL, 8462 "vhci_uscsi_iostart: rval: %d count: %ld res: %ld", 8463 rval, bp->b_bcount, bp->b_resid)); 8464 bp->b_resid = bp->b_bcount; 8465 uscmdp->uscsi_resid = bp->b_bcount; 8466 bioerror(bp, EIO); 8467 scsi_destroy_pkt(pkt); 8468 biodone(bp); 8469 rval = EIO; 8470 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR); 8471 break; 8472 } 8473 VHCI_DEBUG(4, (CE_NOTE, NULL, 8474 "vhci_uscsi_iostart: exit: rval: %d", rval)); 8475 return (rval); 8476 } 8477 8478 /* ARGSUSED */ 8479 static struct scsi_failover_ops * 8480 vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd, 8481 void **ctprivp, char **fo_namep) 8482 { 8483 struct scsi_failover_ops *sfo; 8484 char *sfo_name; 8485 char *override; 8486 struct scsi_failover *sf; 8487 8488 ASSERT(psd && psd->sd_inq); 8489 if ((psd == NULL) || (psd->sd_inq == NULL)) { 8490 VHCI_DEBUG(1, (CE_NOTE, NULL, 8491 "!vhci_dev_fo:return NULL no scsi_device or inquiry")); 8492 return (NULL); 8493 } 8494 8495 /* 8496 * Determine if the device is supported under scsi_vhci, and select 8497 * a failover module. 8498 * 8499 * See if there is a scsi_vhci.conf file override for this device's 8500 * VID/PID. The following values can be returned: 8501 * 8502 * NULL If NULL is returned then there is no scsi_vhci.conf 8503 * override. For NULL, we determine the failover_ops for 8504 * this device by checking the sfo_device_probe entry 8505 * point for each 'fops' module, in order. 8506 * 8507 * NOTE: Correct operation may depend on module ordering 8508 * of 'specific' (failover modules that are completely 8509 * VID/PID table based) to 'generic' (failover modules 8510 * that are based on T10 standards like TPGS). Currently, 8511 * the value of 'ddi-forceload' in scsi_vhci.conf is used 8512 * to establish the module list and probe order. 8513 * 8514 * "NONE" If the value "NONE" is returned then there is a 8515 * scsi_vhci.conf VID/PID override to indicate the device 8516 * should not be supported under scsi_vhci (even if there 8517 * is an 'fops' module supporting the device). 8518 * 8519 * "<other>" If another value is returned then that value is the 8520 * name of the 'fops' module that should be used.
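 *
 * For illustration, an override is a list of string pairs in
 * scsi_vhci.conf; the VID/PID strings below are hypothetical (the VID
 * field is blank-padded to eight characters, as reported by INQUIRY):
 *
 *	scsi-vhci-failover-override =
 *		"ACME    SuperArray",	"f_tpgs",
 *		"ACME    LegacyJBOD",	"NONE";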
8521 */ 8522 sfo = NULL; /* "NONE" */ 8523 override = scsi_get_device_type_string( 8524 "scsi-vhci-failover-override", vdip, psd); 8525 if (override == NULL) { 8526 /* NULL: default: select based on sfo_device_probe results */ 8527 for (sf = scsi_failover_table; sf->sf_mod; sf++) { 8528 if ((sf->sf_sfo == NULL) || 8529 sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq, 8530 ctprivp) == SFO_DEVICE_PROBE_PHCI) 8531 continue; 8532 8533 /* found failover module, supported under scsi_vhci */ 8534 sfo = sf->sf_sfo; 8535 if (fo_namep && (*fo_namep == NULL)) { 8536 sfo_name = i_ddi_strdup(sfo->sfo_name, 8537 KM_SLEEP); 8538 *fo_namep = sfo_name; 8539 } 8540 break; 8541 } 8542 } else if (strcasecmp(override, "NONE")) { 8543 /* !"NONE": select based on driver.conf specified name */ 8544 for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) { 8545 if ((sf->sf_sfo == NULL) || 8546 (sf->sf_sfo->sfo_name == NULL) || 8547 strcmp(override, sf->sf_sfo->sfo_name)) 8548 continue; 8549 8550 /* 8551 * NOTE: If sfo_device_probe() has side effects, 8552 * such as setting *ctprivp, those side effects do 8553 * not occur when an override is configured. 8554 */ 8555 8556 /* found failover module, supported under scsi_vhci */ 8557 sfo = sf->sf_sfo; 8558 if (fo_namep && (*fo_namep == NULL)) { 8559 sfo_name = kmem_alloc(strlen("conf ") + 8560 strlen(sfo->sfo_name) + 1, KM_SLEEP); 8561 (void) sprintf(sfo_name, "conf %s", 8562 sfo->sfo_name); 8563 *fo_namep = sfo_name; 8564 } 8565 break; 8566 } 8567 } 8568 if (override) 8569 kmem_free(override, strlen(override) + 1); 8570 return (sfo); 8571 } 8572 8573 /* 8574 * Determine whether the device described by cinfo should be enumerated 8575 * under the vHCI or the pHCI: if a failover ops module claims it, the 8576 * device is supported under the vHCI. By agreement with SCSA, cinfo is a 8577 * pointer to a scsi_device structure associated with a decorated pHCI probe node. 8578 */ 8579 /* ARGSUSED */ 8580 int 8581 vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo) 8582 { 8583 struct scsi_device *psd = (struct scsi_device *)cinfo; 8584 8585 return (vhci_dev_fo(vdip, psd, NULL, NULL) ?
MDI_SUCCESS : MDI_FAILURE); 8586 } 8587 8588 8589 #ifdef DEBUG 8590 extern struct scsi_key_strings scsi_cmds[]; 8591 8592 static char * 8593 vhci_print_scsi_cmd(char cmd) 8594 { 8595 char tmp[64]; 8596 char *cpnt; 8597 8598 cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp); 8599 /* tmp goes out of scope on return and caller sees garbage */ 8600 if (cpnt == tmp) { 8601 cpnt = "Unknown Command"; 8602 } 8603 return (cpnt); 8604 } 8605 8606 extern uchar_t scsi_cdb_size[]; 8607 8608 static void 8609 vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb) 8610 { 8611 int len = scsi_cdb_size[CDB_GROUPID(cdb[0])]; 8612 char buf[256]; 8613 8614 if (level == CE_NOTE) { 8615 vhci_log(level, dip, "path cmd %s\n", 8616 vhci_print_scsi_cmd(*cdb)); 8617 return; 8618 } 8619 8620 (void) sprintf(buf, "%s for cmd(%s)", title, vhci_print_scsi_cmd(*cdb)); 8621 vhci_clean_print(dip, level, buf, cdb, len); 8622 } 8623 8624 static void 8625 vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data, 8626 int len) 8627 { 8628 int i; 8629 int c; 8630 char *format; 8631 char buf[256]; 8632 uchar_t byte; 8633 8634 (void) sprintf(buf, "%s:\n", title); 8635 vhci_log(level, dev, "%s", buf); 8636 level = CE_CONT; 8637 for (i = 0; i < len; ) { 8638 buf[0] = 0; 8639 for (c = 0; c < 8 && i < len; c++, i++) { 8640 byte = (uchar_t)data[i]; 8641 if (byte < 0x10) 8642 format = "0x0%x "; 8643 else 8644 format = "0x%x "; 8645 (void) sprintf(&buf[(int)strlen(buf)], format, byte); 8646 } 8647 (void) sprintf(&buf[(int)strlen(buf)], "\n"); 8648 8649 vhci_log(level, dev, "%s\n", buf); 8650 } 8651 } 8652 #endif 8653 static void 8654 vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun) 8655 { 8656 char *svl_wwn; 8657 mpapi_item_list_t *ilist; 8658 mpapi_lu_data_t *ld; 8659 8660 if (vlun == NULL) { 8661 return; 8662 } else { 8663 svl_wwn = vlun->svl_lun_wwn; 8664 } 8665 8666 ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head; 8667 8668 while (ilist != NULL) { 8669 ld = (mpapi_lu_data_t *)(ilist->item->idata); 8670 if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn, 8671 strlen(svl_wwn)) == 0)) { 8672 ld->valid = 0; 8673 VHCI_DEBUG(6, (CE_WARN, NULL, 8674 "vhci_invalidate_mpapi_lu: " 8675 "Invalidated LU(%s)", svl_wwn)); 8676 return; 8677 } 8678 ilist = ilist->next; 8679 } 8680 VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: " 8681 "Could not find LU(%s) to invalidate.", svl_wwn)); 8682 } 8683
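/*
 * Illustration of the DEBUG-only dump format above: a call such as
 *
 *	uchar_t cdb[6] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };
 *	vhci_clean_print(dip, CE_WARN, "INQUIRY cdb", cdb, 6);
 *
 * (dip being whatever dev_info_t the caller has at hand) logs the title
 * on its own line followed by up to eight bytes per line, each byte
 * printed as 0xNN with values below 0x10 zero-padded:
 *
 *	INQUIRY cdb:
 *	0x12 0x00 0x00 0x00 0x24 0x00
 */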