/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Multiplexed I/O SCSI vHCI implementation
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/scsi/impl/services.h>
#include <sys/sunmdi.h>
#include <sys/mdi_impldefs.h>
#include <sys/scsi/adapters/scsi_vhci.h>
#include <sys/disp.h>
#include <sys/byteorder.h>

extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;

#ifdef	DEBUG
int vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
#endif

/* retry for the vhci_do_prout command when a not ready is returned */
int vhci_prout_not_ready_retry = 180;

/*
 * These values are defined to support the internal retry of
 * SCSI packets for better sense code handling.
 */
#define	VHCI_CMD_CMPLT	0
#define	VHCI_CMD_RETRY	1
#define	VHCI_CMD_ERROR	-1

#define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
#define	VHCI_SCSI_PERR		0x47
#define	VHCI_PGR_ILLEGALOP	-2
#define	VHCI_NUM_UPDATE_TASKQ	8
/* changed to 132 to accommodate HDS */

/*
 * Version Macros
 */
#define	VHCI_NAME_VERSION	"SCSI VHCI Driver"
char		vhci_version_name[] = VHCI_NAME_VERSION;

int		vhci_first_time = 0;
clock_t		vhci_to_ticks = 0;
int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
kcondvar_t	vhci_cv;
kmutex_t	vhci_global_mutex;
void		*vhci_softstate = NULL; /* for soft state */

/*
 * Flag to delay the retry of the reserve command
 */
int		vhci_reserve_delay = 100000;
static int	vhci_path_quiesce_timeout = 60;
static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];

/* uscsi delay for a TRAN_BUSY */
static int vhci_uscsi_delay = 100000;
static int vhci_uscsi_retry_count = 180;
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static timeout_id_t vhci_restart_timeid = 0;

static int	vhci_bus_config_debug = 0;
/*
 * Bidirectional map of 'target-port' to port id <pid> for support of
 * iostat(1M) '-Xx' and '-Yx' output.
 */
static kmutex_t		vhci_targetmap_mutex;
static uint_t		vhci_targetmap_pid = 1;
static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */

/*
 * functions exported by scsi_vhci struct cb_ops
 */
static int vhci_open(dev_t *, int, int, cred_t *);
static int vhci_close(dev_t, int, int, cred_t *);
static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * functions exported by scsi_vhci struct dev_ops
 */
static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * functions exported by scsi_vhci scsi_hba_tran_t transport table
 */
static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
    struct scsi_device *);
static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_target(struct scsi_address *, int level,
    uint8_t select_path);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip);
static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
    void *, void *);
static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);
static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *);
static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *,
    void **, char **);

/*
 * functions registered with the mpxio framework via mdi_vhci_ops_t
 */
static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
    mdi_pathinfo_state_t, uint32_t, int);
static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_failover(dev_info_t *, dev_info_t *, int);
static void vhci_client_attached(dev_info_t *);
static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *);
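
/*
 * Note: the mdi callbacks declared above are handed to the mpxio
 * framework through the mdi_vhci_ops_t table (vhci_opinfo) defined
 * below, registered from vhci_attach() with, in sketch form:
 *
 *	mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0);
 */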

static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
    int, caddr_t);
static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
    uint_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
    sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
    struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
    scsi_vhci_lun_t *, char *, char *);

static char *vhci_devnm_to_guid(char *);
static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
    int, int (*func)(caddr_t));
static void vhci_intr(struct scsi_pkt *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt **);
static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
static void vhci_lun_free(dev_info_t *);
static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
    uint8_t, uint8_t);
void vhci_update_pathstates(void *);

#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
static void vhci_print_cdb(dev_info_t *dip, uint_t level,
    char *title, uchar_t *cdb);
static void vhci_clean_print(dev_info_t *dev, uint_t level,
    char *title, uchar_t *data, int len);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *);

/*
 * MP-API related functions
 */
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
    scsi_vhci_lun_t *, mdi_pathinfo_t *);
extern void *vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
    uint8_t, void *);
extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
    scsi_vhci_lun_t *);

#define	VHCI_DMA_MAX_XFER_CAP	INT_MAX

#define	VHCI_MAX_PGR_RETRIES	3

/*
 * Macros for the device-type mpxio options
 */
#define	LOAD_BALANCE_OPTIONS		"load-balance-options"
#define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
#define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
#define	DEVICE_TYPE_STR			"device-type"
#define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')

static struct cb_ops vhci_cb_ops = {
	vhci_open,			/* open */
	vhci_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	vhci_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops vhci_ops = {
	DEVO_REV,
	0,
	vhci_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	vhci_attach,		/* attach and detach are mandatory */
	vhci_detach,
	nodev,			/* reset */
	&vhci_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	vhci_version_name,	/* module name */
	&vhci_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static mdi_vhci_ops_t vhci_opinfo = {
	MDI_VHCI_OPS_REV,
	vhci_pathinfo_init,		/* Pathinfo node init callback */
	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
	vhci_pathinfo_state_change,	/* Pathinfo node state change */
	vhci_failover,			/* failover callback */
	vhci_client_attached,		/* client attached callback */
	vhci_is_dev_supported		/* is device supported by mdi */
};

/*
 * The scsi_failover table defines an ordered set of 'fops' modules supported
 * by scsi_vhci. Currently, initialize this table from the 'ddi-forceload'
 * property specified in scsi_vhci.conf.
 */
static struct scsi_failover {
	ddi_modhandle_t			sf_mod;
	struct scsi_failover_ops	*sf_sfo;
} *scsi_failover_table;
static uint_t	scsi_nfailover;

int
_init(void)
{
	int	rval;

	/*
	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
	 * before registering with the transport.
	 */
	if ((rval = ddi_soft_state_init(&vhci_softstate,
	    sizeof (struct scsi_vhci), 1)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init:soft state init failed\n"));
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: scsi hba init failed\n"));
		ddi_soft_state_fini(&vhci_softstate);
		return (rval);
	}

	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
	vhci_targetmap_byport = mod_hash_create_strhash(
	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
	vhci_targetmap_bypid = mod_hash_create_idhash(
	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);

	if ((rval = mod_install(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
		if (vhci_targetmap_bypid)
			mod_hash_destroy_idhash(vhci_targetmap_bypid);
		if (vhci_targetmap_byport)
			mod_hash_destroy_strhash(vhci_targetmap_byport);
		mutex_destroy(&vhci_targetmap_mutex);
		cv_destroy(&vhci_cv);
		mutex_destroy(&vhci_global_mutex);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&vhci_softstate);
	}
	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
		return (rval);
	}

	if (vhci_targetmap_bypid)
		mod_hash_destroy_idhash(vhci_targetmap_bypid);
	if (vhci_targetmap_byport)
		mod_hash_destroy_strhash(vhci_targetmap_byport);
	mutex_destroy(&vhci_targetmap_mutex);
	cv_destroy(&vhci_cv);
	mutex_destroy(&vhci_global_mutex);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&vhci_softstate);

	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Lookup scsi_failover by "short name" of failover module.
 */
struct scsi_failover_ops *
vhci_failover_ops_by_name(char *name)
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
			return (sf->sf_sfo);
	}
	return (NULL);
}
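
/*
 * Usage sketch for the lookup above: the "well-known" fops modules are
 * found by their short names, e.g.
 *
 *	struct scsi_failover_ops *sfo =
 *	    vhci_failover_ops_by_name(SFO_NAME_SYM);
 *
 * A NULL return means the module was not listed in 'ddi-forceload' (or
 * failed to load); vhci_failover_modopen() below warns about exactly
 * this for SFO_NAME_SYM and SFO_NAME_TPGS.
 */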
/*
 * Load all scsi_failover_ops 'fops' modules.
 */
static void
vhci_failover_modopen(struct scsi_vhci *vhci)
{
	char			**module;
	int			i;
	struct scsi_failover	*sf;
	char			**dt;
	int			e;

	if (scsi_failover_table)
		return;

	/* Get the list of modules from scsi_vhci.conf */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf is missing 'ddi-forceload'");
		return;
	}
	if (scsi_nfailover == 0) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf has empty 'ddi-forceload'");
		ddi_prop_free(module);
		return;
	}

	/* allocate failover table based on number of modules */
	scsi_failover_table = (struct scsi_failover *)
	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
	    KM_SLEEP);

	/* loop over modules specified in scsi_vhci.conf and open each module */
	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
		if (module[i] == NULL)
			continue;

		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
		if (sf->sf_mod == NULL) {
			/*
			 * A module returns EEXIST if other software is
			 * supporting the intended function: for example
			 * the scsi_vhci_f_sum_emc module returns EEXIST
			 * from _init if EMC powerpath software is installed.
			 */
			if (e != EEXIST)
				cmn_err(CE_WARN, "scsi_vhci: unable to open "
				    "module '%s', error %d", module[i], e);
			continue;
		}
		sf->sf_sfo = ddi_modsym(sf->sf_mod,
		    "scsi_vhci_failover_ops", &e);
		if (sf->sf_sfo == NULL) {
			cmn_err(CE_WARN, "scsi_vhci: "
			    "unable to import 'scsi_failover_ops' from '%s', "
			    "error %d", module[i], e);
			(void) ddi_modclose(sf->sf_mod);
			sf->sf_mod = NULL;
			continue;
		}

		/* register vid/pid of devices supported with mpapi */
		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
			vhci_mpapi_add_dev_prod(vhci, *dt);
		sf++;
	}

	/* verify that at least the "well-known" modules were there */
	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");
	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");

	/* call sfo_init for modules that need it */
	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
			sf->sf_sfo->sfo_init();
	}

	ddi_prop_free(module);
}

/*
 * unload all loaded scsi_failover_ops modules
 */
static void
vhci_failover_modclose()
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
			continue;
		(void) ddi_modclose(sf->sf_mod);
		sf->sf_mod = NULL;
		sf->sf_sfo = NULL;
	}

	if (scsi_failover_table && scsi_nfailover)
		kmem_free(scsi_failover_table,
		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
	scsi_failover_table = NULL;
	scsi_nfailover = 0;
}
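
/*
 * Illustrative scsi_vhci.conf fragment consumed by
 * vhci_failover_modopen() above (module paths are examples and may
 * differ between distributions):
 *
 *	ddi-forceload =
 *	    "misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *	    "misc/scsi_vhci/scsi_vhci_f_sym",
 *	    "misc/scsi_vhci/scsi_vhci_f_tpgs";
 */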
/* ARGSUSED */
static int
vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
		mutex_exit(&vhci->vhci_mutex);
		vhci_log(CE_NOTE, vhci->vhci_dip,
		    "!vhci%d: Already open\n", getminor(*devp));
		return (EBUSY);
	}

	vhci->vhci_state |= VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);
	return (0);
}


/* ARGSUSED */
static int
vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	vhci->vhci_state &= ~VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);

	return (0);
}

/* ARGSUSED */
static int
vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	if (IS_DEVCTL(cmd)) {
		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
	} else if (cmd == MP_CMD) {
		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
	} else {
		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
	}
}
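
/*
 * Note on minor numbers: vhci_open()/vhci_close() above recover the
 * softstate instance with MINOR2INST(getminor(dev)), while
 * vhci_attach() below creates the "devctl" node using
 * INST2DEVCTL(instance); the two macros are intended to be inverses
 * over the devctl minor space.
 */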
/*
 * attach the module
 */
static int
vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			rval = DDI_FAILURE;
	int			scsi_hba_attached = 0;
	int			vhci_attached = 0;
	int			mutex_initted = 0;
	int			instance;
	struct scsi_vhci	*vhci;
	scsi_hba_tran_t		*tran;
	char			cache_name_buf[64];
	char			*data;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
		    "implemented\n"));
		return (rval);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_attach: unknown ddi command\n"));
		return (rval);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "soft state alloc failed\n"));
		return (DDI_FAILURE);
	}

	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "bad soft state\n"));
		ddi_soft_state_free(vhci_softstate, instance);
		return (DDI_FAILURE);
	}

	/* Allocate packet cache */
	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
	    "vhci%d_cache", instance);

	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	ASSERT(tran != NULL);

	vhci->vhci_tran		= tran;
	vhci->vhci_dip		= dip;
	vhci->vhci_instance	= instance;

	tran->tran_hba_private	= vhci;
	tran->tran_tgt_init	= vhci_scsi_tgt_init;
	tran->tran_tgt_probe	= NULL;
	tran->tran_tgt_free	= vhci_scsi_tgt_free;

	tran->tran_start	= vhci_scsi_start;
	tran->tran_abort	= vhci_scsi_abort;
	tran->tran_reset	= vhci_scsi_reset;
	tran->tran_getcap	= vhci_scsi_getcap;
	tran->tran_setcap	= vhci_scsi_setcap;
	tran->tran_init_pkt	= vhci_scsi_init_pkt;
	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
	tran->tran_dmafree	= vhci_scsi_dmafree;
	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
	tran->tran_reset_notify = vhci_scsi_reset_notify;

	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
	tran->tran_get_name	= vhci_scsi_get_name;
	tran->tran_bus_reset	= NULL;
	tran->tran_quiesce	= NULL;
	tran->tran_unquiesce	= NULL;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_post_event = NULL;

	tran->tran_bus_power = vhci_scsi_bus_power;

	tran->tran_bus_config = vhci_scsi_bus_config;
	tran->tran_bus_unconfig = vhci_scsi_bus_unconfig;

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
	    != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "mdi_vhci_register failed\n"));
		goto attach_fail;
	}
	vhci_attached++;

	/*
	 * Attach this instance of the hba.
	 *
	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
	 * driver, it has nothing to do with DMA. However, when calling
	 * scsi_hba_attach_setup() we need to pass something valid in the
	 * dma attributes parameter. So we just use scsi_alloc_attr.
	 * SCSA itself seems to care only for dma_attr_minxfer and
	 * dma_attr_burstsizes fields of dma attributes structure.
	 * It expects those fields to be non-zero.
	 */
	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
	    SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "hba attach failed\n"));
		goto attach_fail;
	}
	scsi_hba_attached++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach: "
		    "ddi_create_minor_node failed\n"));
		goto attach_fail;
	}

	/*
	 * Set pm-want-child-notification property for
	 * power management of the phci and client
	 */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d failed to create pm-want-child-notification? prop",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto attach_fail;
	}

	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
	vhci->vhci_update_pathstates_taskq =
	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
	    MINCLSYSPRI, 1, 4, 0);
	ASSERT(vhci->vhci_taskq);
	ASSERT(vhci->vhci_update_pathstates_taskq);

	/*
	 * Set appropriate configuration flags based on options set in
	 * conf file.
	 */
	vhci->vhci_conf_flags = 0;
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
	    "auto-failback", &data) == DDI_SUCCESS) {
		if (strcmp(data, "enable") == 0)
			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
		ddi_prop_free(data);
	}

	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
		    "disabled through scsi_vhci.conf file.");

	/*
	 * Allocate an mpapi private structure
	 */
	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
	if (vhci_mpapi_init(vhci) != 0) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
		    "vhci_mpapi_init() failed"));
	}

	vhci_failover_modopen(vhci);		/* load failover modules */

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_attached)
		(void) mdi_vhci_unregister(dip, 0);

	if (scsi_hba_attached)
		(void) scsi_hba_detach(dip);

	if (vhci->vhci_tran)
		scsi_hba_tran_free(vhci->vhci_tran);

	if (mutex_initted) {
		mutex_destroy(&vhci->vhci_mutex);
	}

	ddi_soft_state_free(vhci_softstate, instance);
	return (DDI_FAILURE);
}
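
/*
 * Teardown in vhci_detach() below mirrors the attach path above:
 * mdi_vhci_unregister(), scsi_hba_detach() and scsi_hba_tran_free(),
 * then property, timeout, mutex, taskq, minor node and softstate
 * cleanup, and finally vhci_failover_modclose().
 */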

/*ARGSUSED*/
static int
vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance = ddi_get_instance(dip);
	scsi_hba_tran_t		*tran;
	struct scsi_vhci	*vhci;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	vhci = TRAN2HBAPRIVATE(tran);
	if (!vhci) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not "
		    "yet implemented\n"));
		return (DDI_FAILURE);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_detach: unknown ddi command\n"));
		return (DDI_FAILURE);
	}

	(void) mdi_vhci_unregister(dip, 0);
	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d unable to remove prop pm-want-child-notification?",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	if (vhci_restart_timeid != 0) {
		(void) untimeout(vhci_restart_timeid);
	}
	vhci_restart_timeid = 0;

	mutex_destroy(&vhci->vhci_mutex);
	vhci->vhci_dip = NULL;
	vhci->vhci_tran = NULL;
	taskq_destroy(vhci->vhci_taskq);
	taskq_destroy(vhci->vhci_update_pathstates_taskq);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(vhci_softstate, instance);

	vhci_failover_modclose();		/* unload failover modules */
	return (DDI_SUCCESS);
}

/*
 * vhci_getinfo()
 * Given the device number, return the devinfo pointer or the
 * instance number.
 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
 */

/*ARGSUSED*/
static int
vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct scsi_vhci	*vhcip;
	int			instance = MINOR2INST(getminor((dev_t)arg));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		vhcip = ddi_get_soft_state(vhci_softstate, instance);
		if (vhcip != NULL)
			*result = vhcip->vhci_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
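
/*
 * Example (sketch, hypothetical caller) for vhci_getinfo() above: a
 * DDI_INFO_DEVT2INSTANCE query succeeds even before attach, since it
 * only decodes the minor number:
 *
 *	void *result;
 *	if (vhci_getinfo(NULL, DDI_INFO_DEVT2INSTANCE,
 *	    (void *)(uintptr_t)dev, &result) == DDI_SUCCESS)
 *		instance = (int)(uintptr_t)result;
 */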

/*ARGSUSED*/
static int
vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	char			*guid;
	scsi_vhci_lun_t		*vlun;
	struct scsi_vhci	*vhci;
	clock_t			from_ticks;
	mdi_pathinfo_t		*pip;
	int			rval;

	ASSERT(hba_dip != NULL);
	ASSERT(tgt_dip != NULL);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		/*
		 * This must be the .conf node without a GUID property.
		 * The node under fp already inserts a delay, so we
		 * just return from here. We rely on this delay to have
		 * all dips be posted to the ndi hotplug thread's newdev
		 * list. This is necessary for the deferred attach
		 * mechanism to work and opens() done soon after boot to
		 * succeed.
		 */
		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
		    "property failed"));
		return (DDI_NOT_WELL_FORMED);
	}

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		/*
		 * This must be a .conf node with the GUID property. We don't
		 * merge property by ndi_merge_node() here because the
		 * devi_addr_buf of a .conf node is always "" according to the
		 * implementation of vhci_scsi_get_name_bus_addr().
		 */
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
	ASSERT(vhci != NULL);

	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
	    "!tgt_init: called for %s (instance %d)\n",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));

	vlun = vhci_lun_lookup(tgt_dip);

	mutex_enter(&vhci_global_mutex);

	from_ticks = ddi_get_lbolt();
	if (vhci_to_ticks == 0) {
		vhci_to_ticks = from_ticks +
		    drv_usectohz(vhci_init_wait_timeout);
	}

#if DEBUG
	if (vlun) {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
		    "from_ticks %lx to_ticks %lx",
		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
	} else {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : vlun not found "
		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
		    vhci_to_ticks));
	}
#endif

	rval = mdi_select_path(tgt_dip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_rele_path(pip);
	}

	/*
	 * Wait for the following conditions :
	 *	1. no vlun available yet
	 *	2. no path established
	 *	3. timer did not expire
	 */
	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
	    (rval != MDI_SUCCESS)) {
		if (vlun && vlun->svl_not_supported) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
			    "vlun 0x%p lun guid %s not supported!",
			    (void *)vlun, guid));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
			vhci_first_time = 1;
		}
		if (vhci_first_time == 1) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
			    "no wait for %s. from_tick %lx, to_tick %lx",
			    guid, from_ticks, vhci_to_ticks));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}

		if (cv_timedwait(&vhci_cv,
		    &vhci_global_mutex, vhci_to_ticks) == -1) {
			/* Timed out */
#ifdef DEBUG
			if (vlun == NULL) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: no vlun for %s!", guid));
			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path count is "
				    "zero for %s!", guid));
			} else {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path not "
				    "available yet for %s!", guid));
			}
#endif /* DEBUG */
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		vlun = vhci_lun_lookup(tgt_dip);
		rval = mdi_select_path(tgt_dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if (rval == MDI_SUCCESS) {
			mdi_rele_path(pip);
		}
		from_ticks = ddi_get_lbolt();
	}
	mutex_exit(&vhci_global_mutex);

	ASSERT(vlun != NULL);
	ddi_prop_free(guid);

	scsi_device_hba_private_set(sd, vlun);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
}
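
/*
 * Note on the partial bcopy() in vhci_pgr_register_start() below: the
 * copy size is sizeof (vhci_prout_t) minus its two trailing
 * MHIOC_RESV_KEY_SIZE arrays, which are presumably vhci-maintained
 * scratch keys rather than part of the caller-supplied PROUT
 * parameter data.
 */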

/*
 * a PGR register command has started; copy the info we need
 */
int
vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
{
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	void			*addr;

	if (!vpkt->vpkt_tgt_init_bp)
		return (TRAN_BADPKT);

	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
	if (addr == NULL)
		return (TRAN_BUSY);

	mutex_enter(&vlun->svl_mutex);

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");

	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");

	vlun->svl_time = pkt->pkt_time;
	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
	vlun->svl_first_path = vpkt->vpkt_path;
	mutex_exit(&vlun->svl_mutex);
	return (0);
}
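
/*
 * A note on SCSI-II RESERVE handling in vhci_scsi_start() below:
 * quiescing the lun may block (cv_timedwait), so a RESERVE arriving
 * in interrupt context is re-dispatched through vhci->vhci_taskq
 * (vhci_dispatch_scsi_start), which re-enters vhci_scsi_start() with
 * VHCI_PKT_THRU_TASKQ set and services the command in thread context.
 */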

/*
 * Function name : vhci_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
 *					  or other fatal failure
 *					  preventing packet transportation
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to phci
 *					  (or is held in the waitQ)
 * Description	 : Implements SCSA's tran_start() entry point for
 *		   packet transport
 *
 */
static int
vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int			rval = TRAN_ACCEPT;
	int			instance, held;
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	int			flags = 0;
	scsi_vhci_priv_t	*svp;
	dev_info_t		*cdip;
	client_lb_t		lbp;
	int			restore_lbp = 0;
	/* set if pkt is SCSI-II RESERVE cmd */
	int			pkt_reserve_cmd = 0;
	int			reserve_failed = 0;

	ASSERT(vhci != NULL);
	ASSERT(vpkt != NULL);
	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
	cdip = ADDR2DIP(ap);

	/*
	 * Block IOs if LUN is held or QUIESCED for IOs.
	 */
	if ((VHCI_LUN_IS_HELD(vlun)) ||
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		return (TRAN_BUSY);
	}

	/*
	 * vhci_lun needs to be quiesced before SCSI-II RESERVE command
	 * can be issued. This may require a cv_timedwait, which is
	 * dangerous to perform in an interrupt context. So if this
	 * is a RESERVE command a taskq is dispatched to service it.
	 * This taskq shall again call vhci_scsi_start, but we shall be
	 * sure it's not in an interrupt context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
			if (taskq_dispatch(vhci->vhci_taskq,
			    vhci_dispatch_scsi_start, (void *) vpkt,
			    KM_NOSLEEP)) {
				return (TRAN_ACCEPT);
			} else {
				return (TRAN_BUSY);
			}
		}

		/*
		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
		 * get serviced for a lun.
		 */
		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
		if (!held) {
			return (TRAN_BUSY);
		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
		    VLUN_QUIESCED_FLG) {
			VHCI_RELEASE_LUN(vlun);
			return (TRAN_BUSY);
		}

		/*
		 * To ensure that no IOs occur for this LUN for the duration
		 * of this pkt set the VLUN_QUIESCED_FLG.
		 * In case this routine needs to exit on error make sure that
		 * this flag is cleared.
		 */
		vlun->svl_flags |= VLUN_QUIESCED_FLG;
		pkt_reserve_cmd = 1;

		/*
		 * if this is a SCSI-II RESERVE command, set load balancing
		 * policy to be ALTERNATE PATH to ensure that all subsequent
		 * IOs are routed on the same path. This is because if commands
		 * are routed across multiple paths then IOs on paths other
		 * than the one on which the RESERVE was executed will get a
		 * RESERVATION CONFLICT
		 */
		lbp = mdi_get_lb_policy(cdip);
		if (lbp != LOAD_BALANCE_NONE) {
			if (vhci_quiesce_lun(vlun) != 1) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			vlun->svl_lb_policy_save = lbp;
			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
			    MDI_SUCCESS) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			restore_lbp = 1;
		}
		/*
		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
		 * To narrow this window where a reserve command may be sent
		 * down an inactive path the path states first need to be
		 * updated. Before calling vhci_update_pathstates reset
		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
		 * for this lun. This shall prevent an unnecessary reset
		 * from being sent out.
		 */
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		vhci_update_pathstates((void *)vlun);
	}

	instance = ddi_get_instance(vhci->vhci_dip);

	/*
	 * If the command is PRIN with action of zero, then the cmd
	 * is reading PR keys which requires filtering on completion.
	 * Data cache sync must be guaranteed.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
	    (vpkt->vpkt_org_vpkt == NULL)) {
		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
	}

	/*
	 * Do not defer bind for PKT_DMA_PARTIAL
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {

		/* This is a non pkt_dma_partial case */
		if ((rval = vhci_bind_transport(
		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
		    != TRAN_ACCEPT) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci%d %x: failed to bind transport: "
			    "vlun 0x%p pkt_reserved %x restore_lbp %x,"
			    "lbp %x", instance, rval, (void *)vlun,
			    pkt_reserve_cmd, restore_lbp, lbp));
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
			if (pkt_reserve_cmd)
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			return (rval);
		}
		VHCI_DEBUG(8, (CE_NOTE, NULL,
		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
	}
	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	ASSERT(vpkt->vpkt_path != NULL);

	/*
	 * This is the chance to adjust the pHCI's pkt and other information
	 * from target driver's pkt.
	 */
	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
	    (void *)vpkt));
	vhci_update_pHCI_pkt(vpkt, pkt);

	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
lb_policy %x", 1271 (void *)vlun, (void *)vpkt->vpkt_path, 1272 (void *)vlun->svl_resrv_pip, 1273 mdi_get_lb_policy(cdip))); 1274 reserve_failed = 1; 1275 } 1276 } 1277 1278 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 1279 if (svp == NULL || reserve_failed) { 1280 if (pkt_reserve_cmd) { 1281 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip, 1282 "!vhci_bind returned null svp vlun 0x%p", 1283 (void *)vlun)); 1284 vlun->svl_flags &= ~VLUN_QUIESCED_FLG; 1285 if (restore_lbp) 1286 (void) mdi_set_lb_policy(cdip, lbp); 1287 } 1288 pkt_cleanup: 1289 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 1290 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1291 vpkt->vpkt_hba_pkt = NULL; 1292 if (vpkt->vpkt_path) { 1293 mdi_rele_path(vpkt->vpkt_path); 1294 vpkt->vpkt_path = NULL; 1295 } 1296 } 1297 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) && 1298 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 1299 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) { 1300 sema_v(&vlun->svl_pgr_sema); 1301 } 1302 return (TRAN_BUSY); 1303 } 1304 1305 VHCI_INCR_PATH_CMDCOUNT(svp); 1306 1307 /* 1308 * Ensure that no other IOs raced ahead, while a RESERVE cmd was 1309 * QUIESCING the same lun. 1310 */ 1311 if ((!pkt_reserve_cmd) && 1312 ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) { 1313 VHCI_DECR_PATH_CMDCOUNT(svp); 1314 goto pkt_cleanup; 1315 } 1316 1317 if ((pkt->pkt_cdbp[0] == SCMD_PRIN) || 1318 (pkt->pkt_cdbp[0] == SCMD_PROUT)) { 1319 /* 1320 * currently this thread only handles running PGR 1321 * commands, so don't bother creating it unless 1322 * something interesting is going to happen (like 1323 * either a PGR out, or a PGR in with enough space 1324 * to hold the keys that are getting returned) 1325 */ 1326 mutex_enter(&vlun->svl_mutex); 1327 if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) && 1328 (pkt->pkt_cdbp[0] == SCMD_PROUT)) { 1329 vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon", 1330 1, MINCLSYSPRI, 1, 4, 0); 1331 vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG; 1332 } 1333 mutex_exit(&vlun->svl_mutex); 1334 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) && 1335 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 1336 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) { 1337 if (rval = vhci_pgr_register_start(vlun, pkt)) { 1338 /* an error */ 1339 sema_v(&vlun->svl_pgr_sema); 1340 return (rval); 1341 } 1342 } 1343 } 1344 1345 /* 1346 * SCSI-II RESERVE cmd is not expected in polled mode. 1347 * If this changes it needs to be handled for the polled scenario. 1348 */ 1349 flags = vpkt->vpkt_hba_pkt->pkt_flags; 1350 1351 /* 1352 * Set the path_instance *before* sending the scsi_pkt down the path 1353 * to mpxio's pHCI so that additional path abstractions at a pHCI 1354 * level (like maybe iSCSI at some point in the future) can update 1355 * the path_instance. 
	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
		vpkt->vpkt_hba_pkt->pkt_path_instance =
		    mdi_pi_get_path_instance(vpkt->vpkt_path);

	rval = scsi_transport(vpkt->vpkt_hba_pkt);
	if (rval == TRAN_ACCEPT) {
		if (flags & FLAG_NOINTR) {
			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;

			ASSERT(tpkt != NULL);
			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
			tpkt->pkt_resid = pkt->pkt_resid;
			tpkt->pkt_state = pkt->pkt_state;
			tpkt->pkt_statistics = pkt->pkt_statistics;
			tpkt->pkt_reason = pkt->pkt_reason;

			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
				    vpkt->vpkt_tgt_init_scblen);
			}

			VHCI_DECR_PATH_CMDCOUNT(svp);
			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
				vpkt->vpkt_hba_pkt = NULL;
				if (vpkt->vpkt_path) {
					mdi_rele_path(vpkt->vpkt_path);
					vpkt->vpkt_path = NULL;
				}
			}
			/*
			 * This path will not automatically retry pkts
			 * internally, therefore, vpkt_org_vpkt should
			 * never be set.
			 */
			ASSERT(vpkt->vpkt_org_vpkt == NULL);
			scsi_hba_pkt_comp(tpkt);
		}
		return (rval);
	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (pkt_reserve_cmd) {
		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
		    "!vhci_scsi_start: reserve failed vlun 0x%p",
		    (void *)vlun));
		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
		if (restore_lbp)
			(void) mdi_set_lb_policy(cdip, lbp);
	}

	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	VHCI_DECR_PATH_CMDCOUNT(svp);

	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
		vpkt->vpkt_hba_pkt = NULL;
		if (vpkt->vpkt_path) {
			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
			mdi_rele_path(vpkt->vpkt_path);
			vpkt->vpkt_path = NULL;
		}
	}
	return (TRAN_BUSY);
}

/*
 * Function name : vhci_scsi_reset()
 *
 * Return Values : 0 - reset failed
 *		   1 - reset succeeded
 */

/* ARGSUSED */
static int
vhci_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = 0;

	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
		return (vhci_scsi_reset_target(ap, level, TRUE));
	} else if (level == RESET_ALL) {
		return (vhci_scsi_reset_bus(ap));
	}

	return (rval);
}
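
/*
 * Example (sketch) for vhci_recovery_reset() below: a caller wanting
 * lun-then-target escalation with automatic path selection would issue
 *
 *	(void) vhci_recovery_reset(vlun, ap, TRUE, VHCI_DEPTH_TARGET);
 *
 * where TRUE lets vhci_scsi_reset_target() pick paths via
 * mdi_select_path().
 */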

/*
 * vhci_recovery_reset:
 *	Issues reset to the device
 * Input:
 *	vlun - vhci lun pointer of the device
 *	ap - address of the device
 *	select_path:
 *		If select_path is FALSE, then the address specified in ap is
 *		the path on which reset will be issued.
 *		If select_path is TRUE, then path is obtained by calling
 *		mdi_select_path.
 *
 *	recovery_depth:
 *		Caller can specify the level of reset.
 *		VHCI_DEPTH_LUN -
 *			Issues LUN RESET if device supports lun reset.
 *		VHCI_DEPTH_TARGET -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET
 *		VHCI_DEPTH_ALL -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET.
 *			If TARGET RESET does not succeed, issues Bus Reset.
 */

static int
vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
    uint8_t select_path, uint8_t recovery_depth)
{
	int	ret = 0;

	ASSERT(ap != NULL);

	if (vlun && vlun->svl_support_lun_reset == 1) {
		ret = vhci_scsi_reset_target(ap, RESET_LUN,
		    select_path);
	}

	recovery_depth--;

	if ((ret == 0) && recovery_depth) {
		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
		    select_path);
		recovery_depth--;
	}

	if ((ret == 0) && recovery_depth) {
		(void) scsi_reset(ap, RESET_ALL);
	}

	return (ret);
}

/*
 * Note: The scsi_address passed to this routine could be the scsi_address
 * for the virtual device or the physical device. No assumptions should be
 * made in this routine about the contents of the ap structure.
 * Further, note that the child dip would be the dip of the ssd node regardless
 * of the scsi_address passed in.
 */
static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
	dev_info_t		*vdip, *cdip;
	mdi_pathinfo_t		*pip = NULL;
	mdi_pathinfo_t		*npip = NULL;
	int			rval = -1;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	struct scsi_vhci	*vhci = NULL;

	if (select_path != TRUE) {
		ASSERT(ap != NULL);
		if (level == RESET_LUN) {
			hba = ap->a_hba_tran;
			ASSERT(hba != NULL);
			return (hba->tran_reset(ap, RESET_LUN));
		}
		return (scsi_reset(ap, level));
	}

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	vdip = ddi_get_parent(cdip);
	ASSERT(vdip != NULL);
	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "Unable to get a path, dip 0x%p", (void *)cdip));
		return (0);
	}
again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (0);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (0);
	}

	pap = &svp->svp_psd->sd_address;
	hba = pap->a_hba_tran;

	ASSERT(pap != NULL);
	ASSERT(hba != NULL);

	if (hba->tran_reset != NULL) {
		if (hba->tran_reset(pap, level) == 0) {
			vhci_log(CE_WARN, vdip, "!%s%d: "
			    "path %s, reset %d failed",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    mdi_pi_spathname(pip), level);

			/*
			 * Select next path and issue the reset, repeat
			 * until all paths are exhausted
			 */
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_ONLINE_PATH, pip, &npip);
			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
				mdi_rele_path(pip);
				return (0);
			}
			mdi_rele_path(pip);
			pip = npip;
			goto again;
		}
		mdi_rele_path(pip);
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
		    "reset %d sent down pip:%p for cdip:%p\n", level,
		    (void *)pip, (void *)cdip));
		return (1);
	}
	mdi_rele_path(pip);
	return (0);
}


/* ARGSUSED */
static int
vhci_scsi_reset_bus(struct scsi_address *ap)
{
	return (1);
}


/*
 * called by vhci_getcap and vhci_setcap to get and set (respectively)
 * SCSI capabilities
 */
/* ARGSUSED */
static int
vhci_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	int			cidx;
	int			rval = 0;

	if (cap == (char *)0) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: invalid arg"));
		return (rval);
	}

	if (vlun == NULL) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: vlun is null"));
		return (rval);
	}

	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		return (UNDEFINED);
	}

	/*
	 * Process setcap request.
	 */
	if (doset) {
		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:
			if (val == 0) {
				rval = 0;
			} else {
				rval = 1;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			if (tgtonly == 0) {
				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
				    "scsi_vhci_setcap: "
				    "Returning error since whom = 0"));
				rval = -1;
				break;
			}
			/*
			 * Set the capability accordingly.
			 */
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_support_lun_reset = val;
			rval = val;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_sector_size = val;
			vlun->svl_setcap_done = 1;
			mutex_exit(&vlun->svl_mutex);
			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);

			/* Always return success */
			rval = 1;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_setcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			/*
			 * For X86 this capability is caught in scsi_ifgetcap().
			 * XXX Should this be getting the value from the pHCI?
			 */
			rval = (int)VHCI_DMA_MAX_XFER_CAP;
			break;

		case SCSI_CAP_INITIATOR_ID:
			rval = 0x00;
			break;

		case SCSI_CAP_ARQ:
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = 1;
			break;

		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;

		case SCSI_CAP_INTERCONNECT_TYPE:
			rval = INTERCONNECT_FABRIC;
			break;

		case SCSI_CAP_LUN_RESET:
			/*
			 * scsi_vhci will always return success for LUN reset.
			 * When request for doing LUN reset comes
			 * through scsi_reset entry point, at that time attempt
			 * will be made to do reset through all the possible
			 * paths.
			 */
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_support_lun_reset;
			mutex_exit(&vlun->svl_mutex);
			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
			    "scsi_vhci_getcap:"
			    "Getting the Lun reset capability %d", rval));
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_sector_size;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_CDB_LEN:
			rval = VHCI_SCSI_CDB_SIZE;
			break;

		case SCSI_CAP_DMA_MAX_ARCH:
			/*
			 * For X86 this capability is caught in scsi_ifgetcap().
			 * XXX Should this be getting the value from the pHCI?
			 */
			rval = 0;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_getcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));
	}
	return (rval);
}


/*
 * Function name : vhci_scsi_getcap()
 *
 */
static int
vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	return (vhci_commoncap(ap, cap, 0, whom, 0));
}

static int
vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	return (vhci_commoncap(ap, cap, value, whom, 1));
}

/*
 * Function name : vhci_scsi_abort()
 */
/* ARGSUSED */
static int
vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	return (0);
}
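
/*
 * Example (sketch): a capability query against the vhci node, e.g.
 * scsi_ifgetcap(ap, "lun-reset", 1), is routed through
 * vhci_scsi_getcap() -> vhci_commoncap() above and returns the
 * per-lun svl_support_lun_reset value ("lun-reset" is assumed here to
 * be the SCSA capstr for SCSI_CAP_LUN_RESET).
 */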
CFLAG_NOWAIT : 0; 1860 vpkt->vpkt_state = VHCI_PKT_IDLE; 1861 vpkt->vpkt_tgt_init_cdblen = cmdlen; 1862 vpkt->vpkt_tgt_init_scblen = statuslen; 1863 newpkt = 1; 1864 } else { /* pkt not NULL */ 1865 vpkt = pkt->pkt_ha_private; 1866 } 1867 1868 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt " 1869 "vpkt %p flags %x\n", (void *)vpkt, flags)); 1870 1871 /* Clear any stale error flags */ 1872 if (bp) { 1873 bioerror(bp, 0); 1874 } 1875 1876 vpkt->vpkt_tgt_init_bp = bp; 1877 1878 if (flags & PKT_DMA_PARTIAL) { 1879 1880 /* 1881 * Immediate binding is needed. 1882 * Target driver may not set this flag in next invocation. 1883 * vhci has to remember this flag was set during first 1884 * invocation of vhci_scsi_init_pkt. 1885 */ 1886 vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL; 1887 } 1888 1889 if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) { 1890 1891 /* 1892 * Re-initialize some of the target driver packet state 1893 * information. 1894 */ 1895 vpkt->vpkt_tgt_pkt->pkt_state = 0; 1896 vpkt->vpkt_tgt_pkt->pkt_statistics = 0; 1897 vpkt->vpkt_tgt_pkt->pkt_reason = 0; 1898 1899 /* 1900 * Binding a vpkt->vpkt_path for this IO at init_time. 1901 * If an IO error happens later, target driver will clear 1902 * this vpkt->vpkt_path binding before re-init IO again. 1903 */ 1904 VHCI_DEBUG(8, (CE_NOTE, NULL, 1905 "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n", 1906 (void *)vpkt, newpkt)); 1907 if (pkt && vpkt->vpkt_hba_pkt) { 1908 VHCI_DEBUG(4, (CE_NOTE, NULL, 1909 "v_s_i_p calling update_pHCI_pkt resid %ld\n", 1910 pkt->pkt_resid)); 1911 vhci_update_pHCI_pkt(vpkt, pkt); 1912 } 1913 if (callback == SLEEP_FUNC) { 1914 rval = vhci_bind_transport( 1915 ap, vpkt, flags, callback); 1916 } else { 1917 rval = vhci_bind_transport( 1918 ap, vpkt, flags, NULL_FUNC); 1919 } 1920 VHCI_DEBUG(8, (CE_NOTE, NULL, 1921 "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n", 1922 (void *)vpkt, rval)); 1923 if (bp) { 1924 if (rval == TRAN_FATAL_ERROR) { 1925 /* 1926 * No paths available. Could not bind 1927 * any pHCI. Setting EFAULT as a way 1928 * to indicate no DMA is mapped. 1929 */ 1930 bioerror(bp, EFAULT); 1931 } else { 1932 /* 1933 * Do not indicate any pHCI errors to 1934 * target driver otherwise. 
1935 */ 1936 bioerror(bp, 0); 1937 } 1938 } 1939 if (rval != TRAN_ACCEPT) { 1940 VHCI_DEBUG(8, (CE_NOTE, NULL, 1941 "vhci_scsi_init_pkt: " 1942 "v_b_t failed 0x%p newpkt %x\n", 1943 (void *)vpkt, newpkt)); 1944 if (newpkt) { 1945 scsi_hba_pkt_free(ap, 1946 vpkt->vpkt_tgt_pkt); 1947 } 1948 return (NULL); 1949 } 1950 ASSERT(vpkt->vpkt_hba_pkt != NULL); 1951 ASSERT(vpkt->vpkt_path != NULL); 1952 1953 /* Update the resid for the target driver */ 1954 vpkt->vpkt_tgt_pkt->pkt_resid = 1955 vpkt->vpkt_hba_pkt->pkt_resid; 1956 } 1957 1958 return (vpkt->vpkt_tgt_pkt); 1959 } 1960 1961 /* 1962 * Function name : vhci_scsi_destroy_pkt 1963 * 1964 * Return Values : none 1965 */ 1966 static void 1967 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 1968 { 1969 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 1970 1971 VHCI_DEBUG(8, (CE_NOTE, NULL, 1972 "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt)); 1973 1974 vpkt->vpkt_tgt_init_pkt_flags = 0; 1975 if (vpkt->vpkt_hba_pkt) { 1976 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1977 vpkt->vpkt_hba_pkt = NULL; 1978 } 1979 if (vpkt->vpkt_path) { 1980 mdi_rele_path(vpkt->vpkt_path); 1981 vpkt->vpkt_path = NULL; 1982 } 1983 1984 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED); 1985 scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt); 1986 } 1987 1988 /* 1989 * Function name : vhci_scsi_dmafree() 1990 * 1991 * Return Values : none 1992 */ 1993 /*ARGSUSED*/ 1994 static void 1995 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 1996 { 1997 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 1998 1999 VHCI_DEBUG(6, (CE_NOTE, NULL, 2000 "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt)); 2001 2002 ASSERT(vpkt != NULL); 2003 if (vpkt->vpkt_hba_pkt) { 2004 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 2005 vpkt->vpkt_hba_pkt = NULL; 2006 } 2007 if (vpkt->vpkt_path) { 2008 mdi_rele_path(vpkt->vpkt_path); 2009 vpkt->vpkt_path = NULL; 2010 } 2011 } 2012 2013 /* 2014 * Function name : vhci_scsi_sync_pkt() 2015 * 2016 * Return Values : none 2017 */ 2018 /*ARGSUSED*/ 2019 static void 2020 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 2021 { 2022 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 2023 2024 ASSERT(vpkt != NULL); 2025 if (vpkt->vpkt_hba_pkt) { 2026 scsi_sync_pkt(vpkt->vpkt_hba_pkt); 2027 } 2028 } 2029 2030 /* 2031 * routine for reset notification setup, to register or cancel. 2032 */ 2033 static int 2034 vhci_scsi_reset_notify(struct scsi_address *ap, int flag, 2035 void (*callback)(caddr_t), caddr_t arg) 2036 { 2037 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2038 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg, 2039 &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf)); 2040 } 2041 2042 static int 2043 vhci_scsi_get_name_bus_addr(struct scsi_device *sd, 2044 char *name, int len, int bus_addr) 2045 { 2046 dev_info_t *cdip; 2047 char *guid; 2048 scsi_vhci_lun_t *vlun; 2049 2050 ASSERT(sd != NULL); 2051 ASSERT(name != NULL); 2052 2053 *name = 0; 2054 cdip = sd->sd_dev; 2055 2056 ASSERT(cdip != NULL); 2057 2058 if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS) 2059 return (1); 2060 2061 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS, 2062 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) 2063 return (1); 2064 2065 /* 2066 * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>". 
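 * A hypothetical instance of such a message:
 *	sd3 at scsi_vhci0: unit-address g600a0b800026d3620000a4a1466eeb4b: f_sym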
2067 * <guid> bus_addr argument == 0 2068 * <bus_addr> bus_addr argument != 0 2069 * Since the <guid> is already provided with unit-address, we just 2070 * provide failover module in <bus_addr> to keep output shorter. 2071 */ 2072 vlun = ADDR2VLUN(&sd->sd_address); 2073 if (bus_addr == 0) { 2074 /* report the guid: */ 2075 (void) snprintf(name, len, "g%s", guid); 2076 } else if (vlun && vlun->svl_fops_name) { 2077 /* report the name of the failover module */ 2078 (void) snprintf(name, len, "%s", vlun->svl_fops_name); 2079 } 2080 2081 ddi_prop_free(guid); 2082 return (1); 2083 } 2084 2085 static int 2086 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len) 2087 { 2088 return (vhci_scsi_get_name_bus_addr(sd, name, len, 1)); 2089 } 2090 2091 static int 2092 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len) 2093 { 2094 return (vhci_scsi_get_name_bus_addr(sd, name, len, 0)); 2095 } 2096 2097 /* 2098 * Return a pointer to the guid part of the devnm. 2099 * devnm format is "nodename@busaddr", busaddr format is "gGUID". 2100 */ 2101 static char * 2102 vhci_devnm_to_guid(char *devnm) 2103 { 2104 char *cp = devnm; 2105 2106 if (devnm == NULL) 2107 return (NULL); 2108 2109 while (*cp != '\0' && *cp != '@') 2110 cp++; 2111 if (*cp == '@' && *(cp + 1) == 'g') 2112 return (cp + 2); 2113 return (NULL); 2114 } 2115 2116 static int 2117 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags, 2118 int (*func)(caddr_t)) 2119 { 2120 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2121 dev_info_t *cdip = ADDR2DIP(ap); 2122 mdi_pathinfo_t *pip = NULL; 2123 mdi_pathinfo_t *npip = NULL; 2124 scsi_vhci_priv_t *svp = NULL; 2125 struct scsi_device *psd = NULL; 2126 struct scsi_address *address = NULL; 2127 struct scsi_pkt *pkt = NULL; 2128 int rval = -1; 2129 int pgr_sema_held = 0; 2130 int held; 2131 int mps_flag = MDI_SELECT_ONLINE_PATH; 2132 struct scsi_vhci_lun *vlun; 2133 time_t tnow; 2134 int path_instance = 0; 2135 2136 vlun = ADDR2VLUN(ap); 2137 ASSERT(vlun != 0); 2138 2139 if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) && 2140 (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2141 VHCI_PROUT_REGISTER) || 2142 ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2143 VHCI_PROUT_R_AND_IGNORE))) { 2144 if (!sema_tryp(&vlun->svl_pgr_sema)) 2145 return (TRAN_BUSY); 2146 pgr_sema_held = 1; 2147 if (vlun->svl_first_path != NULL) { 2148 rval = mdi_select_path(cdip, NULL, 2149 MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH, 2150 NULL, &pip); 2151 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 2152 VHCI_DEBUG(4, (CE_NOTE, NULL, 2153 "vhci_bind_transport: path select fail\n")); 2154 } else { 2155 npip = pip; 2156 do { 2157 if (npip == vlun->svl_first_path) { 2158 VHCI_DEBUG(4, (CE_NOTE, NULL, 2159 "vhci_bind_transport: " 2160 "valid first path 0x%p\n", 2161 (void *) 2162 vlun->svl_first_path)); 2163 pip = vlun->svl_first_path; 2164 goto bind_path; 2165 } 2166 pip = npip; 2167 rval = mdi_select_path(cdip, NULL, 2168 MDI_SELECT_ONLINE_PATH | 2169 MDI_SELECT_STANDBY_PATH, 2170 pip, &npip); 2171 mdi_rele_path(pip); 2172 } while ((rval == MDI_SUCCESS) && 2173 (npip != NULL)); 2174 } 2175 } 2176 2177 if (vlun->svl_first_path) { 2178 VHCI_DEBUG(4, (CE_NOTE, NULL, 2179 "vhci_bind_transport: invalid first path 0x%p\n", 2180 (void *)vlun->svl_first_path)); 2181 vlun->svl_first_path = NULL; 2182 } 2183 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) { 2184 if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) { 2185 if (!sema_tryp(&vlun->svl_pgr_sema)) 2186 return (TRAN_BUSY); 2187 } 
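		/*
		 * At this point the LUN's PGR semaphore is effectively held:
		 * it was either acquired just above, or is still held from
		 * the original attempt when a PRIN is resent through the
		 * taskq (VHCI_PKT_THRU_TASKQ set).
		 */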
2188		pgr_sema_held = 1;
2189	}
2190
2191	/*
2192	 * If the path is already bound (the PKT_PARTIAL_DMA case),
2193	 * try to use the same path.
2194	 */
2195	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2196		VHCI_DEBUG(4, (CE_NOTE, NULL,
2197		    "vhci_bind_transport: PKT_PARTIAL_DMA "
2198		    "vpkt 0x%p, path 0x%p\n",
2199		    (void *)vpkt, (void *)vpkt->vpkt_path));
2200		pip = vpkt->vpkt_path;
2201		goto bind_path;
2202	}
2203
2204	/*
2205	 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
2206	 * indicates that mdi_select_path should be called to select a
2207	 * specific instance.
2208	 *
2209	 * NB: Condition pkt_path_instance reference on proper allocation.
2210	 */
2211	if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
2212	    scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
2213		path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2214	}
2215
2216	/*
2217	 * If a reservation is active, bind the transport directly to the
2218	 * pip with the reservation.
2219	 */
2220	if (vpkt->vpkt_hba_pkt == NULL) {
2221		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2222			if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2223				pip = vlun->svl_resrv_pip;
2224				mdi_hold_path(pip);
2225				vlun->svl_waiting_for_activepath = 0;
2226				rval = MDI_SUCCESS;
2227				goto bind_path;
2228			} else {
2229				if (pgr_sema_held) {
2230					sema_v(&vlun->svl_pgr_sema);
2231				}
2232				return (TRAN_BUSY);
2233			}
2234		}
2235 try_again:
2236		rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2237		    path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2238		    (void *)(intptr_t)path_instance, &pip);
2239		if (rval == MDI_BUSY) {
2240			if (pgr_sema_held) {
2241				sema_v(&vlun->svl_pgr_sema);
2242			}
2243			return (TRAN_BUSY);
2244		} else if (rval == MDI_DEVI_ONLINING) {
2245			/*
2246			 * If we are here then we are in the midst of an
2247			 * attach/probe of the client device. We attempt to
2248			 * bind to an ONLINE path if available, else it is OK
2249			 * to bind to a STANDBY path (instead of triggering a
2250			 * failover) because I/O associated with attach/probe
2251			 * (eg. INQUIRY, block 0 read) is completed by targets
2252			 * even on passive paths.
2253			 * If no ONLINE paths are available, it is important
2254			 * to set svl_waiting_for_activepath for two
2255			 * reasons: (1) avoid sense analysis in the
2256			 * "external failure detection" codepath in
2257			 * vhci_intr(). Failure to do so will result in an
2258			 * infinite loop (unless an ONLINE path becomes
2259			 * available at some point), and (2) avoid
2260			 * unnecessary failover (see the "---Waiting For Active
2261			 * Path---" comment below).
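			 * svl_waiting_for_activepath is cleared again either
			 * when an active path shows up or when the 60-second
			 * wait in that loop expires.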
2262			 */
2263			VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2264			    "state\n", (void *)cdip));
2265			pip = NULL;
2266			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2267			    mps_flag, NULL, &pip);
2268			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2269				if (vlun->svl_waiting_for_activepath == 0) {
2270					vlun->svl_waiting_for_activepath = 1;
2271					vlun->svl_wfa_time = ddi_get_time();
2272				}
2273				mps_flag |= MDI_SELECT_STANDBY_PATH;
2274				rval = mdi_select_path(cdip,
2275				    vpkt->vpkt_tgt_init_bp,
2276				    mps_flag, NULL, &pip);
2277				if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2278					if (pgr_sema_held) {
2279						sema_v(&vlun->svl_pgr_sema);
2280					}
2281					return (TRAN_FATAL_ERROR);
2282				}
2283				goto bind_path;
2284			}
2285		} else if ((rval == MDI_FAILURE) ||
2286		    ((rval == MDI_NOPATH) && (path_instance))) {
2287			if (pgr_sema_held) {
2288				sema_v(&vlun->svl_pgr_sema);
2289			}
2290			return (TRAN_FATAL_ERROR);
2291		}
2292
2293		if ((pip == NULL) || (rval == MDI_NOPATH)) {
2294			while (vlun->svl_waiting_for_activepath) {
2295				/*
2296				 * ---Waiting For Active Path---
2297				 * This device was discovered across a
2298				 * passive path; let's wait for a little
2299				 * bit, hopefully an active path will
2300				 * show up obviating the need for a
2301				 * failover
2302				 */
2303				tnow = ddi_get_time();
2304				if (tnow - vlun->svl_wfa_time >= 60) {
2305					vlun->svl_waiting_for_activepath = 0;
2306				} else {
2307					drv_usecwait(1000);
2308					if (vlun->svl_waiting_for_activepath
2309					    == 0) {
2310						/*
2311						 * an active path has come
2312						 * online!
2313						 */
2314						goto try_again;
2315					}
2316				}
2317			}
2318			VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2319			if (!held) {
2320				VHCI_DEBUG(4, (CE_NOTE, NULL,
2321				    "!Lun not held\n"));
2322				if (pgr_sema_held) {
2323					sema_v(&vlun->svl_pgr_sema);
2324				}
2325				return (TRAN_BUSY);
2326			}
2327			/*
2328			 * now that the LUN is stable, one last check
2329			 * to make sure no other changes sneaked in
2330			 * (like a path coming online or a
2331			 * failover initiated by another thread)
2332			 */
2333			pip = NULL;
2334			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2335			    0, NULL, &pip);
2336			if (pip != NULL) {
2337				VHCI_RELEASE_LUN(vlun);
2338				vlun->svl_waiting_for_activepath = 0;
2339				goto bind_path;
2340			}
2341
2342			/*
2343			 * Check if there is an ONLINE path OR a STANDBY path
2344			 * available. If none is available, do not attempt
2345			 * to do a failover, just return a fatal error at this
2346			 * point.
2347			 */
2348			npip = NULL;
2349			rval = mdi_select_path(cdip, NULL,
2350			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2351			    NULL, &npip);
2352			if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2353				/*
2354				 * No paths available, just return a FATAL error.
2355 */ 2356 VHCI_RELEASE_LUN(vlun); 2357 if (pgr_sema_held) { 2358 sema_v(&vlun->svl_pgr_sema); 2359 } 2360 return (TRAN_FATAL_ERROR); 2361 } 2362 mdi_rele_path(npip); 2363 if (!(vpkt->vpkt_state & VHCI_PKT_IN_FAILOVER)) { 2364 VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking " 2365 "mdi_failover\n")); 2366 rval = mdi_failover(vhci->vhci_dip, cdip, 2367 MDI_FAILOVER_ASYNC); 2368 } else { 2369 rval = vlun->svl_failover_status; 2370 } 2371 if (rval == MDI_FAILURE) { 2372 VHCI_RELEASE_LUN(vlun); 2373 if (pgr_sema_held) { 2374 sema_v(&vlun->svl_pgr_sema); 2375 } 2376 return (TRAN_FATAL_ERROR); 2377 } else if (rval == MDI_BUSY) { 2378 VHCI_RELEASE_LUN(vlun); 2379 if (pgr_sema_held) { 2380 sema_v(&vlun->svl_pgr_sema); 2381 } 2382 return (TRAN_BUSY); 2383 } else { 2384 if (pgr_sema_held) { 2385 sema_v(&vlun->svl_pgr_sema); 2386 } 2387 vpkt->vpkt_state |= VHCI_PKT_IN_FAILOVER; 2388 return (TRAN_BUSY); 2389 } 2390 } 2391 vlun->svl_waiting_for_activepath = 0; 2392 bind_path: 2393 vpkt->vpkt_path = pip; 2394 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 2395 ASSERT(svp != NULL); 2396 2397 psd = svp->svp_psd; 2398 ASSERT(psd != NULL); 2399 address = &psd->sd_address; 2400 } else { 2401 pkt = vpkt->vpkt_hba_pkt; 2402 address = &pkt->pkt_address; 2403 } 2404 2405 /* Verify match of specified path_instance and selected path_instance */ 2406 ASSERT((path_instance == 0) || 2407 (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path))); 2408 2409 /* 2410 * For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever 2411 * target driver calls vhci_scsi_init_pkt. 2412 */ 2413 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && 2414 vpkt->vpkt_path && vpkt->vpkt_hba_pkt) { 2415 VHCI_DEBUG(4, (CE_NOTE, NULL, 2416 "vhci_bind_transport: PKT_PARTIAL_DMA " 2417 "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n", 2418 (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt)); 2419 pkt = vpkt->vpkt_hba_pkt; 2420 address = &pkt->pkt_address; 2421 } 2422 2423 if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) { 2424 pkt = scsi_init_pkt(address, pkt, 2425 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen, 2426 vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL); 2427 2428 if (pkt == NULL) { 2429 VHCI_DEBUG(4, (CE_NOTE, NULL, 2430 "!bind transport: 0x%p 0x%p 0x%p\n", 2431 (void *)vhci, (void *)psd, (void *)vpkt)); 2432 if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) { 2433 MDI_PI_ERRSTAT(vpkt->vpkt_path, 2434 MDI_PI_TRANSERR); 2435 mdi_rele_path(vpkt->vpkt_path); 2436 vpkt->vpkt_path = NULL; 2437 } 2438 if (pgr_sema_held) { 2439 sema_v(&vlun->svl_pgr_sema); 2440 } 2441 /* 2442 * Consider it a fatal error if b_error is 2443 * set as a result of DMA binding failure 2444 * vs. a condition of being temporarily out of 2445 * some resource 2446 */ 2447 if (vpkt->vpkt_tgt_init_bp == NULL || 2448 geterror(vpkt->vpkt_tgt_init_bp)) 2449 return (TRAN_FATAL_ERROR); 2450 else 2451 return (TRAN_BUSY); 2452 } 2453 } 2454 2455 pkt->pkt_private = vpkt; 2456 vpkt->vpkt_hba_pkt = pkt; 2457 return (TRAN_ACCEPT); 2458 } 2459 2460 2461 /*PRINTFLIKE3*/ 2462 void 2463 vhci_log(int level, dev_info_t *dip, const char *fmt, ...) 
2464 { 2465 char buf[256]; 2466 va_list ap; 2467 2468 va_start(ap, fmt); 2469 (void) vsprintf(buf, fmt, ap); 2470 va_end(ap); 2471 2472 scsi_log(dip, "scsi_vhci", level, buf); 2473 } 2474 2475 /* do a PGR out with the information we've saved away */ 2476 static int 2477 vhci_do_prout(scsi_vhci_priv_t *svp) 2478 { 2479 2480 struct scsi_pkt *new_pkt; 2481 struct buf *bp; 2482 scsi_vhci_lun_t *vlun = svp->svp_svl; 2483 int rval, retry, nr_retry, ua_retry; 2484 uint8_t *sns, skey; 2485 2486 bp = getrbuf(KM_SLEEP); 2487 bp->b_flags = B_WRITE; 2488 bp->b_resid = 0; 2489 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout; 2490 bp->b_bcount = vlun->svl_bcount; 2491 2492 VHCI_INCR_PATH_CMDCOUNT(svp); 2493 2494 new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, 2495 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0, 2496 SLEEP_FUNC, NULL); 2497 if (new_pkt == NULL) { 2498 VHCI_DECR_PATH_CMDCOUNT(svp); 2499 freerbuf(bp); 2500 cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed"); 2501 return (0); 2502 } 2503 mutex_enter(&vlun->svl_mutex); 2504 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout; 2505 bp->b_bcount = vlun->svl_bcount; 2506 bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp, 2507 sizeof (vlun->svl_cdb)); 2508 new_pkt->pkt_time = vlun->svl_time; 2509 mutex_exit(&vlun->svl_mutex); 2510 new_pkt->pkt_flags = FLAG_NOINTR; 2511 2512 ua_retry = nr_retry = retry = 0; 2513 again: 2514 rval = vhci_do_scsi_cmd(new_pkt); 2515 if (rval != 1) { 2516 if ((new_pkt->pkt_reason == CMD_CMPLT) && 2517 (SCBP_C(new_pkt) == STATUS_CHECK) && 2518 (new_pkt->pkt_state & STATE_ARQ_DONE)) { 2519 sns = (uint8_t *) 2520 &(((struct scsi_arq_status *)(uintptr_t) 2521 (new_pkt->pkt_scbp))->sts_sensedata); 2522 skey = scsi_sense_key(sns); 2523 if ((skey == KEY_UNIT_ATTENTION) || 2524 (skey == KEY_NOT_READY)) { 2525 int max_retry; 2526 struct scsi_failover_ops *fops; 2527 fops = vlun->svl_fops; 2528 rval = fops->sfo_analyze_sense(svp->svp_psd, 2529 sns, vlun->svl_fops_ctpriv); 2530 if (rval == SCSI_SENSE_NOT_READY) { 2531 max_retry = vhci_prout_not_ready_retry; 2532 retry = nr_retry++; 2533 delay(1*drv_usectohz(1000000)); 2534 } else { 2535 /* chk for state change and update */ 2536 if (rval == SCSI_SENSE_STATE_CHANGED) { 2537 int held; 2538 VHCI_HOLD_LUN(vlun, 2539 VH_NOSLEEP, held); 2540 if (!held) { 2541 rval = TRAN_BUSY; 2542 } else { 2543 /* chk for alua first */ 2544 vhci_update_pathstates( 2545 (void *)vlun); 2546 } 2547 } 2548 retry = ua_retry++; 2549 max_retry = VHCI_MAX_PGR_RETRIES; 2550 } 2551 if (retry < max_retry) { 2552 VHCI_DEBUG(4, (CE_WARN, NULL, 2553 "!vhci_do_prout retry 0x%x " 2554 "(0x%x 0x%x 0x%x)", 2555 SCBP_C(new_pkt), 2556 new_pkt->pkt_cdbp[0], 2557 new_pkt->pkt_cdbp[1], 2558 new_pkt->pkt_cdbp[2])); 2559 goto again; 2560 } 2561 rval = 0; 2562 VHCI_DEBUG(4, (CE_WARN, NULL, 2563 "!vhci_do_prout 0x%x " 2564 "(0x%x 0x%x 0x%x)", 2565 SCBP_C(new_pkt), 2566 new_pkt->pkt_cdbp[0], 2567 new_pkt->pkt_cdbp[1], 2568 new_pkt->pkt_cdbp[2])); 2569 } else if (skey == KEY_ILLEGAL_REQUEST) 2570 rval = VHCI_PGR_ILLEGALOP; 2571 } 2572 } else { 2573 rval = 1; 2574 } 2575 scsi_destroy_pkt(new_pkt); 2576 VHCI_DECR_PATH_CMDCOUNT(svp); 2577 freerbuf(bp); 2578 return (rval); 2579 } 2580 2581 static void 2582 vhci_run_cmd(void *arg) 2583 { 2584 struct scsi_pkt *pkt = (struct scsi_pkt *)arg; 2585 struct scsi_pkt *tpkt; 2586 scsi_vhci_priv_t *svp; 2587 mdi_pathinfo_t *pip, *npip; 2588 scsi_vhci_lun_t *vlun; 2589 dev_info_t *cdip; 2590 scsi_vhci_priv_t *nsvp; 2591 int fail = 0; 2592 int rval; 2593 struct vhci_pkt *vpkt; 2594 uchar_t cdb_1; 
2595 vhci_prout_t *prout; 2596 2597 vpkt = (struct vhci_pkt *)pkt->pkt_private; 2598 tpkt = vpkt->vpkt_tgt_pkt; 2599 pip = vpkt->vpkt_path; 2600 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 2601 if (svp == NULL) { 2602 tpkt->pkt_reason = CMD_TRAN_ERR; 2603 tpkt->pkt_statistics = STAT_ABORTED; 2604 goto done; 2605 } 2606 vlun = svp->svp_svl; 2607 prout = &vlun->svl_prout; 2608 if (SCBP_C(pkt) != STATUS_GOOD) 2609 fail++; 2610 cdip = vlun->svl_dip; 2611 pip = npip = NULL; 2612 rval = mdi_select_path(cdip, NULL, 2613 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip); 2614 if ((rval != MDI_SUCCESS) || (npip == NULL)) { 2615 VHCI_DEBUG(4, (CE_NOTE, NULL, 2616 "vhci_run_cmd: no path! 0x%p\n", (void *)svp)); 2617 tpkt->pkt_reason = CMD_TRAN_ERR; 2618 tpkt->pkt_statistics = STAT_ABORTED; 2619 goto done; 2620 } 2621 2622 cdb_1 = vlun->svl_cdb[1]; 2623 vlun->svl_cdb[1] &= 0xe0; 2624 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 2625 2626 do { 2627 nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 2628 if (nsvp == NULL) { 2629 VHCI_DEBUG(4, (CE_NOTE, NULL, 2630 "vhci_run_cmd: no " 2631 "client priv! 0x%p offlined?\n", 2632 (void *)npip)); 2633 goto next_path; 2634 } 2635 if (vlun->svl_first_path == npip) { 2636 goto next_path; 2637 } else { 2638 if (vhci_do_prout(nsvp) != 1) 2639 fail++; 2640 } 2641 next_path: 2642 pip = npip; 2643 rval = mdi_select_path(cdip, NULL, 2644 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 2645 pip, &npip); 2646 mdi_rele_path(pip); 2647 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 2648 2649 vlun->svl_cdb[1] = cdb_1; 2650 2651 if (fail) { 2652 VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, " 2653 "couldn't be replicated on all paths", 2654 ddi_driver_name(cdip), ddi_get_instance(cdip))); 2655 vhci_print_prout_keys(vlun, "vhci_run_cmd: "); 2656 2657 if (SCBP_C(pkt) != STATUS_GOOD) { 2658 tpkt->pkt_reason = CMD_TRAN_ERR; 2659 tpkt->pkt_statistics = STAT_ABORTED; 2660 } 2661 } else { 2662 vlun->svl_pgr_active = 1; 2663 vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:"); 2664 2665 bcopy((const void *)prout->service_key, 2666 (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE); 2667 bcopy((const void *)prout->res_key, 2668 (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE); 2669 2670 vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:"); 2671 } 2672 done: 2673 if (SCBP_C(pkt) == STATUS_GOOD) 2674 vlun->svl_first_path = NULL; 2675 2676 if (svp) 2677 VHCI_DECR_PATH_CMDCOUNT(svp); 2678 2679 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 2680 scsi_destroy_pkt(pkt); 2681 vpkt->vpkt_hba_pkt = NULL; 2682 if (vpkt->vpkt_path) { 2683 mdi_rele_path(vpkt->vpkt_path); 2684 vpkt->vpkt_path = NULL; 2685 } 2686 } 2687 2688 sema_v(&vlun->svl_pgr_sema); 2689 /* 2690 * The PROUT commands are not included in the automatic retry 2691 * mechanism, therefore, vpkt_org_vpkt should never be set here. 2692 */ 2693 ASSERT(vpkt->vpkt_org_vpkt == NULL); 2694 scsi_hba_pkt_comp(tpkt); 2695 } 2696 2697 /* 2698 * Get the keys registered with this target. Since we will have 2699 * registered the same key with multiple initiators, strip out 2700 * any duplicate keys. 2701 * 2702 * The pointers which will be used to filter the registered keys from 2703 * the device will be stored in filter_prin and filter_pkt. 
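 *
 * For reference (SPC-3 READ KEYS): the parameter data is an 8-byte
 * header -- a 4-byte PRGENERATION field followed by a 4-byte ADDITIONAL
 * LENGTH field, both big-endian -- followed by one 8-byte reservation
 * key per registration; VHCI_PRIN_HEADER_SZ below is that header size.
 *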
If the
2704 * allocation length of the buffer was sufficient for the number of
2705 * parameter data bytes available to be returned by the device, then the
2706 * key filtering will use the keylist returned from the original
2707 * request. If the allocation length of the buffer was not sufficient,
2708 * then the filtering will use the keylist returned from the request
2709 * that is resent below.
2710 *
2711 * If the device returns an additional length field that is greater than
2712 * the allocation length of the buffer, then allocate a new buffer which
2713 * can accommodate the number of parameter data bytes available to be
2714 * returned. Resend the SCSI PRIN command, filter out the duplicate
2715 * keys, return as many of the unique keys found as were originally
2716 * requested, and set the additional length field equal to the data bytes
2717 * of unique reservation keys available to be returned.
2718 *
2719 * If the device returns an additional length field that is less than or
2720 * equal to the allocation length of the buffer, then all the available
2721 * keys registered were returned by the device. Filter out the
2722 * duplicate keys, return all of the unique keys found, and set the
2723 * additional length field equal to the data bytes of the reservation
2724 * keys to be returned.
2725 */
2726
2727 #define	VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation))
2728
2729 static int
2730 vhci_do_prin(struct vhci_pkt **intr_vpkt)
2731 {
2732	scsi_vhci_priv_t *svp;
2733	struct vhci_pkt *vpkt = *intr_vpkt;
2734	vhci_prin_readkeys_t *prin;
2735	scsi_vhci_lun_t *vlun;
2736	struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address);
2737
2738	struct buf *new_bp = NULL;
2739	struct scsi_pkt *new_pkt = NULL;
2740	struct vhci_pkt *new_vpkt = NULL;
2741	uint32_t needed_length;
2742	int rval = VHCI_CMD_CMPLT;
2743	uint32_t prin_length = 0;
2744	uint32_t svl_prin_length = 0;
2745
2746	ASSERT(vpkt->vpkt_path);
2747	svp = mdi_pi_get_vhci_private(vpkt->vpkt_path);
2748	ASSERT(svp);
2749	vlun = svp->svp_svl;
2750	ASSERT(vlun);
2751
2752	/*
2753	 * If the caller only asked for an amount of data that would not
2754	 * be enough to include any key data, it is likely that they will
2755	 * send the next command with a buffer size based on the information
2756	 * from this header. Doing recovery on this would be a duplication
2757	 * of efforts.
2758	 */
2759	if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) {
2760		rval = VHCI_CMD_CMPLT;
2761		goto exit;
2762	}
2763
2764	if (vpkt->vpkt_org_vpkt == NULL) {
2765		/*
2766		 * Can fail as sleep is not allowed.
2767		 */
2768		prin = (vhci_prin_readkeys_t *)
2769		    bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2770	} else {
2771		/*
2772		 * The retry buf doesn't need to be mapped in.
2773		 */
2774		prin = (vhci_prin_readkeys_t *)
2775		    vpkt->vpkt_tgt_init_bp->b_un.b_daddr;
2776	}
2777
2778	if (prin == NULL) {
2779		VHCI_DEBUG(5, (CE_WARN, NULL,
2780		    "vhci_do_prin: bp_mapin_common failed."));
2781		rval = VHCI_CMD_ERROR;
2782		goto fail;
2783	}
2784
2785	prin_length = BE_32(prin->length);
2786
2787	/*
2788	 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2789	 * information to be transferred exceeds the maximum value
2790	 * that the ALLOCATION LENGTH field is capable of specifying,
2791	 * the device server shall...terminate the command with CHECK
2792	 * CONDITION status". The ALLOCATION LENGTH field of the
2793	 * PERSISTENT RESERVE IN command is 2 bytes.
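	 * A worked example: a device holding 24 registrations returns an
	 * ADDITIONAL LENGTH of 24 * 8 = 192 bytes, so a 200-byte buffer
	 * (header included) is sufficient; 0xFFFF is the most any single
	 * PRIN can ever transfer.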
We should never
2794	 * get here with an ADDITIONAL LENGTH greater than 0xFFFF,
2795	 * so if we do, then it is an error!
2796	 */
2797
2798
2799	if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) {
2800		VHCI_DEBUG(5, (CE_NOTE, NULL,
2801		    "vhci_do_prin: Device returned invalid "
2802		    "length 0x%x\n", prin_length));
2803		rval = VHCI_CMD_ERROR;
2804		goto fail;
2805	}
2806	needed_length = prin_length + VHCI_PRIN_HEADER_SZ;
2807
2808	/*
2809	 * If prin->length is greater than the byte count allocated in the
2810	 * original buffer, then resend the request with enough buffer
2811	 * allocated to get all of the available registered keys.
2812	 */
2813	if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) &&
2814	    (vpkt->vpkt_org_vpkt == NULL)) {
2815
2816		new_pkt = vhci_create_retry_pkt(vpkt);
2817		if (new_pkt == NULL) {
2818			rval = VHCI_CMD_ERROR;
2819			goto fail;
2820		}
2821		new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2822
2823		/*
2824		 * This is the buf with buffer pointer
2825		 * where the prin readkeys will be
2826		 * returned from the device
2827		 */
2828		new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
2829		    NULL, needed_length, B_READ, NULL_FUNC, NULL);
2830		if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) {
2831			if (new_bp) {
2832				scsi_free_consistent_buf(new_bp);
2833			}
2834			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2835			rval = VHCI_CMD_ERROR;
2836			goto fail;
2837		}
2838		new_bp->b_bcount = needed_length;
2839		new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8);
2840		new_pkt->pkt_cdbp[8] = (uchar_t)needed_length;
2841
2842		rval = VHCI_CMD_RETRY;
2843
2844		new_vpkt->vpkt_tgt_init_bp = new_bp;
2845	}
2846
2847	if (rval == VHCI_CMD_RETRY) {
2848
2849		/*
2850		 * There were more keys than the original request asked for.
2851		 */
2852		mdi_pathinfo_t *path_holder = vpkt->vpkt_path;
2853
2854		/*
2855		 * Release the old path because it does not matter which path
2856		 * this command is sent down. This allows the normal bind
2857		 * transport mechanism to be used.
2858		 */
2859		if (vpkt->vpkt_path != NULL) {
2860			mdi_rele_path(vpkt->vpkt_path);
2861			vpkt->vpkt_path = NULL;
2862		}
2863
2864		/*
2865		 * Dispatch the retry command
2866		 */
2867		if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2868		    (void *) new_vpkt, KM_NOSLEEP) == NULL) {
2869			if (path_holder) {
2870				vpkt->vpkt_path = path_holder;
2871				mdi_hold_path(path_holder);
2872			}
2873			scsi_free_consistent_buf(new_bp);
2874			vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2875			rval = VHCI_CMD_ERROR;
2876			goto fail;
2877		}
2878
2879		/*
2880		 * If we return VHCI_CMD_RETRY, that means the caller
2881		 * is going to bail and wait for the reissued command
2882		 * to complete. In that case, we need to decrement
2883		 * the path command count right now. In any other
2884		 * case, it'll be decremented by the caller.
2885		 */
2886		VHCI_DECR_PATH_CMDCOUNT(svp);
2887		goto exit;
2888
2889	}
2890
2891	if (rval == VHCI_CMD_CMPLT) {
2892		/*
2893		 * Either the original request got all of the keys, or this
2894		 * is the completion of the reissued recovery packet.
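		 * Duplicates arise because the same key is registered
		 * through every initiator port; e.g. a reported list
		 * {K1, K2, K1, K3} collapses to {K1, K2, K3} below.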
2895		 */
2896		int new;
2897		int old;
2898		int num_keys = prin_length / MHIOC_RESV_KEY_SIZE;
2899
2900		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
2901		    num_keys));
2902
2903 #ifdef DEBUG
2904		VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
2905		if (vhci_debug == 5)
2906			vhci_print_prin_keys(prin, num_keys);
2907		VHCI_DEBUG(5, (CE_NOTE, NULL,
2908		    "vhci_do_prin: MPxIO old keys:\n"));
2909		if (vhci_debug == 5)
2910			vhci_print_prin_keys(&vlun->svl_prin, num_keys);
2911 #endif
2912
2913		/*
2914		 * Filter out all duplicate keys returned from the device.
2915		 * We know that we use a different key for every host, so we
2916		 * can simply strip out duplicates. Otherwise we would need to
2917		 * do more bookkeeping to figure out which keys to strip out.
2918		 */
2919
2920		new = 0;
2921
2922		/*
2923		 * If we got at least one key, copy it.
2924		 */
2925		if (num_keys > 0) {
2926			vlun->svl_prin.keylist[0] = prin->keylist[0];
2927			new++;
2928		}
2929
2930		/*
2931		 * Find the next unique key.
2932		 */
2933		for (old = 1; old < num_keys; old++) {
2934			int j;
2935			int match = 0;
2936
2937			if (new >= VHCI_NUM_RESV_KEYS)
2938				break;
2939			for (j = 0; j < new; j++) {
2940				if (bcmp(&prin->keylist[old],
2941				    &vlun->svl_prin.keylist[j],
2942				    sizeof (mhioc_resv_key_t)) == 0) {
2943					match = 1;
2944					break;
2945				}
2946			}
2947			if (!match) {
2948				vlun->svl_prin.keylist[new] =
2949				    prin->keylist[old];
2950				new++;
2951			}
2952		}
2953
2954		/* Stored Big Endian */
2955		vlun->svl_prin.generation = prin->generation;
2956		svl_prin_length = new * sizeof (mhioc_resv_key_t);
2957		/* Stored Big Endian */
2958		vlun->svl_prin.length = BE_32(svl_prin_length);
2959		svl_prin_length += VHCI_PRIN_HEADER_SZ;
2960
2961		/*
2962		 * If we arrived at this point after issuing a retry, make sure
2963		 * that we put everything back the way it originally was so
2964		 * that the target driver can complete the command correctly.
2965		 */
2966		if (vpkt->vpkt_org_vpkt != NULL) {
2967			new_bp = vpkt->vpkt_tgt_init_bp;
2968
2969			scsi_free_consistent_buf(new_bp);
2970
2971			vpkt = vhci_sync_retry_pkt(vpkt);
2972			*intr_vpkt = vpkt;
2973
2974			/*
2975			 * Make sure the original buffer is mapped into kernel
2976			 * space before we try to copy the filtered keys into
2977			 * it.
2978			 */
2979			prin = (vhci_prin_readkeys_t *)bp_mapin_common(
2980			    vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2981		}
2982
2983		/*
2984		 * Now copy the desired number of prin keys into the original
2985		 * target buffer.
2986		 */
2987		if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) {
2988			/*
2989			 * It is safe to return all of the available unique
2990			 * keys
2991			 */
2992			bcopy(&vlun->svl_prin, prin, svl_prin_length);
2993		} else {
2994			/*
2995			 * Not all of the available keys were requested by the
2996			 * original command.
2997			 */
2998			bcopy(&vlun->svl_prin, prin,
2999			    vpkt->vpkt_tgt_init_bp->b_bcount);
3000		}
3001 #ifdef DEBUG
3002		VHCI_DEBUG(5, (CE_NOTE, NULL,
3003		    "vhci_do_prin: To Application:\n"));
3004		if (vhci_debug == 5)
3005			vhci_print_prin_keys(prin, new);
3006		VHCI_DEBUG(5, (CE_NOTE, NULL,
3007		    "vhci_do_prin: MPxIO new keys:\n"));
3008		if (vhci_debug == 5)
3009			vhci_print_prin_keys(&vlun->svl_prin, new);
3010 #endif
3011	}
3012 fail:
3013	if (rval == VHCI_CMD_ERROR) {
3014		/*
3015		 * If we arrived at this point after issuing a
3016		 * retry, make sure that we put everything back
3017		 * the way it originally was so that ssd can
3018		 * complete the command correctly.
3019 */ 3020 3021 if (vpkt->vpkt_org_vpkt != NULL) { 3022 new_bp = vpkt->vpkt_tgt_init_bp; 3023 if (new_bp != NULL) { 3024 scsi_free_consistent_buf(new_bp); 3025 } 3026 3027 new_vpkt = vpkt; 3028 vpkt = vpkt->vpkt_org_vpkt; 3029 3030 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 3031 new_vpkt->vpkt_tgt_pkt); 3032 } 3033 3034 /* 3035 * Mark this command completion as having an error so that 3036 * ssd will retry the command. 3037 */ 3038 3039 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 3040 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 3041 3042 rval = VHCI_CMD_CMPLT; 3043 } 3044 exit: 3045 /* 3046 * Make sure that the semaphore is only released once. 3047 */ 3048 if (rval == VHCI_CMD_CMPLT) { 3049 sema_v(&vlun->svl_pgr_sema); 3050 } 3051 3052 return (rval); 3053 } 3054 3055 static void 3056 vhci_intr(struct scsi_pkt *pkt) 3057 { 3058 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 3059 struct scsi_pkt *tpkt; 3060 scsi_vhci_priv_t *svp; 3061 scsi_vhci_lun_t *vlun; 3062 int rval, held; 3063 struct scsi_failover_ops *fops; 3064 uint8_t *sns, skey, asc, ascq; 3065 mdi_pathinfo_t *lpath; 3066 static char *timeout_err = "Command Timeout"; 3067 static char *parity_err = "Parity Error"; 3068 char *err_str = NULL; 3069 dev_info_t *vdip, *cdip; 3070 char *cpath; 3071 3072 ASSERT(vpkt != NULL); 3073 tpkt = vpkt->vpkt_tgt_pkt; 3074 ASSERT(tpkt != NULL); 3075 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3076 ASSERT(svp != NULL); 3077 vlun = svp->svp_svl; 3078 ASSERT(vlun != NULL); 3079 lpath = vpkt->vpkt_path; 3080 3081 /* 3082 * sync up the target driver's pkt with the pkt that 3083 * we actually used 3084 */ 3085 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp); 3086 tpkt->pkt_resid = pkt->pkt_resid; 3087 tpkt->pkt_state = pkt->pkt_state; 3088 tpkt->pkt_statistics = pkt->pkt_statistics; 3089 tpkt->pkt_reason = pkt->pkt_reason; 3090 3091 /* Return path_instance information back to the target driver. */ 3092 if (scsi_pkt_allocated_correctly(tpkt)) { 3093 if (scsi_pkt_allocated_correctly(pkt)) { 3094 /* 3095 * If both packets were correctly allocated, 3096 * return path returned by pHCI. 
3097			 */
3098			tpkt->pkt_path_instance = pkt->pkt_path_instance;
3099		} else {
3100			/* Otherwise return path of pHCI we used */
3101			tpkt->pkt_path_instance =
3102			    mdi_pi_get_path_instance(lpath);
3103		}
3104	}
3105
3106	if (pkt->pkt_cdbp[0] == SCMD_PROUT &&
3107	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3108	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3109		if ((SCBP_C(pkt) != STATUS_GOOD) ||
3110		    (pkt->pkt_reason != CMD_CMPLT)) {
3111			sema_v(&vlun->svl_pgr_sema);
3112		}
3113	} else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3114		if (pkt->pkt_reason != CMD_CMPLT ||
3115		    (SCBP_C(pkt) != STATUS_GOOD)) {
3116			sema_v(&vlun->svl_pgr_sema);
3117		}
3118	}
3119
3120	switch (pkt->pkt_reason) {
3121	case CMD_CMPLT:
3122		/*
3123		 * cmd completed successfully, check for scsi errors
3124		 */
3125		switch (*(pkt->pkt_scbp)) {
3126		case STATUS_CHECK:
3127			if (pkt->pkt_state & STATE_ARQ_DONE) {
3128				sns = (uint8_t *)
3129				    &(((struct scsi_arq_status *)(uintptr_t)
3130				    (pkt->pkt_scbp))->sts_sensedata);
3131				skey = scsi_sense_key(sns);
3132				asc = scsi_sense_asc(sns);
3133				ascq = scsi_sense_ascq(sns);
3134				fops = vlun->svl_fops;
3135				ASSERT(fops != NULL);
3136				VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3137				    "Received sns key %x asc %x ascq %x\n",
3138				    skey, asc, ascq));
3139
3140				if (vlun->svl_waiting_for_activepath == 1) {
3141					/*
3142					 * if we are here it means we are
3143					 * in the midst of a probe/attach
3144					 * through a passive path; this
3145					 * case is exempt from sense analysis
3146					 * for detection of ext. failover
3147					 * because that would unnecessarily
3148					 * increase attach time.
3149					 */
3150					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3151					    vpkt->vpkt_tgt_init_scblen);
3152					break;
3153				}
3154				if (asc == VHCI_SCSI_PERR) {
3155					/*
3156					 * parity error
3157					 */
3158					err_str = parity_err;
3159					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3160					    vpkt->vpkt_tgt_init_scblen);
3161					break;
3162				}
3163				rval = fops->sfo_analyze_sense(svp->svp_psd,
3164				    sns, vlun->svl_fops_ctpriv);
3165				if ((rval == SCSI_SENSE_NOFAILOVER) ||
3166				    (rval == SCSI_SENSE_UNKNOWN) ||
3167				    (rval == SCSI_SENSE_NOT_READY)) {
3168					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3169					    vpkt->vpkt_tgt_init_scblen);
3170					break;
3171				} else if (rval == SCSI_SENSE_STATE_CHANGED) {
3172					struct scsi_vhci *vhci;
3173					vhci = ADDR2VHCI(&tpkt->pkt_address);
3174					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3175					if (!held) {
3176						/*
3177						 * looks like some other thread
3178						 * has already detected this
3179						 * condition
3180						 */
3181						tpkt->pkt_state &=
3182						    ~STATE_ARQ_DONE;
3183						*(tpkt->pkt_scbp) =
3184						    STATUS_BUSY;
3185						break;
3186					}
3187					(void) taskq_dispatch(
3188					    vhci->vhci_update_pathstates_taskq,
3189					    vhci_update_pathstates,
3190					    (void *)vlun, KM_SLEEP);
3191				} else {
3192					/*
3193					 * externally initiated failover
3194					 * has occurred or is in progress
3195					 */
3196					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3197					if (!held) {
3198						/*
3199						 * looks like some other thread
3200						 * has already detected this
3201						 * condition
3202						 */
3203						tpkt->pkt_state &=
3204						    ~STATE_ARQ_DONE;
3205						*(tpkt->pkt_scbp) =
3206						    STATUS_BUSY;
3207						break;
3208					} else {
3209						rval = vhci_handle_ext_fo
3210						    (pkt, rval);
3211						if (rval == BUSY_RETURN) {
3212							tpkt->pkt_state &=
3213							    ~STATE_ARQ_DONE;
3214							*(tpkt->pkt_scbp) =
3215							    STATUS_BUSY;
3216							break;
3217						}
3218						bcopy(pkt->pkt_scbp,
3219						    tpkt->pkt_scbp,
3220						    vpkt->vpkt_tgt_init_scblen);
3221						break;
3222					}
3223				}
3224			}
3225			break;
3226
3227		/*
3228		 * If this is a good SCSI-II RELEASE cmd completion then restore
3229		 * the load balancing policy and reset
VLUN_RESERVE_ACTIVE_FLG.
3230		 * If this is a good SCSI-II RESERVE cmd completion then set
3231		 * VLUN_RESERVE_ACTIVE_FLG.
3232		 */
3233		case STATUS_GOOD:
3234			if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3235			    (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3236				(void) mdi_set_lb_policy(vlun->svl_dip,
3237				    vlun->svl_lb_policy_save);
3238				vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3239				VHCI_DEBUG(1, (CE_WARN, NULL,
3240				    "!vhci_intr: vlun 0x%p release path 0x%p",
3241				    (void *)vlun, (void *)vpkt->vpkt_path));
3242			}
3243
3244			if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3245			    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3246				vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3247				vlun->svl_resrv_pip = vpkt->vpkt_path;
3248				VHCI_DEBUG(1, (CE_WARN, NULL,
3249				    "!vhci_intr: vlun 0x%p reserved path 0x%p",
3250				    (void *)vlun, (void *)vpkt->vpkt_path));
3251			}
3252			break;
3253
3254		case STATUS_RESERVATION_CONFLICT:
3255			VHCI_DEBUG(1, (CE_WARN, NULL,
3256			    "!vhci_intr: vlun 0x%p "
3257			    "reserve conflict on path 0x%p",
3258			    (void *)vlun, (void *)vpkt->vpkt_path));
3259			/* FALLTHROUGH */
3260		default:
3261			break;
3262		}
3263
3264		/*
3265		 * Update I/O completion statistics for the path
3266		 */
3267		mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3268
3269		/*
3270		 * Command completed successfully, release the dma binding and
3271		 * destroy the transport side of the packet.
3272		 */
3273		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3274		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3275		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3276			if (SCBP_C(pkt) == STATUS_GOOD) {
3277				ASSERT(vlun->svl_taskq);
3278				svp->svp_last_pkt_reason = pkt->pkt_reason;
3279				(void) taskq_dispatch(vlun->svl_taskq,
3280				    vhci_run_cmd, pkt, KM_SLEEP);
3281				return;
3282			}
3283		}
3284		if ((SCBP_C(pkt) == STATUS_GOOD) &&
3285		    (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
3286			/*
3287			 * If the action (value in byte 1 of the cdb) is zero,
3288			 * we're reading keys, and that's the only condition
3289			 * where we need to be concerned with filtering keys
3290			 * and potential retries. Otherwise, we simply signal
3291			 * the semaphore and move on.
3292			 */
3293			if (pkt->pkt_cdbp[1] == 0) {
3294				/*
3295				 * If this is the completion of an internal
3296				 * retry then we need to make sure that the
3297				 * pkt and tpkt pointers are readjusted so
3298				 * the calls to scsi_destroy_pkt and pkt_comp
3299				 * below work correctly.
3300				 */
3301				if (vpkt->vpkt_org_vpkt != NULL) {
3302					pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3303					tpkt = vpkt->vpkt_org_vpkt->
3304					    vpkt_tgt_pkt;
3305
3306					/*
3307					 * If this command was issued through
3308					 * the taskq then we need to clear
3309					 * this flag for proper processing in
3310					 * the case of a retry from the target
3311					 * driver.
3312					 */
3313					vpkt->vpkt_state &=
3314					    ~VHCI_PKT_THRU_TASKQ;
3315				}
3316
3317				/*
3318				 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3319				 * vpkt will contain the address of the
3320				 * original vpkt
3321				 */
3322				if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3323					/*
3324					 * The command has been resent to get
3325					 * all the keys from the device. Don't
3326					 * complete the command with ssd until
3327					 * the retry completes.
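					 * (The reissued command re-enters
					 * vhci_intr() with vpkt_org_vpkt
					 * pointing back at this packet; that
					 * is how the two are re-joined above.)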
3328					 */
3329					return;
3330				}
3331			} else {
3332				sema_v(&vlun->svl_pgr_sema);
3333			}
3334		}
3335
3336		break;
3337
3338	case CMD_TIMEOUT:
3339		if ((pkt->pkt_statistics &
3340		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
3341
3342			VHCI_DEBUG(1, (CE_NOTE, NULL,
3343			    "!scsi vhci timeout invoked\n"));
3344
3345			(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3346			    FALSE, VHCI_DEPTH_ALL);
3347		}
3348		MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3349		tpkt->pkt_statistics |= STAT_ABORTED;
3350		err_str = timeout_err;
3351		break;
3352
3353	case CMD_TRAN_ERR:
3354		/*
3355		 * This status is returned if the transport has sent the cmd
3356		 * down the link to the target and then some error occurs.
3357		 * In case of a SCSI-II RESERVE cmd, we don't know if the
3358		 * reservation has been accepted by the target or not, so we
3359		 * need to clear the reservation.
3360		 */
3361		if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3362		    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3363			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3364			    " cmd_tran_err for scsi-2 reserve cmd\n"));
3365			if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3366			    TRUE, VHCI_DEPTH_TARGET)) {
3367				VHCI_DEBUG(1, (CE_WARN, NULL,
3368				    "!vhci_intr cmd_tran_err reset failed!"));
3369			}
3370		}
3371		break;
3372
3373	case CMD_DEV_GONE:
3374		/*
3375		 * If this is the last path then report CMD_DEV_GONE to the
3376		 * target driver, otherwise report BUSY to trigger retry.
3377		 */
3378		if (vlun->svl_dip &&
3379		    (mdi_client_get_path_count(vlun->svl_dip) <= 1)) {
3380			struct scsi_vhci *vhci;
3381			vhci = ADDR2VHCI(&tpkt->pkt_address);
3382			VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3383			    "cmd_dev_gone on last path\n"));
3384			(void) vhci_invalidate_mpapi_lu(vhci, vlun);
3385			break;
3386		}
3387
3388		/* Report CMD_CMPLT-with-BUSY to cause retry. */
3389		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3390		    "cmd_dev_gone\n"));
3391		tpkt->pkt_reason = CMD_CMPLT;
3392		tpkt->pkt_state = STATE_GOT_BUS |
3393		    STATE_GOT_TARGET | STATE_SENT_CMD |
3394		    STATE_GOT_STATUS;
3395		*(tpkt->pkt_scbp) = STATUS_BUSY;
3396		break;
3397
3398	default:
3399		break;
3400	}
3401
3402	/*
3403	 * A SCSI-II RESERVE cmd has been serviced by the lower layers, so
3404	 * clear the flag so the LUN is not QUIESCED any longer.
3405	 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3406	 * is retried, a taskq shall again be dispatched to service it. Else
3407	 * it may lead to a system hang if the retry is within interrupt
3408	 * context.
3409	 */
3410	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3411	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3412		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3413		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3414	}
3415
3416	/*
3417	 * vpkt_org_vpkt should always be NULL here if the retry command
3418	 * has been successfully processed. If vpkt_org_vpkt != NULL at
3419	 * this point, it is an error so restore the original vpkt and
3420	 * return an error to the target driver so it can retry the
3421	 * command as appropriate.
3422	 */
3423	if (vpkt->vpkt_org_vpkt != NULL) {
3424		struct vhci_pkt *new_vpkt = vpkt;
3425		vpkt = vpkt->vpkt_org_vpkt;
3426
3427		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3428		    new_vpkt->vpkt_tgt_pkt);
3429
3430		/*
3431		 * Mark this command completion as having an error so that
3432		 * ssd will retry the command.
3433 */ 3434 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 3435 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 3436 3437 pkt = vpkt->vpkt_hba_pkt; 3438 tpkt = vpkt->vpkt_tgt_pkt; 3439 } 3440 3441 if ((err_str != NULL) && (pkt->pkt_reason != 3442 svp->svp_last_pkt_reason)) { 3443 cdip = vlun->svl_dip; 3444 vdip = ddi_get_parent(cdip); 3445 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3446 vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s", 3447 ddi_pathname(cdip, cpath), ddi_driver_name(cdip), 3448 ddi_get_instance(cdip), err_str, 3449 mdi_pi_spathname(vpkt->vpkt_path)); 3450 kmem_free(cpath, MAXPATHLEN); 3451 } 3452 svp->svp_last_pkt_reason = pkt->pkt_reason; 3453 VHCI_DECR_PATH_CMDCOUNT(svp); 3454 3455 /* 3456 * For PARTIAL_DMA, vhci should not free the path. 3457 * Target driver will call into vhci_scsi_dmafree or 3458 * destroy pkt to release this path. 3459 */ 3460 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 3461 scsi_destroy_pkt(pkt); 3462 vpkt->vpkt_hba_pkt = NULL; 3463 if (vpkt->vpkt_path) { 3464 mdi_rele_path(vpkt->vpkt_path); 3465 vpkt->vpkt_path = NULL; 3466 } 3467 } 3468 3469 scsi_hba_pkt_comp(tpkt); 3470 } 3471 3472 /* 3473 * two possibilities: (1) failover has completed 3474 * or (2) is in progress; update our path states for 3475 * the former case; for the latter case, 3476 * initiate a scsi_watch request to 3477 * determine when failover completes - vlun is HELD 3478 * until failover completes; BUSY is returned to upper 3479 * layer in both the cases 3480 */ 3481 static int 3482 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat) 3483 { 3484 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 3485 struct scsi_pkt *tpkt; 3486 scsi_vhci_priv_t *svp; 3487 scsi_vhci_lun_t *vlun; 3488 struct scsi_vhci *vhci; 3489 scsi_vhci_swarg_t *swarg; 3490 char *path; 3491 3492 ASSERT(vpkt != NULL); 3493 tpkt = vpkt->vpkt_tgt_pkt; 3494 ASSERT(tpkt != NULL); 3495 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3496 ASSERT(svp != NULL); 3497 vlun = svp->svp_svl; 3498 ASSERT(vlun != NULL); 3499 ASSERT(VHCI_LUN_IS_HELD(vlun)); 3500 3501 vhci = ADDR2VHCI(&tpkt->pkt_address); 3502 3503 if (fostat == SCSI_SENSE_INACTIVE) { 3504 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover " 3505 "detected for %s; updating path states...\n", 3506 vlun->svl_lun_wwn)); 3507 /* 3508 * set the vlun flag to indicate to the task that the target 3509 * port group needs updating 3510 */ 3511 vlun->svl_flags |= VLUN_UPDATE_TPG; 3512 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq, 3513 vhci_update_pathstates, (void *)vlun, KM_SLEEP); 3514 } else { 3515 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3516 vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip), 3517 "!%s (%s%d): Waiting for externally initiated failover " 3518 "to complete", ddi_pathname(vlun->svl_dip, path), 3519 ddi_driver_name(vlun->svl_dip), 3520 ddi_get_instance(vlun->svl_dip)); 3521 kmem_free(path, MAXPATHLEN); 3522 swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP); 3523 if (swarg == NULL) { 3524 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: " 3525 "request packet allocation for %s failed....\n", 3526 vlun->svl_lun_wwn)); 3527 VHCI_RELEASE_LUN(vlun); 3528 return (PKT_RETURN); 3529 } 3530 swarg->svs_svp = svp; 3531 swarg->svs_tos = ddi_get_time(); 3532 swarg->svs_pi = vpkt->vpkt_path; 3533 swarg->svs_release_lun = 0; 3534 swarg->svs_done = 0; 3535 /* 3536 * place a hold on the path...we don't want it to 3537 * vanish while scsi_watch is in progress 3538 */ 3539 mdi_hold_path(vpkt->vpkt_path); 3540 svp->svp_sw_token = 
scsi_watch_request_submit(svp->svp_psd,
3541		    VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3542		    (caddr_t)swarg);
3543	}
3544	return (BUSY_RETURN);
3545 }
3546
3547 /*
3548  * vhci_efo_watch_cb:
3549  * Callback from scsi_watch request to check the failover status.
3550  * Completion is either due to successful failover or timeout.
3551  * Upon successful completion, vhci_update_path_states is called.
3552  * For timeout condition, vhci_efo_done is called.
3553  * Always returns 0 to scsi_watch to keep retrying till vhci_efo_done
3554  * terminates this request properly in a separate thread.
3555  */
3556
3557 static int
3558 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3559 {
3560	struct scsi_status *statusp = resultp->statusp;
3561	uint8_t *sensep = (uint8_t *)resultp->sensep;
3562	struct scsi_pkt *pkt = resultp->pkt;
3563	scsi_vhci_swarg_t *swarg;
3564	scsi_vhci_priv_t *svp;
3565	scsi_vhci_lun_t *vlun;
3566	struct scsi_vhci *vhci;
3567	dev_info_t *vdip;
3568	int rval, updt_paths;
3569
3570	swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3571	svp = swarg->svs_svp;
3572	if (swarg->svs_done) {
3573		/*
3574		 * Already completed failover or timed out.
3575		 * Waiting for vhci_efo_done to terminate this scsi_watch.
3576		 */
3577		return (0);
3578	}
3579
3580	ASSERT(svp != NULL);
3581	vlun = svp->svp_svl;
3582	ASSERT(vlun != NULL);
3583	ASSERT(VHCI_LUN_IS_HELD(vlun));
3584	vlun->svl_efo_update_path = 0;
3585	vdip = ddi_get_parent(vlun->svl_dip);
3586	vhci = ddi_get_soft_state(vhci_softstate,
3587	    ddi_get_instance(vdip));
3588
3589	updt_paths = 0;
3590
3591	if (pkt->pkt_reason != CMD_CMPLT) {
3592		if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3593			swarg->svs_release_lun = 1;
3594			goto done;
3595		}
3596		return (0);
3597	}
3598	if (*((unsigned char *)statusp) == STATUS_CHECK) {
3599		rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
3600		    vlun->svl_fops_ctpriv);
3601		switch (rval) {
3602		/*
3603		 * Only update path states in case path is definitely
3604		 * inactive, or no failover occurred. For all other
3605		 * check conditions continue pinging. An unexpected
3606		 * check condition shouldn't cause pinging to complete
3607		 * prematurely.
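		 * ("Pinging" here is the periodic TEST UNIT READY that
		 * scsi_watch issues every VHCI_FOWATCH_INTERVAL; it stops
		 * once the failover module reports INACTIVE/NOFAILOVER, a
		 * GOOD or reservation-conflict status arrives, or
		 * VHCI_EXTFO_TIMEOUT expires.)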
3608		 */
3609		case SCSI_SENSE_INACTIVE:
3610		case SCSI_SENSE_NOFAILOVER:
3611			updt_paths = 1;
3612			break;
3613		default:
3614			if ((ddi_get_time() - swarg->svs_tos)
3615			    >= VHCI_EXTFO_TIMEOUT) {
3616				swarg->svs_release_lun = 1;
3617				goto done;
3618			}
3619			return (0);
3620		}
3621	} else if (*((unsigned char *)statusp) ==
3622	    STATUS_RESERVATION_CONFLICT) {
3623		updt_paths = 1;
3624	} else if ((*((unsigned char *)statusp)) &
3625	    (STATUS_BUSY | STATUS_QFULL)) {
3626		return (0);
3627	}
3628	if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3629	    (updt_paths == 1)) {
3630		/*
3631		 * we got here because we had detected an
3632		 * externally initiated failover; things
3633		 * have settled down now, so let's
3634		 * start up a task to update the
3635		 * path states and target port group
3636		 */
3637		vlun->svl_efo_update_path = 1;
3638		swarg->svs_done = 1;
3639		vlun->svl_swarg = swarg;
3640		vlun->svl_flags |= VLUN_UPDATE_TPG;
3641		(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3642		    vhci_update_pathstates, (void *)vlun,
3643		    KM_SLEEP);
3644		return (0);
3645	}
3646	if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3647		swarg->svs_release_lun = 1;
3648		goto done;
3649	}
3650	return (0);
3651 done:
3652	swarg->svs_done = 1;
3653	(void) taskq_dispatch(vhci->vhci_taskq,
3654	    vhci_efo_done, (void *)swarg, KM_SLEEP);
3655	return (0);
3656 }
3657
3658 /*
3659  * vhci_efo_done:
3660  * Cleanly terminates scsi_watch and frees up resources.
3661  * Called as a taskq function in vhci_efo_watch_cb for the EFO timeout
3662  * condition, or by vhci_update_path_states invoked during externally
3663  * initiated failover completion.
3664  */
3665 static void
3666 vhci_efo_done(void *arg)
3667 {
3668	scsi_vhci_lun_t *vlun;
3669	scsi_vhci_swarg_t *swarg = (scsi_vhci_swarg_t *)arg;
3670	scsi_vhci_priv_t *svp = swarg->svs_svp;
3671	ASSERT(svp);
3672
3673	vlun = svp->svp_svl;
3674	ASSERT(vlun);
3675
3676	/* Wait for clean termination of scsi_watch */
3677	(void) scsi_watch_request_terminate(svp->svp_sw_token,
3678	    SCSI_WATCH_TERMINATE_ALL_WAIT);
3679	svp->svp_sw_token = NULL;
3680
3681	/* release the path and free up resources to indicate failover completion */
3682	mdi_rele_path(swarg->svs_pi);
3683	if (swarg->svs_release_lun) {
3684		VHCI_RELEASE_LUN(vlun);
3685	}
3686	kmem_free((void *)swarg, sizeof (*swarg));
3687 }
3688
3689 /*
3690  * Update the path states
3691  * vlun should be HELD when this is invoked.
3692  * Calls vhci_efo_done to clean up resources allocated for EFO.
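 *
 * Dispatched, for example, as:
 *	(void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
 *	    vhci_update_pathstates, (void *)vlun, KM_SLEEP);
 * from vhci_intr() and vhci_handle_ext_fo() above.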
3693  */
3694 void
3695 vhci_update_pathstates(void *arg)
3696 {
3697	mdi_pathinfo_t *pip, *npip;
3698	dev_info_t *dip;
3699	struct scsi_failover_ops *fo;
3700	struct scsi_vhci_priv *svp;
3701	struct scsi_device *psd;
3702	struct scsi_path_opinfo opinfo;
3703	char *pclass, *tptr;
3704	struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
3705	int sps; /* mdi_select_path() status */
3706	char *cpath;
3707	struct scsi_vhci *vhci;
3708	struct scsi_pkt *pkt;
3709	struct buf *bp;
3710	int reserve_conflict = 0;
3711
3712	ASSERT(VHCI_LUN_IS_HELD(vlun));
3713	dip = vlun->svl_dip;
3714	pip = npip = NULL;
3715
3716	vhci = ddi_get_soft_state(vhci_softstate,
3717	    ddi_get_instance(ddi_get_parent(dip)));
3718
3719	sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3720	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
3721	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3722		goto done;
3723	}
3724
3725	fo = vlun->svl_fops;
3726	do {
3727		pip = npip;
3728		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3729		psd = svp->svp_psd;
3730		if (fo->sfo_path_get_opinfo(psd, &opinfo,
3731		    vlun->svl_fops_ctpriv) != 0) {
3732			sps = mdi_select_path(dip, NULL,
3733			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3734			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3735			mdi_rele_path(pip);
3736			continue;
3737		}
3738
3739		if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3740		    MDI_SUCCESS) {
3741			VHCI_DEBUG(1, (CE_NOTE, NULL,
3742			    "!vhci_update_pathstates: prop lookup failed for "
3743			    "path 0x%p\n", (void *)pip));
3744			sps = mdi_select_path(dip, NULL,
3745			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3746			    MDI_SELECT_NO_PREFERRED), pip, &npip);
3747			mdi_rele_path(pip);
3748			continue;
3749		}
3750
3751		/*
3752		 * Need to update the "path-class" property
3753		 * value in the device tree if different
3754		 * from the existing value.
3755		 */
3756		if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3757			(void) mdi_prop_update_string(pip, "path-class",
3758			    opinfo.opinfo_path_attr);
3759		}
3760
3761		/*
3762		 * Only change the state if needed, i.e. don't call
3763		 * mdi_pi_set_state to ONLINE a path if it's already
3764		 * ONLINE. Same for STANDBY paths.
3765 */ 3766 3767 if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE || 3768 opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) { 3769 if (!(MDI_PI_IS_ONLINE(pip))) { 3770 VHCI_DEBUG(1, (CE_NOTE, NULL, 3771 "!vhci_update_pathstates: marking path" 3772 " 0x%p as ONLINE\n", (void *)pip)); 3773 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3774 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s " 3775 "(%s%d): path %s " 3776 "is now ONLINE because of " 3777 "an externally initiated failover", 3778 ddi_pathname(dip, cpath), 3779 ddi_driver_name(dip), 3780 ddi_get_instance(dip), 3781 mdi_pi_spathname(pip)); 3782 kmem_free(cpath, MAXPATHLEN); 3783 mdi_pi_set_state(pip, 3784 MDI_PATHINFO_STATE_ONLINE); 3785 mdi_pi_set_preferred(pip, 3786 opinfo.opinfo_preferred); 3787 tptr = kmem_alloc(strlen 3788 (opinfo.opinfo_path_attr)+1, KM_SLEEP); 3789 (void) strlcpy(tptr, opinfo.opinfo_path_attr, 3790 (strlen(opinfo.opinfo_path_attr)+1)); 3791 mutex_enter(&vlun->svl_mutex); 3792 if (vlun->svl_active_pclass != NULL) { 3793 kmem_free(vlun->svl_active_pclass, 3794 strlen(vlun->svl_active_pclass)+1); 3795 } 3796 vlun->svl_active_pclass = tptr; 3797 if (vlun->svl_waiting_for_activepath) { 3798 vlun->svl_waiting_for_activepath = 0; 3799 } 3800 mutex_exit(&vlun->svl_mutex); 3801 /* Check for Reservation Conflict */ 3802 bp = scsi_alloc_consistent_buf( 3803 &svp->svp_psd->sd_address, 3804 (struct buf *)NULL, DEV_BSIZE, B_READ, 3805 NULL, NULL); 3806 if (!bp) { 3807 VHCI_DEBUG(1, (CE_NOTE, NULL, 3808 "vhci_update_pathstates: " 3809 "!No resources (buf)\n")); 3810 mdi_rele_path(pip); 3811 goto done; 3812 } 3813 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, 3814 NULL, bp, CDB_GROUP1, 3815 sizeof (struct scsi_arq_status), 0, 3816 PKT_CONSISTENT, NULL, NULL); 3817 if (pkt) { 3818 (void) scsi_setup_cdb((union scsi_cdb *) 3819 (uintptr_t)pkt->pkt_cdbp, 3820 SCMD_READ, 1, 1, 0); 3821 pkt->pkt_time = 3*30; 3822 pkt->pkt_flags = FLAG_NOINTR; 3823 pkt->pkt_path_instance = 3824 mdi_pi_get_path_instance(pip); 3825 3826 if ((scsi_transport(pkt) == 3827 TRAN_ACCEPT) && (pkt->pkt_reason 3828 == CMD_CMPLT) && (SCBP_C(pkt) == 3829 STATUS_RESERVATION_CONFLICT)) { 3830 reserve_conflict = 1; 3831 } 3832 scsi_destroy_pkt(pkt); 3833 } 3834 scsi_free_consistent_buf(bp); 3835 } else if (MDI_PI_IS_ONLINE(pip)) { 3836 if (strcmp(pclass, opinfo.opinfo_path_attr) 3837 != 0) { 3838 mdi_pi_set_preferred(pip, 3839 opinfo.opinfo_preferred); 3840 mutex_enter(&vlun->svl_mutex); 3841 if (vlun->svl_active_pclass == NULL || 3842 strcmp(opinfo.opinfo_path_attr, 3843 vlun->svl_active_pclass) != 0) { 3844 mutex_exit(&vlun->svl_mutex); 3845 tptr = kmem_alloc(strlen 3846 (opinfo.opinfo_path_attr)+1, 3847 KM_SLEEP); 3848 (void) strlcpy(tptr, 3849 opinfo.opinfo_path_attr, 3850 (strlen 3851 (opinfo.opinfo_path_attr) 3852 +1)); 3853 mutex_enter(&vlun->svl_mutex); 3854 } else { 3855 /* 3856 * No need to update 3857 * svl_active_pclass 3858 */ 3859 tptr = NULL; 3860 mutex_exit(&vlun->svl_mutex); 3861 } 3862 if (tptr) { 3863 if (vlun->svl_active_pclass 3864 != NULL) { 3865 kmem_free(vlun-> 3866 svl_active_pclass, 3867 strlen(vlun-> 3868 svl_active_pclass) 3869 +1); 3870 } 3871 vlun->svl_active_pclass = tptr; 3872 mutex_exit(&vlun->svl_mutex); 3873 } 3874 } 3875 } 3876 } else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) && 3877 !(MDI_PI_IS_STANDBY(pip))) { 3878 VHCI_DEBUG(1, (CE_NOTE, NULL, 3879 "!vhci_update_pathstates: marking path" 3880 " 0x%p as STANDBY\n", (void *)pip)); 3881 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3882 vhci_log(CE_NOTE, 
ddi_get_parent(dip), "!%s " 3883 "(%s%d): path %s " 3884 "is now STANDBY because of " 3885 "an externally initiated failover", 3886 ddi_pathname(dip, cpath), 3887 ddi_driver_name(dip), 3888 ddi_get_instance(dip), 3889 mdi_pi_spathname(pip)); 3890 kmem_free(cpath, MAXPATHLEN); 3891 mdi_pi_set_state(pip, 3892 MDI_PATHINFO_STATE_STANDBY); 3893 mdi_pi_set_preferred(pip, 3894 opinfo.opinfo_preferred); 3895 mutex_enter(&vlun->svl_mutex); 3896 if (vlun->svl_active_pclass != NULL) { 3897 if (strcmp(vlun->svl_active_pclass, 3898 opinfo.opinfo_path_attr) == 0) { 3899 kmem_free(vlun-> 3900 svl_active_pclass, 3901 strlen(vlun-> 3902 svl_active_pclass)+1); 3903 vlun->svl_active_pclass = NULL; 3904 } 3905 } 3906 mutex_exit(&vlun->svl_mutex); 3907 } 3908 (void) mdi_prop_free(pclass); 3909 sps = mdi_select_path(dip, NULL, 3910 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH | 3911 MDI_SELECT_NO_PREFERRED), pip, &npip); 3912 mdi_rele_path(pip); 3913 3914 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 3915 3916 /* 3917 * Check to see if this vlun has an active SCSI-II RESERVE. If so 3918 * clear the reservation by sending a reset, so the host doesn't 3919 * receive a reservation conflict. 3920 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd 3921 * of the reset, explicitly. 3922 */ 3923 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 3924 if (reserve_conflict && (vlun->svl_xlf_capable == 0)) { 3925 (void) vhci_recovery_reset(vlun, 3926 &svp->svp_psd->sd_address, FALSE, 3927 VHCI_DEPTH_TARGET); 3928 } 3929 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 3930 mutex_enter(&vhci->vhci_mutex); 3931 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 3932 &vhci->vhci_reset_notify_listf); 3933 mutex_exit(&vhci->vhci_mutex); 3934 } 3935 if (vlun->svl_flags & VLUN_UPDATE_TPG) { 3936 /* 3937 * Update the AccessState of related MP-API TPGs 3938 */ 3939 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 3940 vlun->svl_flags &= ~VLUN_UPDATE_TPG; 3941 } 3942 done: 3943 if (vlun->svl_efo_update_path) { 3944 vlun->svl_efo_update_path = 0; 3945 vhci_efo_done(vlun->svl_swarg); 3946 vlun->svl_swarg = 0; 3947 } 3948 VHCI_RELEASE_LUN(vlun); 3949 } 3950 3951 /* ARGSUSED */ 3952 static int 3953 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 3954 { 3955 scsi_hba_tran_t *hba = NULL; 3956 struct scsi_device *psd = NULL; 3957 scsi_vhci_lun_t *vlun = NULL; 3958 dev_info_t *pdip = NULL; 3959 dev_info_t *tgt_dip; 3960 struct scsi_vhci *vhci; 3961 char *guid; 3962 scsi_vhci_priv_t *svp = NULL; 3963 int rval = MDI_FAILURE; 3964 int vlun_alloced = 0; 3965 3966 ASSERT(vdip != NULL); 3967 ASSERT(pip != NULL); 3968 3969 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 3970 ASSERT(vhci != NULL); 3971 3972 pdip = mdi_pi_get_phci(pip); 3973 ASSERT(pdip != NULL); 3974 3975 hba = ddi_get_driver_private(pdip); 3976 ASSERT(hba != NULL); 3977 3978 tgt_dip = mdi_pi_get_client(pip); 3979 ASSERT(tgt_dip != NULL); 3980 3981 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS, 3982 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 3983 VHCI_DEBUG(1, (CE_WARN, NULL, 3984 "vhci_pathinfo_init: lun guid property failed")); 3985 goto failure; 3986 } 3987 3988 vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced); 3989 ddi_prop_free(guid); 3990 3991 vlun->svl_dip = tgt_dip; 3992 3993 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP); 3994 svp->svp_svl = vlun; 3995 3996 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip); 3997 mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL); 3998 
cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL); 3999 4000 psd = kmem_zalloc(sizeof (*psd), KM_SLEEP); 4001 mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL); 4002 4003 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) { 4004 /* 4005 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to 4006 * scsi_device in the scsi_address structure. This allows 4007 * an HBA driver to find its scsi_device(9S) and 4008 * per-scsi_device(9S) HBA private data given a 4009 * scsi_address(9S) by using scsi_address_device(9F) and 4010 * scsi_device_hba_private_get(9F). 4011 */ 4012 psd->sd_address.a.a_sd = psd; 4013 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4014 /* 4015 * Clone the transport structure if requested. 4016 * Self-enumerating HBAs always need to use cloning. 4017 */ 4018 scsi_hba_tran_t *clone = 4019 kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP); 4020 bcopy(hba, clone, sizeof (scsi_hba_tran_t)); 4021 hba = clone; 4022 hba->tran_sd = psd; 4023 } else { 4024 /* 4025 * SPI pHCI unit-address. If we ever need to support this 4026 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo 4027 * node unit-address properties. For now we fail... 4028 */ 4029 goto failure; 4030 } 4031 4032 psd->sd_dev = tgt_dip; 4033 psd->sd_address.a_hba_tran = hba; 4034 4035 /* 4036 * Mark scsi_device as being associated with a pathinfo node. For 4037 * a scsi_device structure associated with a devinfo node, 4038 * scsi_ctlops_initchild sets this field to NULL. 4039 */ 4040 psd->sd_pathinfo = pip; 4041 4042 /* 4043 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with 4044 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all 4045 * mpxio-capable pHCI drivers use SCSA enumeration services (or at 4046 * least have been changed to use sd_pathinfo instead).
4047 */ 4048 psd->sd_private = (caddr_t)pip; 4049 4050 /* See scsi_hba.c for info on sd_tran_safe kludge */ 4051 psd->sd_tran_safe = hba; 4052 4053 svp->svp_psd = psd; 4054 mdi_pi_set_vhci_private(pip, (caddr_t)svp); 4055 4056 /* 4057 * call hba's target init entry point if it exists 4058 */ 4059 if (hba->tran_tgt_init != NULL) { 4060 psd->sd_tran_tgt_free_done = 0; 4061 if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip, 4062 hba, psd)) != DDI_SUCCESS) { 4063 VHCI_DEBUG(1, (CE_WARN, pdip, 4064 "!vhci_pathinfo_init: tran_tgt_init failed for " 4065 "path=0x%p rval=%x", (void *)pip, rval)); 4066 goto failure; 4067 } 4068 } 4069 4070 svp->svp_new_path = 1; 4071 4072 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n", 4073 (void *)pip)); 4074 return (MDI_SUCCESS); 4075 4076 failure: 4077 if (psd) { 4078 mutex_destroy(&psd->sd_mutex); 4079 kmem_free(psd, sizeof (*psd)); 4080 } 4081 if (svp) { 4082 mdi_pi_set_vhci_private(pip, NULL); 4083 mutex_destroy(&svp->svp_mutex); 4084 cv_destroy(&svp->svp_cv); 4085 kmem_free(svp, sizeof (*svp)); 4086 } 4087 if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE)) 4088 kmem_free(hba, sizeof (scsi_hba_tran_t)); 4089 4090 if (vlun_alloced) 4091 vhci_lun_free(tgt_dip); 4092 4093 return (rval); 4094 } 4095 4096 /* ARGSUSED */ 4097 static int 4098 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 4099 { 4100 scsi_hba_tran_t *hba = NULL; 4101 struct scsi_device *psd = NULL; 4102 dev_info_t *pdip = NULL; 4103 dev_info_t *cdip = NULL; 4104 scsi_vhci_priv_t *svp = NULL; 4105 4106 ASSERT(vdip != NULL); 4107 ASSERT(pip != NULL); 4108 4109 pdip = mdi_pi_get_phci(pip); 4110 ASSERT(pdip != NULL); 4111 4112 cdip = mdi_pi_get_client(pip); 4113 ASSERT(cdip != NULL); 4114 4115 hba = ddi_get_driver_private(pdip); 4116 ASSERT(hba != NULL); 4117 4118 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT); 4119 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4120 if (svp == NULL) { 4121 /* path already freed. Nothing to do. */ 4122 return (MDI_SUCCESS); 4123 } 4124 4125 psd = svp->svp_psd; 4126 ASSERT(psd != NULL); 4127 4128 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) { 4129 /* Verify plumbing */ 4130 ASSERT(psd->sd_address.a_hba_tran == hba); 4131 ASSERT(psd->sd_address.a.a_sd == psd); 4132 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4133 /* Switch to cloned scsi_hba_tran(9S) structure */ 4134 hba = psd->sd_address.a_hba_tran; 4135 ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE); 4136 ASSERT(hba->tran_sd == psd); 4137 } 4138 4139 if ((hba->tran_tgt_free != NULL) && !psd->sd_tran_tgt_free_done) { 4140 (*hba->tran_tgt_free) (pdip, cdip, hba, psd); 4141 psd->sd_tran_tgt_free_done = 1; 4142 } 4143 mutex_destroy(&psd->sd_mutex); 4144 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4145 kmem_free(hba, sizeof (*hba)); 4146 } 4147 4148 mdi_pi_set_vhci_private(pip, NULL); 4149 4150 /* 4151 * Free the pathinfo related scsi_device inquiry data. Note that this 4152 * matches what happens for scsi_hba.c devinfo case at uninitchild time. 4153 */ 4154 if (psd->sd_inq) 4155 kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry)); 4156 kmem_free((caddr_t)psd, sizeof (*psd)); 4157 4158 mutex_destroy(&svp->svp_mutex); 4159 cv_destroy(&svp->svp_cv); 4160 kmem_free((caddr_t)svp, sizeof (*svp)); 4161 4162 /* 4163 * If this is the last path to the client, 4164 * then free up the vlun as well. 
4165 */ 4166 if (mdi_client_get_path_count(cdip) == 1) { 4167 vhci_lun_free(cdip); 4168 } 4169 4170 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n", 4171 (void *)pip)); 4172 return (MDI_SUCCESS); 4173 } 4174 4175 /* ARGSUSED */ 4176 static int 4177 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip, 4178 mdi_pathinfo_state_t state, uint32_t ext_state, int flags) 4179 { 4180 int rval = MDI_SUCCESS; 4181 scsi_vhci_priv_t *svp; 4182 scsi_vhci_lun_t *vlun; 4183 int held; 4184 int op = (flags & 0xf00) >> 8; 4185 struct scsi_vhci *vhci; 4186 4187 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4188 4189 if (flags & MDI_EXT_STATE_CHANGE) { 4190 /* 4191 * We do not want to issue any commands down the path in case 4192 * the sync flag is set. Lower layers might not be ready to 4193 * accept any I/O commands. 4194 */ 4195 if (op == DRIVER_DISABLE) 4196 return (MDI_SUCCESS); 4197 4198 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4199 if (svp == NULL) { 4200 return (MDI_FAILURE); 4201 } 4202 vlun = svp->svp_svl; 4203 4204 if (flags & MDI_BEFORE_STATE_CHANGE) { 4205 /* 4206 * Hold the LUN. 4207 */ 4208 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 4209 if (flags & MDI_DISABLE_OP) { 4210 /* 4211 * Issue a scsi reset if this happens to be 4212 * a reserved path. 4213 */ 4214 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 4215 /* 4216 * if a reservation is pending on 4217 * this path, don't mark the 4218 * path busy 4219 */ 4220 if (op == DRIVER_DISABLE_TRANSIENT) { 4221 VHCI_DEBUG(1, (CE_NOTE, NULL, 4222 "!vhci_pathinfo" 4223 "_state_change (pip:%p): " 4224 " reservation: fail busy\n", 4225 (void *)pip)); 4226 return (MDI_FAILURE); 4227 } 4228 if (pip == vlun->svl_resrv_pip) { 4229 if (vhci_recovery_reset( 4230 svp->svp_svl, 4231 &svp->svp_psd->sd_address, 4232 TRUE, 4233 VHCI_DEPTH_TARGET) == 0) { 4234 VHCI_DEBUG(1, 4235 (CE_NOTE, NULL, 4236 "!vhci_pathinfo" 4237 "_state_change " 4238 " (pip:%p): " 4239 "reset failed, " 4240 "give up!\n", 4241 (void *)pip)); 4242 } 4243 vlun->svl_flags &= 4244 ~VLUN_RESERVE_ACTIVE_FLG; 4245 } 4246 } 4247 } else if (flags & MDI_ENABLE_OP) { 4248 if (((vhci->vhci_conf_flags & 4249 VHCI_CONF_FLAGS_AUTO_FAILBACK) == 4250 VHCI_CONF_FLAGS_AUTO_FAILBACK) && 4251 MDI_PI_IS_USER_DISABLE(pip) && 4252 MDI_PI_IS_STANDBY(pip)) { 4253 struct scsi_failover_ops *fo; 4254 char *best_pclass, *pclass = NULL; 4255 int best_class, rv; 4256 /* 4257 * Failback if enabling a standby path 4258 * and it is the primary class or 4259 * preferred class 4260 */ 4261 best_class = mdi_pi_get_preferred(pip); 4262 if (best_class == 0) { 4263 /* 4264 * if not preferred - compare 4265 * path-class with class 4266 */ 4267 fo = vlun->svl_fops; 4268 (void) fo->sfo_pathclass_next( 4269 NULL, &best_pclass, 4270 vlun->svl_fops_ctpriv); 4271 pclass = NULL; 4272 rv = mdi_prop_lookup_string(pip, 4273 "path-class", &pclass); 4274 if (rv != MDI_SUCCESS || 4275 pclass == NULL) { 4276 vhci_log(CE_NOTE, vdip, 4277 "!path-class " 4278 " lookup " 4279 "failed. 
rv: %d" 4280 "class: %p", rv, 4281 (void *)pclass); 4282 } else if (strncmp(pclass, 4283 best_pclass, 4284 strlen(best_pclass)) == 0) { 4285 best_class = 1; 4286 } 4287 if (rv == MDI_SUCCESS && 4288 pclass != NULL) { 4289 rv = mdi_prop_free( 4290 pclass); 4291 if (rv != 4292 DDI_PROP_SUCCESS) { 4293 vhci_log( 4294 CE_NOTE, 4295 vdip, 4296 "!path-" 4297 "class" 4298 " free" 4299 " failed" 4300 " rv: %d" 4301 " class: " 4302 "%p", 4303 rv, 4304 (void *) 4305 pclass); 4306 } 4307 } 4308 } 4309 if (best_class == 1) { 4310 VHCI_DEBUG(1, (CE_NOTE, NULL, 4311 "preferred path: %p " 4312 "USER_DISABLE->USER_ENABLE " 4313 "transition for lun %s\n", 4314 (void *)pip, 4315 vlun->svl_lun_wwn)); 4316 (void) taskq_dispatch( 4317 vhci->vhci_taskq, 4318 vhci_initiate_auto_failback, 4319 (void *) vlun, KM_SLEEP); 4320 } 4321 } 4322 /* 4323 * if PGR is active, revalidate key and 4324 * register on this path also, if key is 4325 * still valid 4326 */ 4327 sema_p(&vlun->svl_pgr_sema); 4328 if (vlun->svl_pgr_active) 4329 (void) 4330 vhci_pgr_validate_and_register(svp); 4331 sema_v(&vlun->svl_pgr_sema); 4332 /* 4333 * Inform target driver about any 4334 * reservations to be reinstated if target 4335 * has dropped reservation during the busy 4336 * period. 4337 */ 4338 mutex_enter(&vhci->vhci_mutex); 4339 scsi_hba_reset_notify_callback( 4340 &vhci->vhci_mutex, 4341 &vhci->vhci_reset_notify_listf); 4342 mutex_exit(&vhci->vhci_mutex); 4343 } 4344 } 4345 if (flags & MDI_AFTER_STATE_CHANGE) { 4346 if (flags & MDI_ENABLE_OP) { 4347 mutex_enter(&vhci_global_mutex); 4348 cv_broadcast(&vhci_cv); 4349 mutex_exit(&vhci_global_mutex); 4350 } 4351 if (vlun->svl_setcap_done) { 4352 (void) vhci_pHCI_cap(&svp->svp_psd->sd_address, 4353 "sector-size", vlun->svl_sector_size, 4354 1, pip); 4355 } 4356 4357 /* 4358 * Release the LUN 4359 */ 4360 VHCI_RELEASE_LUN(vlun); 4361 4362 /* 4363 * Path transition is complete. 4364 * Run callback to indicate target driver to 4365 * retry to prevent IO starvation. 4366 */ 4367 if (scsi_callback_id != 0) { 4368 ddi_run_callback(&scsi_callback_id); 4369 } 4370 } 4371 } else { 4372 switch (state) { 4373 case MDI_PATHINFO_STATE_ONLINE: 4374 rval = vhci_pathinfo_online(vdip, pip, flags); 4375 break; 4376 4377 case MDI_PATHINFO_STATE_OFFLINE: 4378 rval = vhci_pathinfo_offline(vdip, pip, flags); 4379 break; 4380 4381 default: 4382 break; 4383 } 4384 /* 4385 * Path transition is complete. 4386 * Run callback to indicate target driver to 4387 * retry to prevent IO starvation. 4388 */ 4389 if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) { 4390 ddi_run_callback(&scsi_callback_id); 4391 } 4392 return (rval); 4393 } 4394 4395 return (MDI_SUCCESS); 4396 } 4397 4398 /* 4399 * Parse the mpxio load balancing options. The datanameptr 4400 * will point to a string containing the load-balance-options value. 4401 * The load-balance-options value will be a property that 4402 * defines the load-balance algorithm and any arguments to that 4403 * algorithm. 4404 * For example: 4405 * device-type-mpxio-options-list= 4406 * "device-type=SUN SENA", "load-balance-options=logical-block-options" 4407 * "device-type=SUN SE6920", "round-robin-options"; 4408 * logical-block-options="load-balance=logical-block", "region-size=15"; 4409 * round-robin-options="load-balance=round-robin"; 4410 * 4411 * If the load-balance is not defined the load balance algorithm will 4412 * default to the global setting. 
Default values will be assigned 4413 * to the arguments (region-size=18), and any argument 4414 * that is not recognized will be ignored. 4415 */ 4416 static void 4417 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip, 4418 caddr_t datanameptr) 4419 { 4420 char *dataptr, *next_entry; 4421 caddr_t config_list = NULL; 4422 int config_list_len = 0, list_len = 0; 4423 int region_size = -1; 4424 client_lb_t load_balance; 4425 4426 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr, 4427 (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) { 4428 return; 4429 } 4430 4431 list_len = config_list_len; 4432 next_entry = config_list; 4433 while (config_list_len > 0) { 4434 dataptr = next_entry; 4435 4436 if (strncmp(mdi_load_balance, dataptr, 4437 strlen(mdi_load_balance)) == 0) { 4438 /* get the load-balance scheme */ 4439 dataptr += strlen(mdi_load_balance) + 1; 4440 if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) { 4441 (void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR); 4442 load_balance = LOAD_BALANCE_RR; 4443 } else if (strcmp(dataptr, 4444 LOAD_BALANCE_PROP_LBA) == 0) { 4445 (void) mdi_set_lb_policy(cdip, 4446 LOAD_BALANCE_LBA); 4447 load_balance = LOAD_BALANCE_LBA; 4448 } else if (strcmp(dataptr, 4449 LOAD_BALANCE_PROP_NONE) == 0) { 4450 (void) mdi_set_lb_policy(cdip, 4451 LOAD_BALANCE_NONE); 4452 load_balance = LOAD_BALANCE_NONE; 4453 } 4454 } else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE, 4455 strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) { 4456 int i = 0; 4457 char *ptr; 4458 char *tmp; 4459 4460 tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1); 4461 /* check for numeric value */ 4462 for (ptr = tmp; i < strlen(tmp); i++, ptr++) { 4463 if (!isdigit(*ptr)) { 4464 cmn_err(CE_WARN, 4465 "Illegal region size: %s." 4466 " Setting to default value: %d", 4467 tmp, 4468 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4469 region_size = 4470 LOAD_BALANCE_DEFAULT_REGION_SIZE; 4471 break; 4472 } 4473 } 4474 if (i >= strlen(tmp)) { 4475 region_size = stoi(&tmp); 4476 } 4477 (void) mdi_set_lb_region_size(cdip, region_size); 4478 } 4479 config_list_len -= (strlen(next_entry) + 1); 4480 next_entry += strlen(next_entry) + 1; 4481 } 4482 #ifdef DEBUG 4483 if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) { 4484 VHCI_DEBUG(1, (CE_NOTE, dip, 4485 "!vhci_parse_mpxio_lb_options: region-size: %d" 4486 " only valid for load-balance=logical-block\n", 4487 region_size)); 4488 } 4489 #endif 4490 if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) { 4491 VHCI_DEBUG(1, (CE_NOTE, dip, 4492 "!vhci_parse_mpxio_lb_options: No region-size" 4493 " defined for load-balance=logical-block." 4494 " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE)); 4495 (void) mdi_set_lb_region_size(cdip, 4496 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4497 } 4498 if (list_len > 0) { 4499 kmem_free(config_list, list_len); 4500 } 4501 } 4502 4503 /* 4504 * Parse the device-type-mpxio-options-list looking for the key of 4505 * "load-balance-options". If found, parse the load balancing options. 4506 * See the comment above vhci_get_device_type_mpxio_options() 4507 * for the format of the device-type-mpxio-options-list.
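 *
 * For example, given the sample entries shown above: when the duplet
 * "load-balance-options=logical-block-options" is found, the text after
 * the '=' names another property ("logical-block-options"), which
 * vhci_parse_mpxio_lb_options() then looks up to obtain
 * "load-balance=logical-block", "region-size=15".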
4508 */ 4509 static void 4510 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip, 4511 caddr_t datanameptr, int list_len) 4512 { 4513 char *dataptr; 4514 int len; 4515 4516 /* 4517 * get the data list 4518 */ 4519 dataptr = datanameptr; 4520 len = 0; 4521 while (len < list_len && 4522 strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR)) 4523 != 0) { 4524 if (strncmp(dataptr, LOAD_BALANCE_OPTIONS, 4525 strlen(LOAD_BALANCE_OPTIONS)) == 0) { 4526 len += strlen(LOAD_BALANCE_OPTIONS) + 1; 4527 dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1; 4528 vhci_parse_mpxio_lb_options(dip, cdip, dataptr); 4529 } 4530 len += strlen(dataptr) + 1; 4531 dataptr += strlen(dataptr) + 1; 4532 } 4533 } 4534 4535 /* 4536 * Compare the inquiry string returned from the device with the device-type. 4537 * Check for the existence of the device-type-mpxio-options-list and, 4538 * if found, parse the list checking for a match between the device-type 4539 * value and the inquiry string returned from the device. If a match 4540 * is found, parse the mpxio options list. The format of the 4541 * device-type-mpxio-options-list is: 4542 * device-type-mpxio-options-list= 4543 * "device-type=SUN SENA", "load-balance-options=logical-block-options" 4544 * "device-type=SUN SE6920", "round-robin-options"; 4545 * logical-block-options="load-balance=logical-block", "region-size=15"; 4546 * round-robin-options="load-balance=round-robin"; 4547 */ 4548 void 4549 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip, 4550 struct scsi_device *devp) 4551 { 4552 4553 caddr_t config_list = NULL; 4554 caddr_t vidptr, datanameptr; 4555 int vidlen, dupletlen = 0; 4556 int config_list_len = 0, len; 4557 struct scsi_inquiry *inq = devp->sd_inq; 4558 4559 /* 4560 * Look up the device-type-mpxio-options-list and walk through 4561 * the list, comparing the vendor id from the earlier inquiry command 4562 * with the vids in the list; if there is a match, look up 4563 * the mpxio-options value. 4564 */ 4565 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 4566 MPXIO_OPTIONS_LIST, 4567 (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) { 4568 4569 /* 4570 * Compare vids in each duplet - if it matches, 4571 * parse the mpxio options list.
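 *
 * A sketch of the packed property value walked below (NUL separators
 * shown as \0; contents follow the conf example above):
 *
 *	device-type=SUN SENA\0load-balance-options=logical-block-options\0
 *	device-type=SUN SE6920\0round-robin-options\0
 *
 * vidptr steps duplet by duplet, and the inquiry data starting at
 * inq->inq_vid is compared (bcmp) against the text following
 * "device-type=".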
4572 */ 4573 for (len = config_list_len, vidptr = config_list; len > 0; 4574 len -= dupletlen) { 4575 4576 dupletlen = 0; 4577 4578 if (strlen(vidptr) != 0 && 4579 strncmp(vidptr, DEVICE_TYPE_STR, 4580 strlen(DEVICE_TYPE_STR)) == 0) { 4581 /* point to next duplet */ 4582 datanameptr = vidptr + strlen(vidptr) + 1; 4583 /* add len of this duplet */ 4584 dupletlen += strlen(vidptr) + 1; 4585 /* get to device type */ 4586 vidptr += strlen(DEVICE_TYPE_STR) + 1; 4587 vidlen = strlen(vidptr); 4588 if ((vidlen != 0) && 4589 bcmp(inq->inq_vid, vidptr, vidlen) == 0) { 4590 vhci_parse_mpxio_options(dip, cdip, 4591 datanameptr, len - dupletlen); 4592 break; 4593 } 4594 /* get to next duplet */ 4595 vidptr += strlen(vidptr) + 1; 4596 } 4597 /* get to the next device-type */ 4598 while (len - dupletlen > 0 && 4599 strlen(vidptr) != 0 && 4600 strncmp(vidptr, DEVICE_TYPE_STR, 4601 strlen(DEVICE_TYPE_STR)) != 0) { 4602 dupletlen += strlen(vidptr) + 1; 4603 vidptr += strlen(vidptr) + 1; 4604 } 4605 } 4606 if (config_list_len > 0) { 4607 kmem_free(config_list, config_list_len); 4608 } 4609 } 4610 } 4611 4612 static int 4613 vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip, 4614 struct scsi_failover_ops *fo, 4615 scsi_vhci_lun_t *vlun, 4616 struct scsi_vhci *vhci) 4617 { 4618 struct scsi_path_opinfo opinfo; 4619 char *pclass, *best_pclass; 4620 4621 if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) { 4622 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: " 4623 "Failed to get operation info for path:%p\n", (void *)pip)); 4624 return (MDI_FAILURE); 4625 } 4626 /* set the xlf capable flag in the vlun for future use */ 4627 vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable; 4628 (void) mdi_prop_update_string(pip, "path-class", 4629 opinfo.opinfo_path_attr); 4630 4631 pclass = opinfo.opinfo_path_attr; 4632 if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) { 4633 mutex_enter(&vlun->svl_mutex); 4634 if (vlun->svl_active_pclass != NULL) { 4635 if (strcmp(vlun->svl_active_pclass, pclass) != 0) { 4636 mutex_exit(&vlun->svl_mutex); 4637 /* 4638 * Externally initiated failover has happened; 4639 * force the path state to be STANDBY/ONLINE, 4640 * the next IO will trigger failover and thus 4641 * sync up the pathstates. The reason we don't 4642 * sync up immediately by invoking 4643 * vhci_update_pathstates() is that it 4644 * needs a VHCI_HOLD_LUN() and we don't 4645 * want to block here. 4646 * 4647 * Further, if the device is an ALUA device, 4648 * then failure to exactly match 'pclass' and 4649 * 'svl_active_pclass' (as is the case here) 4650 * indicates that the currently active path 4651 * is a 'non-optimized' path - which means 4652 * that 'svl_active_pclass' needs to be 4653 * replaced with the opinfo.opinfo_path_attr 4654 * value. 4655 */ 4656 4657 if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) { 4658 char *tptr; 4659 4660 /* 4661 * The device is ALUA compliant. The 4662 * state needs to be changed to online 4663 * rather than the standby state, which is 4664 * typically done for an asymmetric 4665 * device that is not ALUA compliant.
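 *
 * Assuming standard TPGS/ALUA semantics, the steady-state mapping
 * this function applies is roughly:
 *
 *	SCSI_PATH_ACTIVE	-> MDI_PATHINFO_STATE_ONLINE
 *	SCSI_PATH_ACTIVE_NONOPT	-> MDI_PATHINFO_STATE_ONLINE
 *	SCSI_PATH_INACTIVE	-> MDI_PATHINFO_STATE_STANDBY
 *
 * with opinfo_preferred recorded via mdi_pi_set_preferred(); the
 * externally-initiated-failover special cases deviate from this.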
4666 */ 4667 mdi_pi_set_state(pip, 4668 MDI_PATHINFO_STATE_ONLINE); 4669 tptr = kmem_alloc(strlen 4670 (opinfo.opinfo_path_attr)+1, 4671 KM_SLEEP); 4672 (void) strlcpy(tptr, 4673 opinfo.opinfo_path_attr, 4674 (strlen(opinfo.opinfo_path_attr) 4675 +1)); 4676 mutex_enter(&vlun->svl_mutex); 4677 kmem_free(vlun->svl_active_pclass, 4678 strlen(vlun->svl_active_pclass)+1); 4679 vlun->svl_active_pclass = tptr; 4680 mutex_exit(&vlun->svl_mutex); 4681 } else { 4682 /* 4683 * Non ALUA device case. 4684 */ 4685 mdi_pi_set_state(pip, 4686 MDI_PATHINFO_STATE_STANDBY); 4687 } 4688 vlun->svl_fo_support = opinfo.opinfo_mode; 4689 mdi_pi_set_preferred(pip, 4690 opinfo.opinfo_preferred); 4691 return (MDI_SUCCESS); 4692 } 4693 } else { 4694 char *tptr; 4695 4696 /* 4697 * Let's release the mutex before we try to 4698 * allocate, since the allocation may 4699 * sleep. 4700 */ 4701 mutex_exit(&vlun->svl_mutex); 4702 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP); 4703 (void) strlcpy(tptr, pclass, (strlen(pclass)+1)); 4704 mutex_enter(&vlun->svl_mutex); 4705 vlun->svl_active_pclass = tptr; 4706 } 4707 mutex_exit(&vlun->svl_mutex); 4708 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE); 4709 vlun->svl_waiting_for_activepath = 0; 4710 } else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) { 4711 mutex_enter(&vlun->svl_mutex); 4712 if (vlun->svl_active_pclass == NULL) { 4713 char *tptr; 4714 4715 mutex_exit(&vlun->svl_mutex); 4716 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP); 4717 (void) strlcpy(tptr, pclass, (strlen(pclass)+1)); 4718 mutex_enter(&vlun->svl_mutex); 4719 vlun->svl_active_pclass = tptr; 4720 } 4721 mutex_exit(&vlun->svl_mutex); 4722 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE); 4723 vlun->svl_waiting_for_activepath = 0; 4724 } else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) { 4725 mutex_enter(&vlun->svl_mutex); 4726 if (vlun->svl_active_pclass != NULL) { 4727 if (strcmp(vlun->svl_active_pclass, pclass) == 0) { 4728 mutex_exit(&vlun->svl_mutex); 4729 /* 4730 * externally initiated failover has happened; 4731 * force state to ONLINE (see comment above) 4732 */ 4733 mdi_pi_set_state(pip, 4734 MDI_PATHINFO_STATE_ONLINE); 4735 vlun->svl_fo_support = opinfo.opinfo_mode; 4736 mdi_pi_set_preferred(pip, 4737 opinfo.opinfo_preferred); 4738 return (MDI_SUCCESS); 4739 } 4740 } 4741 mutex_exit(&vlun->svl_mutex); 4742 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY); 4743 4744 /* 4745 * Initiate auto-failback, if enabled, for the path if the 4746 * path-state is transitioning from OFFLINE->STANDBY and the 4747 * pathclass is the preferred pathclass for this storage. 4748 * NOTE: In the case where opinfo_path_state is SCSI_PATH_ACTIVE 4749 * (above), where the pi state is set to STANDBY, we don't 4750 * initiate auto-failback, as the next IO will take care of 4751 * this. See the comment above.
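 *
 * Auto-failback itself is gated by VHCI_CONF_FLAGS_AUTO_FAILBACK in
 * vhci->vhci_conf_flags, which reflects the scsi_vhci.conf setting; a
 * representative stanza (an assumption about the conf syntax, shown
 * only for illustration) is:
 *
 *	auto-failback="enable";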
4752 */ 4753 (void) fo->sfo_pathclass_next(NULL, &best_pclass, 4754 vlun->svl_fops_ctpriv); 4755 if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) == 4756 VHCI_CONF_FLAGS_AUTO_FAILBACK) && 4757 (strcmp(pclass, best_pclass) == 0) && 4758 ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)|| 4759 (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) { 4760 VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p" 4761 " OFFLINE->STANDBY transition for lun %s\n", 4762 best_pclass, (void *)pip, vlun->svl_lun_wwn)); 4763 (void) taskq_dispatch(vhci->vhci_taskq, 4764 vhci_initiate_auto_failback, (void *) vlun, 4765 KM_SLEEP); 4766 } 4767 } 4768 vlun->svl_fo_support = opinfo.opinfo_mode; 4769 mdi_pi_set_preferred(pip, opinfo.opinfo_preferred); 4770 4771 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x," 4772 " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n", 4773 opinfo.opinfo_rev, opinfo.opinfo_path_state, 4774 opinfo.opinfo_preferred, opinfo.opinfo_mode)); 4775 4776 return (MDI_SUCCESS); 4777 } 4778 4779 /* 4780 * Form the kstat name and call mdi_pi_kstat_create(). 4781 */ 4782 void 4783 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip) 4784 { 4785 dev_info_t *tgt_dip; 4786 dev_info_t *pdip; 4787 char *guid; 4788 char *target_port, *target_port_dup; 4789 char ks_name[KSTAT_STRLEN]; 4790 uint_t pid; 4791 int by_id; 4792 mod_hash_val_t hv; 4793 4794 4795 /* return if we have already allocated kstats */ 4796 if (mdi_pi_kstat_exists(pip)) 4797 return; 4798 4799 /* 4800 * We need instance numbers to create a kstat name; return if we don't 4801 * have instance numbers assigned yet. 4802 */ 4803 tgt_dip = mdi_pi_get_client(pip); 4804 pdip = mdi_pi_get_phci(pip); 4805 if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1)) 4806 return; 4807 4808 /* 4809 * A path oriented kstat has a ks_name of the form: 4810 * 4811 * <client-driver><instance>.t<pid>.<pHCI-driver><instance> 4812 * 4813 * We maintain a bidirectional 'target-port' to <pid> map, 4814 * called targetmap. All pathinfo nodes with the same 4815 * 'target-port' map to the same <pid>. The iostat(1M) code, 4816 * when parsing a path oriented kstat name, uses the <pid> as 4817 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order 4818 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats, 4819 * this ioctl needs to translate a <pid> to a 'target-port' 4820 * even after all pathinfo nodes associated with the 4821 * 'target-port' have been destroyed. This is needed to support 4822 * consistent first-iteration activity-since-boot iostat(1M) 4823 * output. Because of this requirement, the mapping can't be 4824 * based on pathinfo information in a devinfo snapshot. 4825 */ 4826 4827 /* determine 'target-port' */ 4828 if (mdi_prop_lookup_string(pip, 4829 SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) { 4830 target_port_dup = i_ddi_strdup(target_port, KM_SLEEP); 4831 (void) mdi_prop_free(target_port); 4832 by_id = 1; 4833 } else { 4834 /* 4835 * If the pHCI did not set up 'target-port' on this 4836 * pathinfo node, assume that our client is the only 4837 * one with paths to the device by using the guid 4838 * value as the 'target-port'. Since no other client 4839 * will have the same guid, no other client will use 4840 * the same <pid>. NOTE: a client with an instance 4841 * number always has a guid.
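 *
 * Either way, the resulting ks_name follows the format above. For
 * example, with hypothetical instance numbers, a path from client sd0
 * to pHCI fp2 that maps to <pid> 3 yields:
 *
 *	sd0.t3.fp2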
4842 */ 4843 (void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, 4844 PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid); 4845 target_port_dup = i_ddi_strdup(guid, KM_SLEEP); 4846 ddi_prop_free(guid); 4847 4848 /* 4849 * For this type of mapping we don't want the 4850 * <id> -> 'target-port' mapping to be made. This 4851 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl 4852 * to fail, and the iostat(1M) long '-n' output will 4853 * still use the <pid>. We do this because we just 4854 * made up the 'target-port' using the guid, and we 4855 * don't want to expose that fact in iostat output. 4856 */ 4857 by_id = 0; 4858 } 4859 4860 /* find/establish <pid> given 'target-port' */ 4861 mutex_enter(&vhci_targetmap_mutex); 4862 if (mod_hash_find(vhci_targetmap_byport, 4863 (mod_hash_key_t)target_port_dup, &hv) == 0) { 4864 pid = (int)(intptr_t)hv; /* mapping exists */ 4865 } else { 4866 pid = vhci_targetmap_pid++; /* new mapping */ 4867 4868 (void) mod_hash_insert(vhci_targetmap_byport, 4869 (mod_hash_key_t)target_port_dup, 4870 (mod_hash_val_t)(intptr_t)pid); 4871 if (by_id) { 4872 (void) mod_hash_insert(vhci_targetmap_bypid, 4873 (mod_hash_key_t)(uintptr_t)pid, 4874 (mod_hash_val_t)(uintptr_t)target_port_dup); 4875 } 4876 target_port_dup = NULL; /* owned by hash */ 4877 } 4878 mutex_exit(&vhci_targetmap_mutex); 4879 4880 /* form kstat name */ 4881 (void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d", 4882 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip), 4883 pid, ddi_driver_name(pdip), ddi_get_instance(pdip)); 4884 4885 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p " 4886 "kstat %s: pid %x <-> port %s\n", (void *)pip, 4887 ks_name, pid, target_port_dup)); 4888 if (target_port_dup) 4889 kmem_free(target_port_dup, strlen(target_port_dup) + 1); 4890 4891 /* call mdi to create kstats with the name we built */ 4892 (void) mdi_pi_kstat_create(pip, ks_name); 4893 } 4894 4895 /* ARGSUSED */ 4896 static int 4897 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 4898 { 4899 scsi_hba_tran_t *hba = NULL; 4900 struct scsi_device *psd = NULL; 4901 scsi_vhci_lun_t *vlun = NULL; 4902 dev_info_t *pdip = NULL; 4903 dev_info_t *cdip; 4904 dev_info_t *tgt_dip; 4905 struct scsi_vhci *vhci; 4906 char *guid; 4907 struct scsi_failover_ops *sfo; 4908 scsi_vhci_priv_t *svp = NULL; 4909 struct scsi_address *ap; 4910 struct scsi_pkt *pkt; 4911 int rval = MDI_FAILURE; 4912 mpapi_item_list_t *list_ptr; 4913 mpapi_lu_data_t *ld; 4914 4915 ASSERT(vdip != NULL); 4916 ASSERT(pip != NULL); 4917 4918 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4919 ASSERT(vhci != NULL); 4920 4921 pdip = mdi_pi_get_phci(pip); 4922 hba = ddi_get_driver_private(pdip); 4923 ASSERT(hba != NULL); 4924 4925 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4926 ASSERT(svp != NULL); 4927 4928 cdip = mdi_pi_get_client(pip); 4929 ASSERT(cdip != NULL); 4930 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS, 4931 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 4932 VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid " 4933 "property failed")); 4934 goto failure; 4935 } 4936 4937 vlun = vhci_lun_lookup(cdip); 4938 ASSERT(vlun != NULL); 4939 4940 ddi_prop_free(guid); 4941 4942 vlun->svl_dip = mdi_pi_get_client(pip); 4943 ASSERT(vlun->svl_dip != NULL); 4944 4945 psd = svp->svp_psd; 4946 ASSERT(psd != NULL); 4947 4948 /* 4949 * Get inquiry data into pathinfo related scsi_device structure. 
4950 * Free sd_inq when the pathinfo-related scsi_device structure is destroyed 4951 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own 4952 * copy of scsi_device and scsi_inquiry data on a per-path basis. 4953 */ 4954 if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 4955 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: " 4956 "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval)); 4957 rval = MDI_FAILURE; 4958 goto failure; 4959 } 4960 4961 /* 4962 * See if we have a failover module to support the device. 4963 * 4964 * We re-probe to determine the failover ops for each path. This 4965 * is done in case there are any path-specific side-effects associated 4966 * with the sfo_device_probe implementation. 4967 * 4968 * Give the first successful sfo_device_probe the opportunity to 4969 * establish 'ctpriv', the vlun/client private data. The ctpriv will 4970 * then be passed into the failover module on all other sfo_device_*() 4971 * operations (and must be freed by the sfo_device_unprobe implementation). 4972 * 4973 * NOTE: While sfo_device_probe is done once per path, 4974 * sfo_device_unprobe only occurs once - when the vlun is destroyed. 4975 * 4976 * NOTE: We don't currently support a per-path fops private data 4977 * mechanism. 4978 */ 4979 sfo = vhci_dev_fo(vdip, psd, 4980 &vlun->svl_fops_ctpriv, &vlun->svl_fops_name); 4981 4982 /* check path configuration result with current vlun state */ 4983 if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) || 4984 (sfo && vlun->svl_not_supported) || 4985 ((sfo == NULL) && vlun->svl_fops)) { 4986 /* Getting different results for different paths. */ 4987 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, 4988 "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n", 4989 (void *)pip)); 4990 cmn_err(CE_WARN, "scsi_vhci: failover contradiction: " 4991 "'%s'.vs.'%s': path %s\n", 4992 vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL", 4993 sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip)); 4994 vlun->svl_not_supported = 1; 4995 rval = MDI_NOT_SUPPORTED; 4996 goto done; 4997 } else if (sfo == NULL) { 4998 /* No failover module - device not supported under vHCI. */ 4999 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, 5000 "!vhci_pathinfo_online: dev (path 0x%p) not " 5001 "supported\n", (void *)pip)); 5002 5003 /* XXX does this contradict vhci_is_dev_supported ? */ 5004 vlun->svl_not_supported = 1; 5005 rval = MDI_NOT_SUPPORTED; 5006 goto done; 5007 } 5008 5009 /* failover supported for device - save failover_ops in vlun */ 5010 vlun->svl_fops = sfo; 5011 ASSERT(vlun->svl_fops_name != NULL); 5012 5013 /* 5014 * Obtain the device-type based mpxio options as specified in 5015 * the scsi_vhci.conf file. 5016 * 5017 * NOTE: currently, the end result is a call to 5018 * mdi_set_lb_region_size(). 5019 */ 5020 tgt_dip = psd->sd_dev; 5021 ASSERT(tgt_dip != NULL); 5022 vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd); 5023 5024 /* 5025 * The device probe or the options in the conf file may have 5026 * set/changed the lb policy; save the current value.
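 *
 * The global default itself comes from scsi_vhci.conf; a representative
 * line (an assumption about the local configuration, not something read
 * at this point in the code) is:
 *
 *	load-balance="round-robin";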
5027 */ 5028 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip); 5029 5030 /* 5031 * If PGR is active, revalidate the key and register on this path 5032 * as well, if the key is still valid. 5033 */ 5034 sema_p(&vlun->svl_pgr_sema); 5035 if (vlun->svl_pgr_active) { 5036 rval = vhci_pgr_validate_and_register(svp); 5037 if (rval != 1) { 5038 rval = MDI_FAILURE; 5039 sema_v(&vlun->svl_pgr_sema); 5040 goto failure; 5041 } 5042 } 5043 sema_v(&vlun->svl_pgr_sema); 5044 5045 if (svp->svp_new_path) { 5046 /* 5047 * Last chance to perform any cleanup operations on this 5048 * new path before making this path completely online. 5049 */ 5050 svp->svp_new_path = 0; 5051 5052 /* 5053 * If scsi_vhci knows the lun is already RESERVE'd, 5054 * then skip issuing RELEASE on the new path. 5055 */ 5056 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) { 5057 /* 5058 * Issue SCSI-2 RELEASE only for the first time on 5059 * a new path just in case the host rebooted and 5060 * a reservation is still pending on this path. 5061 * IBM Shark storage does not clear RESERVE upon 5062 * host reboot. 5063 */ 5064 ap = &psd->sd_address; 5065 pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0, 5066 sizeof (struct scsi_arq_status), 0, 0, 5067 SLEEP_FUNC, NULL); 5068 if (pkt == NULL) { 5069 VHCI_DEBUG(1, (CE_NOTE, NULL, 5070 "!vhci_pathinfo_online: " 5071 "Release init_pkt failed :%p\n", 5072 (void *)pip)); 5073 rval = MDI_FAILURE; 5074 goto failure; 5075 } 5076 pkt->pkt_cdbp[0] = SCMD_RELEASE; 5077 pkt->pkt_time = 60; 5078 5079 VHCI_DEBUG(1, (CE_NOTE, NULL, 5080 "!vhci_path_online: path:%p " 5081 "Issued SCSI-2 RELEASE\n", (void *)pip)); 5082 5083 /* Ignore the return value */ 5084 (void) vhci_do_scsi_cmd(pkt); 5085 scsi_destroy_pkt(pkt); 5086 } 5087 } 5088 5089 rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci); 5090 if (rval == MDI_FAILURE) { 5091 goto failure; 5092 } 5093 5094 /* Initialize MP-API data */ 5095 vhci_update_mpapi_data(vhci, vlun, pip); 5096 5097 /* 5098 * MP-API also needs the Inquiry data to be maintained in the 5099 * mp_vendor_prop_t structure, so find the lun and update its 5100 * structure with this data. 5101 */ 5102 list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL, 5103 MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun); 5104 ld = (mpapi_lu_data_t *)list_ptr->item->idata; 5105 if (ld != NULL) { 5106 bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8); 5107 bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16); 5108 bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4); 5109 } else { 5110 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: " 5111 "mpapi_lu_data_t is NULL")); 5112 } 5113 5114 /* create kstats for path */ 5115 vhci_kstat_create_pathinfo(pip); 5116 5117 done: 5118 mutex_enter(&vhci_global_mutex); 5119 cv_broadcast(&vhci_cv); 5120 mutex_exit(&vhci_global_mutex); 5121 5122 if (vlun->svl_setcap_done) { 5123 (void) vhci_pHCI_cap(ap, "sector-size", 5124 vlun->svl_sector_size, 1, pip); 5125 } 5126 5127 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n", 5128 (void *)pip)); 5129 5130 failure: 5131 return (rval); 5132 } 5133 5134 /* 5135 * Path offline handler. Release all bindings that will not be 5136 * released by the normal packet transport/completion code path. 5137 * Since we don't (presently) keep any bindings alive outside of 5138 * the in-transport packets (which will be released on completion), 5139 * there is not much to do here.
5140 */ 5141 /* ARGSUSED */ 5142 static int 5143 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 5144 { 5145 scsi_hba_tran_t *hba = NULL; 5146 struct scsi_device *psd = NULL; 5147 dev_info_t *pdip = NULL; 5148 dev_info_t *cdip = NULL; 5149 scsi_vhci_priv_t *svp = NULL; 5150 5151 ASSERT(vdip != NULL); 5152 ASSERT(pip != NULL); 5153 5154 pdip = mdi_pi_get_phci(pip); 5155 ASSERT(pdip != NULL); 5156 if (pdip == NULL) { 5157 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5158 "phci dip", (void *)pip)); 5159 return (MDI_FAILURE); 5160 } 5161 5162 cdip = mdi_pi_get_client(pip); 5163 ASSERT(cdip != NULL); 5164 if (cdip == NULL) { 5165 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5166 "client dip", (void *)pip)); 5167 return (MDI_FAILURE); 5168 } 5169 5170 hba = ddi_get_driver_private(pdip); 5171 ASSERT(hba != NULL); 5172 5173 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 5174 if (svp == NULL) { 5175 /* 5176 * An mdi_pathinfo node in the INIT state can have its vHCI 5177 * private information set to null. 5178 */ 5179 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5180 "svp is NULL for pip 0x%p\n", (void *)pip)); 5181 return (MDI_SUCCESS); 5182 } 5183 5184 psd = svp->svp_psd; 5185 ASSERT(psd != NULL); 5186 5187 mutex_enter(&svp->svp_mutex); 5188 5189 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5190 "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip)); 5191 while (svp->svp_cmds != 0) { 5192 if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex, 5193 drv_usectohz(vhci_path_quiesce_timeout * 1000000), 5194 TR_CLOCK_TICK) == -1) { 5195 /* 5196 * The timeout was reached without the condition 5197 * being signaled. 5198 */ 5199 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5200 "Timeout reached on path 0x%p without the cond\n", 5201 (void *)pip)); 5202 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5203 "%d cmds still pending on path: 0x%p\n", 5204 svp->svp_cmds, (void *)pip)); 5205 break; 5206 } 5207 } 5208 mutex_exit(&svp->svp_mutex); 5209 5210 /* 5211 * Check to see if this vlun has an active SCSI-II RESERVE and whether 5212 * this is the pip for the path that has been reserved. 5213 * If so, clear the reservation by sending a reset, so the host will not 5214 * get a reservation conflict. Reset the flag VLUN_RESERVE_ACTIVE_FLG 5215 * for this lun. Also, a reset notify is sent to the target driver 5216 * just in case the POR check condition is cleared by some other layer 5217 * in the stack.
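 *
 * As implemented below, the recovery reset is retried once after a one
 * second delay (delay(1 * drv_usectohz(1000000))) before the driver
 * gives up on clearing the reservation.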
5218 */ 5219 if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 5220 if (pip == svp->svp_svl->svl_resrv_pip) { 5221 if (vhci_recovery_reset(svp->svp_svl, 5222 &svp->svp_psd->sd_address, TRUE, 5223 VHCI_DEPTH_TARGET) == 0) { 5224 VHCI_DEBUG(1, (CE_NOTE, NULL, 5225 "!vhci_pathinfo_offline (pip:%p):" 5226 "reset failed, retrying\n", (void *)pip)); 5227 delay(1*drv_usectohz(1000000)); 5228 if (vhci_recovery_reset(svp->svp_svl, 5229 &svp->svp_psd->sd_address, TRUE, 5230 VHCI_DEPTH_TARGET) == 0) { 5231 VHCI_DEBUG(1, (CE_NOTE, NULL, 5232 "!vhci_pathinfo_offline " 5233 "(pip:%p): reset failed, " 5234 "giving up!\n", (void *)pip)); 5235 } 5236 } 5237 svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 5238 } 5239 } 5240 5241 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE); 5242 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED); 5243 5244 VHCI_DEBUG(1, (CE_NOTE, NULL, 5245 "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip)); 5246 return (MDI_SUCCESS); 5247 } 5248 5249 5250 /* 5251 * routine for SCSI VHCI IOCTL implementation. 5252 */ 5253 /* ARGSUSED */ 5254 static int 5255 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 5256 { 5257 struct scsi_vhci *vhci; 5258 dev_info_t *vdip; 5259 mdi_pathinfo_t *pip; 5260 int instance, held; 5261 int retval = 0; 5262 caddr_t phci_path = NULL, client_path = NULL; 5263 caddr_t paddr = NULL; 5264 sv_iocdata_t ioc; 5265 sv_iocdata_t *pioc = &ioc; 5266 sv_switch_to_cntlr_iocdata_t iocsc; 5267 sv_switch_to_cntlr_iocdata_t *piocsc = &iocsc; 5268 caddr_t s; 5269 scsi_vhci_lun_t *vlun; 5270 struct scsi_failover_ops *fo; 5271 char *pclass; 5272 5273 /* Check for validity of vhci structure */ 5274 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev))); 5275 if (vhci == NULL) { 5276 return (ENXIO); 5277 } 5278 5279 mutex_enter(&vhci->vhci_mutex); 5280 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5281 mutex_exit(&vhci->vhci_mutex); 5282 return (ENXIO); 5283 } 5284 mutex_exit(&vhci->vhci_mutex); 5285 5286 /* Get the vhci dip */ 5287 vdip = vhci->vhci_dip; 5288 ASSERT(vdip != NULL); 5289 instance = ddi_get_instance(vdip); 5290 5291 /* Allocate memory for getting parameters from userland */ 5292 phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5293 client_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5294 paddr = kmem_zalloc(MAXNAMELEN, KM_SLEEP); 5295 5296 /* 5297 * Set a local variable indicating the ioctl name. Used for 5298 * printing debug strings. 
5299 */ 5300 switch (cmd) { 5301 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5302 s = "GET_CLIENT_MULTIPATH_INFO"; 5303 break; 5304 5305 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5306 s = "GET_PHCI_MULTIPATH_INFO"; 5307 break; 5308 5309 case SCSI_VHCI_GET_CLIENT_NAME: 5310 s = "GET_CLIENT_NAME"; 5311 break; 5312 5313 case SCSI_VHCI_PATH_ONLINE: 5314 s = "PATH_ONLINE"; 5315 break; 5316 5317 case SCSI_VHCI_PATH_OFFLINE: 5318 s = "PATH_OFFLINE"; 5319 break; 5320 5321 case SCSI_VHCI_PATH_STANDBY: 5322 s = "PATH_STANDBY"; 5323 break; 5324 5325 case SCSI_VHCI_PATH_TEST: 5326 s = "PATH_TEST"; 5327 break; 5328 5329 case SCSI_VHCI_SWITCH_TO_CNTLR: 5330 s = "SWITCH_TO_CNTLR"; 5331 break; 5332 case SCSI_VHCI_PATH_DISABLE: 5333 s = "PATH_DISABLE"; 5334 break; 5335 case SCSI_VHCI_PATH_ENABLE: 5336 s = "PATH_ENABLE"; 5337 break; 5338 5339 case SCSI_VHCI_GET_TARGET_LONGNAME: 5340 s = "GET_TARGET_LONGNAME"; 5341 break; 5342 5343 #ifdef DEBUG 5344 case SCSI_VHCI_CONFIGURE_PHCI: 5345 s = "CONFIGURE_PHCI"; 5346 break; 5347 5348 case SCSI_VHCI_UNCONFIGURE_PHCI: 5349 s = "UNCONFIGURE_PHCI"; 5350 break; 5351 #endif 5352 5353 default: 5354 s = "Unknown"; 5355 vhci_log(CE_NOTE, vdip, 5356 "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd); 5357 retval = ENOTSUP; 5358 break; 5359 } 5360 if (retval != 0) { 5361 goto end; 5362 } 5363 5364 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s)); 5365 5366 /* 5367 * Get IOCTL parameters from userland 5368 */ 5369 switch (cmd) { 5370 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5371 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5372 case SCSI_VHCI_GET_CLIENT_NAME: 5373 case SCSI_VHCI_PATH_ONLINE: 5374 case SCSI_VHCI_PATH_OFFLINE: 5375 case SCSI_VHCI_PATH_STANDBY: 5376 case SCSI_VHCI_PATH_TEST: 5377 case SCSI_VHCI_PATH_DISABLE: 5378 case SCSI_VHCI_PATH_ENABLE: 5379 case SCSI_VHCI_GET_TARGET_LONGNAME: 5380 #ifdef DEBUG 5381 case SCSI_VHCI_CONFIGURE_PHCI: 5382 case SCSI_VHCI_UNCONFIGURE_PHCI: 5383 #endif 5384 retval = vhci_get_iocdata((const void *)data, pioc, mode, s); 5385 break; 5386 5387 case SCSI_VHCI_SWITCH_TO_CNTLR: 5388 retval = vhci_get_iocswitchdata((const void *)data, piocsc, 5389 mode, s); 5390 break; 5391 } 5392 if (retval != 0) { 5393 goto end; 5394 } 5395 5396 5397 /* 5398 * Process the IOCTL 5399 */ 5400 switch (cmd) { 5401 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5402 { 5403 uint_t num_paths; /* Num paths to client dev */ 5404 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5405 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5406 dev_info_t *cdip; /* Client device dip */ 5407 5408 if (pioc->ret_elem == NULL) { 5409 retval = EINVAL; 5410 break; 5411 } 5412 5413 /* Get client device path from user land */ 5414 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5415 retval = EFAULT; 5416 break; 5417 } 5418 5419 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5420 "client <%s>", s, client_path)); 5421 5422 /* Get number of paths to this client device */ 5423 if ((cdip = mdi_client_path2devinfo(vdip, client_path)) 5424 == NULL) { 5425 retval = ENXIO; 5426 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5427 "client dip doesn't exist. 
invalid path <%s>", 5428 s, client_path)); 5429 break; 5430 } 5431 num_paths = mdi_client_get_path_count(cdip); 5432 5433 if (ddi_copyout(&num_paths, pioc->ret_elem, 5434 sizeof (num_paths), mode)) { 5435 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5436 "num_paths copyout failed", s)); 5437 retval = EFAULT; 5438 break; 5439 } 5440 5441 /* If user just wanted num_paths, then return */ 5442 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5443 num_paths == 0) { 5444 break; 5445 } 5446 5447 /* Set num_paths to value as much as can be sent to userland */ 5448 if (num_paths > pioc->buf_elem) { 5449 num_paths = pioc->buf_elem; 5450 } 5451 5452 /* Allocate memory and get userland pointers */ 5453 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5454 pioc, mode, s) != 0) { 5455 retval = EFAULT; 5456 break; 5457 } 5458 ASSERT(upibuf != NULL); 5459 ASSERT(kpibuf != NULL); 5460 5461 /* 5462 * Get the path information and send it to userland. 5463 */ 5464 if (vhci_get_client_path_list(cdip, kpibuf, num_paths) 5465 != MDI_SUCCESS) { 5466 retval = ENXIO; 5467 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5468 break; 5469 } 5470 5471 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5472 pioc, mode, s)) { 5473 retval = EFAULT; 5474 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5475 break; 5476 } 5477 5478 /* Free the memory allocated for path information */ 5479 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5480 break; 5481 } 5482 5483 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5484 { 5485 uint_t num_paths; /* Num paths to client dev */ 5486 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5487 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5488 dev_info_t *pdip; /* PHCI device dip */ 5489 5490 if (pioc->ret_elem == NULL) { 5491 retval = EINVAL; 5492 break; 5493 } 5494 5495 /* Get PHCI device path from user land */ 5496 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5497 retval = EFAULT; 5498 break; 5499 } 5500 5501 VHCI_DEBUG(6, (CE_WARN, vdip, 5502 "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path)); 5503 5504 /* Get number of devices associated with this PHCI device */ 5505 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5506 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5507 "phci dip doesn't exist. invalid path <%s>", 5508 s, phci_path)); 5509 retval = ENXIO; 5510 break; 5511 } 5512 5513 num_paths = mdi_phci_get_path_count(pdip); 5514 5515 if (ddi_copyout(&num_paths, pioc->ret_elem, 5516 sizeof (num_paths), mode)) { 5517 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5518 "num_paths copyout failed", s)); 5519 retval = EFAULT; 5520 break; 5521 } 5522 5523 /* If user just wanted num_paths, then return */ 5524 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5525 num_paths == 0) { 5526 break; 5527 } 5528 5529 /* Set num_paths to value as much as can be sent to userland */ 5530 if (num_paths > pioc->buf_elem) { 5531 num_paths = pioc->buf_elem; 5532 } 5533 5534 /* Allocate memory and get userland pointers */ 5535 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5536 pioc, mode, s) != 0) { 5537 retval = EFAULT; 5538 break; 5539 } 5540 ASSERT(upibuf != NULL); 5541 ASSERT(kpibuf != NULL); 5542 5543 /* 5544 * Get the path information and send it to userland. 
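 *
 * For reference, userland drives this as a two-phase call. A minimal
 * sketch, assuming the sv_iocdata_t fields referenced in this function
 * (error handling omitted; 'fd' is an assumed open descriptor on this
 * driver's node):
 *
 *	sv_iocdata_t ioc;
 *	uint_t n;
 *
 *	ioc.phci = phci_path;
 *	ioc.ret_elem = &n;
 *	ioc.buf_elem = 0;
 *	ioc.ret_buf = NULL;
 *	(void) ioctl(fd, SCSI_VHCI_GET_PHCI_MULTIPATH_INFO, &ioc);
 *	ioc.buf_elem = n;
 *	ioc.ret_buf = calloc(n, sizeof (sv_path_info_t));
 *	(void) ioctl(fd, SCSI_VHCI_GET_PHCI_MULTIPATH_INFO, &ioc);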
5545 */ 5546 if (vhci_get_phci_path_list(pdip, kpibuf, num_paths) 5547 != MDI_SUCCESS) { 5548 retval = ENXIO; 5549 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5550 break; 5551 } 5552 5553 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5554 pioc, mode, s)) { 5555 retval = EFAULT; 5556 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5557 break; 5558 } 5559 5560 /* Free the memory allocated for path information */ 5561 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5562 break; 5563 } 5564 5565 case SCSI_VHCI_GET_CLIENT_NAME: 5566 { 5567 dev_info_t *cdip, *pdip; 5568 5569 /* Get PHCI path and device address from user land */ 5570 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5571 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5572 retval = EFAULT; 5573 break; 5574 } 5575 5576 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5577 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5578 5579 /* Get the PHCI dip */ 5580 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5581 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5582 "phci dip doesn't exist. invalid path <%s>", 5583 s, phci_path)); 5584 retval = ENXIO; 5585 break; 5586 } 5587 5588 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5589 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5590 "pathinfo doesn't exist. invalid device addr", s)); 5591 retval = ENXIO; 5592 break; 5593 } 5594 5595 /* Get the client device pathname and send to userland */ 5596 cdip = mdi_pi_get_client(pip); 5597 vhci_ioc_devi_to_path(cdip, client_path); 5598 5599 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5600 "client <%s>", s, client_path)); 5601 5602 if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) { 5603 retval = EFAULT; 5604 break; 5605 } 5606 break; 5607 } 5608 5609 case SCSI_VHCI_PATH_ONLINE: 5610 case SCSI_VHCI_PATH_OFFLINE: 5611 case SCSI_VHCI_PATH_STANDBY: 5612 case SCSI_VHCI_PATH_TEST: 5613 { 5614 dev_info_t *pdip; /* PHCI dip */ 5615 5616 /* Get PHCI path and device address from user land */ 5617 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5618 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5619 retval = EFAULT; 5620 break; 5621 } 5622 5623 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5624 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5625 5626 /* Get the PHCI dip */ 5627 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5628 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5629 "phci dip doesn't exist. invalid path <%s>", 5630 s, phci_path)); 5631 retval = ENXIO; 5632 break; 5633 } 5634 5635 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5636 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5637 "pathinfo doesn't exist. 
invalid device addr", s)); 5638 retval = ENXIO; 5639 break; 5640 } 5641 5642 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5643 "Calling MDI function to change device state", s)); 5644 5645 switch (cmd) { 5646 case SCSI_VHCI_PATH_ONLINE: 5647 retval = mdi_pi_online(pip, 0); 5648 break; 5649 5650 case SCSI_VHCI_PATH_OFFLINE: 5651 retval = mdi_pi_offline(pip, 0); 5652 break; 5653 5654 case SCSI_VHCI_PATH_STANDBY: 5655 retval = mdi_pi_standby(pip, 0); 5656 break; 5657 5658 case SCSI_VHCI_PATH_TEST: 5659 break; 5660 } 5661 break; 5662 } 5663 5664 case SCSI_VHCI_SWITCH_TO_CNTLR: 5665 { 5666 dev_info_t *cdip; 5667 struct scsi_device *devp; 5668 5669 /* Get the client device pathname */ 5670 if (ddi_copyin(piocsc->client, client_path, 5671 MAXPATHLEN, mode)) { 5672 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5673 "client_path copyin failed", s)); 5674 retval = EFAULT; 5675 break; 5676 } 5677 5678 /* Get the path class to which user wants to switch */ 5679 if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) { 5680 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5681 "controller_class copyin failed", s)); 5682 retval = EFAULT; 5683 break; 5684 } 5685 5686 /* Perform validity checks */ 5687 if ((cdip = mdi_client_path2devinfo(vdip, 5688 client_path)) == NULL) { 5689 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5690 "client dip doesn't exist. invalid path <%s>", 5691 s, client_path)); 5692 retval = ENXIO; 5693 break; 5694 } 5695 5696 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func " 5697 "to switch controller")); 5698 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> " 5699 "class <%s>", client_path, paddr)); 5700 5701 if (strcmp(paddr, PCLASS_PRIMARY) && 5702 strcmp(paddr, PCLASS_SECONDARY)) { 5703 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5704 "invalid path class <%s>", s, paddr)); 5705 retval = ENXIO; 5706 break; 5707 } 5708 5709 devp = ddi_get_driver_private(cdip); 5710 if (devp == NULL) { 5711 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5712 "invalid scsi device <%s>", s, client_path)); 5713 retval = ENXIO; 5714 break; 5715 } 5716 vlun = ADDR2VLUN(&devp->sd_address); 5717 ASSERT(vlun); 5718 5719 /* 5720 * Checking to see if device has only one pclass, PRIMARY. 5721 * If so this device doesn't support failovers. Assumed 5722 * that the devices with one pclass is PRIMARY, as thats the 5723 * case today. If this is not true and in future other 5724 * symmetric devices are supported with other pclass, this 5725 * IOCTL shall have to be overhauled anyways as now the only 5726 * arguments it accepts are PRIMARY and SECONDARY. 
5727 */ 5728 fo = vlun->svl_fops; 5729 if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass, 5730 vlun->svl_fops_ctpriv)) { 5731 retval = ENOTSUP; 5732 break; 5733 } 5734 5735 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 5736 mutex_enter(&vlun->svl_mutex); 5737 if (vlun->svl_active_pclass != NULL) { 5738 if (strcmp(vlun->svl_active_pclass, paddr) == 0) { 5739 mutex_exit(&vlun->svl_mutex); 5740 retval = EALREADY; 5741 VHCI_RELEASE_LUN(vlun); 5742 break; 5743 } 5744 } 5745 mutex_exit(&vlun->svl_mutex); 5746 /* Call mdi function to cause a switch over */ 5747 retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC); 5748 if (retval == MDI_SUCCESS) { 5749 retval = 0; 5750 } else if (retval == MDI_BUSY) { 5751 retval = EBUSY; 5752 } else { 5753 retval = EIO; 5754 } 5755 VHCI_RELEASE_LUN(vlun); 5756 break; 5757 } 5758 5759 case SCSI_VHCI_PATH_ENABLE: 5760 case SCSI_VHCI_PATH_DISABLE: 5761 { 5762 dev_info_t *cdip, *pdip; 5763 5764 /* 5765 * Get client device path from user land 5766 */ 5767 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5768 retval = EFAULT; 5769 break; 5770 } 5771 5772 /* 5773 * Get Phci device path from user land 5774 */ 5775 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5776 retval = EFAULT; 5777 break; 5778 } 5779 5780 /* 5781 * Get the devinfo for the Phci. 5782 */ 5783 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5784 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5785 "phci dip doesn't exist. invalid path <%s>", 5786 s, phci_path)); 5787 retval = ENXIO; 5788 break; 5789 } 5790 5791 /* 5792 * If the client path is set to /scsi_vhci then we need 5793 * to do the operation on all the clients so set cdip to NULL. 5794 * Else, try to get the client dip. 5795 */ 5796 if (strcmp(client_path, "/scsi_vhci") == 0) { 5797 cdip = NULL; 5798 } else { 5799 if ((cdip = mdi_client_path2devinfo(vdip, 5800 client_path)) == NULL) { 5801 retval = ENXIO; 5802 VHCI_DEBUG(1, (CE_WARN, NULL, 5803 "!vhci_ioctl: ioctl <%s> client dip " 5804 "doesn't exist. invalid path <%s>", 5805 s, client_path)); 5806 break; 5807 } 5808 } 5809 5810 if (cmd == SCSI_VHCI_PATH_ENABLE) 5811 retval = mdi_pi_enable(cdip, pdip, USER_DISABLE); 5812 else 5813 retval = mdi_pi_disable(cdip, pdip, USER_DISABLE); 5814 5815 break; 5816 } 5817 5818 case SCSI_VHCI_GET_TARGET_LONGNAME: 5819 { 5820 uint_t pid = pioc->buf_elem; 5821 char *target_port; 5822 mod_hash_val_t hv; 5823 5824 /* targetmap lookup of 'target-port' by <pid> */ 5825 if (mod_hash_find(vhci_targetmap_bypid, 5826 (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) { 5827 /* 5828 * NOTE: failure to find the mapping is OK for guid 5829 * based 'target-port' values. 
5830 */ 5831 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5832 "targetport mapping doesn't exist: pid %d", 5833 s, pid)); 5834 retval = ENXIO; 5835 break; 5836 } 5837 5838 /* copyout 'target-port' result */ 5839 target_port = (char *)hv; 5840 if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) { 5841 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5842 "targetport copyout failed: len: %d", 5843 s, (int)strlen(target_port))); 5844 retval = EFAULT; 5845 } 5846 break; 5847 } 5848 5849 #ifdef DEBUG 5850 case SCSI_VHCI_CONFIGURE_PHCI: 5851 { 5852 dev_info_t *pdip; 5853 5854 /* Get PHCI path and device address from user land */ 5855 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5856 retval = EFAULT; 5857 break; 5858 } 5859 5860 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5861 "phci <%s>", s, phci_path)); 5862 5863 /* Get the PHCI dip */ 5864 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5865 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5866 "phci dip doesn't exist. invalid path <%s>", 5867 s, phci_path)); 5868 retval = ENXIO; 5869 break; 5870 } 5871 5872 if (ndi_devi_config(pdip, 5873 NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) { 5874 retval = EIO; 5875 } 5876 5877 ddi_release_devi(pdip); 5878 break; 5879 } 5880 5881 case SCSI_VHCI_UNCONFIGURE_PHCI: 5882 { 5883 dev_info_t *pdip; 5884 5885 /* Get PHCI path and device address from user land */ 5886 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5887 retval = EFAULT; 5888 break; 5889 } 5890 5891 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5892 "phci <%s>", s, phci_path)); 5893 5894 /* Get the PHCI dip */ 5895 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5896 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5897 "phci dip doesn't exist. 
invalid path <%s>", 5898 s, phci_path)); 5899 retval = ENXIO; 5900 break; 5901 } 5902 5903 if (ndi_devi_unconfig(pdip, 5904 NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) { 5905 retval = EBUSY; 5906 } 5907 5908 ddi_release_devi(pdip); 5909 break; 5910 } 5911 #endif 5912 } 5913 5914 end: 5915 /* Free the memory allocated above */ 5916 if (phci_path != NULL) { 5917 kmem_free(phci_path, MAXPATHLEN); 5918 } 5919 if (client_path != NULL) { 5920 kmem_free(client_path, MAXPATHLEN); 5921 } 5922 if (paddr != NULL) { 5923 kmem_free(paddr, MAXNAMELEN); 5924 } 5925 return (retval); 5926 } 5927 5928 /* 5929 * devctl IOCTL support for client device DR 5930 */ 5931 /* ARGSUSED */ 5932 int 5933 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 5934 int *rvalp) 5935 { 5936 dev_info_t *self; 5937 dev_info_t *child; 5938 scsi_hba_tran_t *hba; 5939 struct devctl_iocdata *dcp; 5940 struct scsi_vhci *vhci; 5941 int rv = 0; 5942 int retval = 0; 5943 scsi_vhci_priv_t *svp; 5944 mdi_pathinfo_t *pip; 5945 5946 if ((vhci = ddi_get_soft_state(vhci_softstate, 5947 MINOR2INST(getminor(dev)))) == NULL) 5948 return (ENXIO); 5949 5950 /* 5951 * check if :devctl minor device has been opened 5952 */ 5953 mutex_enter(&vhci->vhci_mutex); 5954 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5955 mutex_exit(&vhci->vhci_mutex); 5956 return (ENXIO); 5957 } 5958 mutex_exit(&vhci->vhci_mutex); 5959 5960 self = vhci->vhci_dip; 5961 hba = ddi_get_driver_private(self); 5962 if (hba == NULL) 5963 return (ENXIO); 5964 5965 /* 5966 * We can use the generic implementation for these ioctls 5967 */ 5968 switch (cmd) { 5969 case DEVCTL_DEVICE_GETSTATE: 5970 case DEVCTL_DEVICE_ONLINE: 5971 case DEVCTL_DEVICE_OFFLINE: 5972 case DEVCTL_DEVICE_REMOVE: 5973 case DEVCTL_BUS_GETSTATE: 5974 return (ndi_devctl_ioctl(self, cmd, arg, mode, 0)); 5975 } 5976 5977 /* 5978 * read devctl ioctl data 5979 */ 5980 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) 5981 return (EFAULT); 5982 5983 switch (cmd) { 5984 5985 case DEVCTL_DEVICE_RESET: 5986 /* 5987 * lookup and hold child device 5988 */ 5989 if ((child = ndi_devi_find(self, ndi_dc_getname(dcp), 5990 ndi_dc_getaddr(dcp))) == NULL) { 5991 rv = ENXIO; 5992 break; 5993 } 5994 retval = mdi_select_path(child, NULL, 5995 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), 5996 NULL, &pip); 5997 if ((retval != MDI_SUCCESS) || (pip == NULL)) { 5998 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:" 5999 "Unable to get a path, dip 0x%p", (void *)child)); 6000 rv = ENXIO; 6001 break; 6002 } 6003 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 6004 if (vhci_recovery_reset(svp->svp_svl, 6005 &svp->svp_psd->sd_address, TRUE, 6006 VHCI_DEPTH_TARGET) == 0) { 6007 VHCI_DEBUG(1, (CE_NOTE, NULL, 6008 "!vhci_ioctl(pip:%p): " 6009 "reset failed\n", (void *)pip)); 6010 rv = ENXIO; 6011 } 6012 mdi_rele_path(pip); 6013 break; 6014 6015 case DEVCTL_BUS_QUIESCE: 6016 case DEVCTL_BUS_UNQUIESCE: 6017 case DEVCTL_BUS_RESET: 6018 case DEVCTL_BUS_RESETALL: 6019 #ifdef DEBUG 6020 case DEVCTL_BUS_CONFIGURE: 6021 case DEVCTL_BUS_UNCONFIGURE: 6022 #endif 6023 rv = ENOTSUP; 6024 break; 6025 6026 default: 6027 rv = ENOTTY; 6028 } /* end of outer switch */ 6029 6030 ndi_dc_freehdl(dcp); 6031 return (rv); 6032 } 6033 6034 /* 6035 * Routine to get the PHCI pathname from ioctl structures in userland 6036 */ 6037 /* ARGSUSED */ 6038 static int 6039 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path, 6040 int mode, caddr_t s) 6041 { 6042 int retval = 0; 6043 6044 if (ddi_copyin(pioc->phci, phci_path, 
MAXPATHLEN, mode)) { 6045 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> " 6046 "phci_path copyin failed", s)); 6047 retval = EFAULT; 6048 } 6049 return (retval); 6050 6051 } 6052 6053 6054 /* 6055 * Routine to get the Client device pathname from ioctl structures in userland 6056 */ 6057 /* ARGSUSED */ 6058 static int 6059 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path, 6060 int mode, caddr_t s) 6061 { 6062 int retval = 0; 6063 6064 if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) { 6065 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: " 6066 "ioctl <%s> client_path copyin failed", s)); 6067 retval = EFAULT; 6068 } 6069 return (retval); 6070 } 6071 6072 6073 /* 6074 * Routine to get physical device address from ioctl structure in userland 6075 */ 6076 /* ARGSUSED */ 6077 static int 6078 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s) 6079 { 6080 int retval = 0; 6081 6082 if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) { 6083 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: " 6084 "ioctl <%s> device addr copyin failed", s)); 6085 retval = EFAULT; 6086 } 6087 return (retval); 6088 } 6089 6090 6091 /* 6092 * Routine to send client device pathname to userland. 6093 */ 6094 /* ARGSUSED */ 6095 static int 6096 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc, 6097 int mode, caddr_t s) 6098 { 6099 int retval = 0; 6100 6101 if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) { 6102 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: " 6103 "ioctl <%s> client_path copyout failed", s)); 6104 retval = EFAULT; 6105 } 6106 return (retval); 6107 } 6108 6109 6110 /* 6111 * Routine to translate a dev_info pointer (dip) to a device pathname. 6112 */ 6113 static void 6114 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path) 6115 { 6116 (void) ddi_pathname(dip, path); 6117 } 6118 6119 6120 /* 6121 * vhci_get_phci_path_list: 6122 * get information about devices associated with a 6123 * given PHCI device. 6124 * 6125 * Return Values: 6126 * MDI_SUCCESS, with 'pibuf' filled with up to 'num_elems' path information elements 6127 */ 6128 int 6129 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf, 6130 uint_t num_elems) 6131 { 6132 uint_t count, done; 6133 mdi_pathinfo_t *pip; 6134 sv_path_info_t *ret_pip; 6135 int status; 6136 size_t prop_size; 6137 int circular; 6138 6139 /* 6140 * Walk the paths associated with this pHCI device and 6141 * collect the per-path information.
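 *
 * (Each returned sv_path_info_t element carries the pHCI device
 * path, the path address, the mdi state/ext-state pair and, when
 * the caller supplied a property buffer, the packed mdi properties
 * for that path; see the loop below.)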
6142 */ 6143 6144 ret_pip = pibuf; 6145 count = 0; 6146 6147 ndi_devi_enter(pdip, &circular); 6148 6149 done = (count >= num_elems); 6150 pip = mdi_get_next_client_path(pdip, NULL); 6151 while (pip && !done) { 6152 mdi_pi_lock(pip); 6153 (void) ddi_pathname(mdi_pi_get_phci(pip), 6154 ret_pip->device.ret_phci); 6155 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6156 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6157 &ret_pip->ret_ext_state); 6158 6159 status = mdi_prop_size(pip, &prop_size); 6160 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6161 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6162 } 6163 6164 #ifdef DEBUG 6165 if (status != MDI_SUCCESS) { 6166 VHCI_DEBUG(2, (CE_WARN, NULL, 6167 "!vhci_get_phci_path_list: " 6168 "phci <%s>, prop size failure 0x%x", 6169 ret_pip->device.ret_phci, status)); 6170 } 6171 #endif /* DEBUG */ 6172 6173 6174 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6175 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6176 status = mdi_prop_pack(pip, 6177 &ret_pip->ret_prop.buf, 6178 ret_pip->ret_prop.buf_size); 6179 6180 #ifdef DEBUG 6181 if (status != MDI_SUCCESS) { 6182 VHCI_DEBUG(2, (CE_WARN, NULL, 6183 "!vhci_get_phci_path_list: " 6184 "phci <%s>, prop pack failure 0x%x", 6185 ret_pip->device.ret_phci, status)); 6186 } 6187 #endif /* DEBUG */ 6188 } 6189 6190 mdi_pi_unlock(pip); 6191 pip = mdi_get_next_client_path(pdip, pip); 6192 ret_pip++; 6193 count++; 6194 done = (count >= num_elems); 6195 } 6196 6197 ndi_devi_exit(pdip, circular); 6198 6199 return (MDI_SUCCESS); 6200 } 6201 6202 6203 /* 6204 * vhci_get_client_path_list: 6205 * get information about various paths associated with a 6206 * given client device. 6207 * 6208 * Return Values: 6209 * MDI_SUCCESS, with 'pibuf' filled with up to 'num_elems' path information elements 6210 */ 6211 int 6212 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf, 6213 uint_t num_elems) 6214 { 6215 uint_t count, done; 6216 mdi_pathinfo_t *pip; 6217 sv_path_info_t *ret_pip; 6218 int status; 6219 size_t prop_size; 6220 int circular; 6221 6222 ret_pip = pibuf; 6223 count = 0; 6224 6225 ndi_devi_enter(cdip, &circular); 6226 6227 done = (count >= num_elems); 6228 pip = mdi_get_next_phci_path(cdip, NULL); 6229 while (pip && !done) { 6230 mdi_pi_lock(pip); 6231 (void) ddi_pathname(mdi_pi_get_phci(pip), 6232 ret_pip->device.ret_phci); 6233 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6234 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6235 &ret_pip->ret_ext_state); 6236 6237 status = mdi_prop_size(pip, &prop_size); 6238 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6239 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6240 } 6241 6242 #ifdef DEBUG 6243 if (status != MDI_SUCCESS) { 6244 VHCI_DEBUG(2, (CE_WARN, NULL, 6245 "!vhci_get_client_path_list: " 6246 "phci <%s>, prop size failure 0x%x", 6247 ret_pip->device.ret_phci, status)); 6248 } 6249 #endif /* DEBUG */ 6250 6251 6252 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6253 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6254 status = mdi_prop_pack(pip, 6255 &ret_pip->ret_prop.buf, 6256 ret_pip->ret_prop.buf_size); 6257 6258 #ifdef DEBUG 6259 if (status != MDI_SUCCESS) { 6260 VHCI_DEBUG(2, (CE_WARN, NULL, 6261 "!vhci_get_client_path_list: " 6262 "phci <%s>, prop pack failure 0x%x", 6263 ret_pip->device.ret_phci, status)); 6264 } 6265 #endif /* DEBUG */ 6266 } 6267 6268 mdi_pi_unlock(pip); 6269 pip = mdi_get_next_phci_path(cdip, pip); 6270 ret_pip++; 6271 count++; 6272 done = (count >= num_elems); 6273 } 6274
6275 ndi_devi_exit(cdip, circular); 6276 6277 return (MDI_SUCCESS); 6278 } 6279 6280 6281 /* 6282 * Routine to get ioctl argument structure from userland. 6283 */ 6284 /* ARGSUSED */ 6285 static int 6286 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s) 6287 { 6288 int retval = 0; 6289 6290 #ifdef _MULTI_DATAMODEL 6291 switch (ddi_model_convert_from(mode & FMODELS)) { 6292 case DDI_MODEL_ILP32: 6293 { 6294 sv_iocdata32_t ioc32; 6295 6296 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6297 retval = EFAULT; 6298 break; 6299 } 6300 pioc->client = (caddr_t)(uintptr_t)ioc32.client; 6301 pioc->phci = (caddr_t)(uintptr_t)ioc32.phci; 6302 pioc->addr = (caddr_t)(uintptr_t)ioc32.addr; 6303 pioc->buf_elem = (uint_t)ioc32.buf_elem; 6304 pioc->ret_buf = (sv_path_info_t *)(uintptr_t)ioc32.ret_buf; 6305 pioc->ret_elem = (uint_t *)(uintptr_t)ioc32.ret_elem; 6306 break; 6307 } 6308 6309 case DDI_MODEL_NONE: 6310 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6311 retval = EFAULT; 6312 break; 6313 } 6314 break; 6315 } 6316 #else /* _MULTI_DATAMODEL */ 6317 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6318 retval = EFAULT; 6319 } 6320 #endif /* _MULTI_DATAMODEL */ 6321 6322 #ifdef DEBUG 6323 if (retval) { 6324 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6325 "iocdata copyin failed", s)); 6326 } 6327 #endif 6328 6329 return (retval); 6330 } 6331 6332 6333 /* 6334 * Routine to get the ioctl argument for the ioctl causing controller switchover. 6335 */ 6336 /* ARGSUSED */ 6337 static int 6338 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc, 6339 int mode, caddr_t s) 6340 { 6341 int retval = 0; 6342 6343 #ifdef _MULTI_DATAMODEL 6344 switch (ddi_model_convert_from(mode & FMODELS)) { 6345 case DDI_MODEL_ILP32: 6346 { 6347 sv_switch_to_cntlr_iocdata32_t ioc32; 6348 6349 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6350 retval = EFAULT; 6351 break; 6352 } 6353 piocsc->client = (caddr_t)(uintptr_t)ioc32.client; 6354 piocsc->class = (caddr_t)(uintptr_t)ioc32.class; 6355 break; 6356 } 6357 6358 case DDI_MODEL_NONE: 6359 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6360 retval = EFAULT; 6361 } 6362 break; 6363 } 6364 #else /* _MULTI_DATAMODEL */ 6365 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6366 retval = EFAULT; 6367 } 6368 #endif /* _MULTI_DATAMODEL */ 6369 6370 #ifdef DEBUG 6371 if (retval) { 6372 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6373 "switch_to_cntlr_iocdata copyin failed", s)); 6374 } 6375 #endif 6376 6377 return (retval); 6378 } 6379 6380 6381 /* 6382 * Routine to allocate memory for the path information structures. 6383 * It allocates two chunks of memory: one to hold the userland 6384 * pointers/values for path information and path properties, and a second 6385 * holding kernel-allocated buffers for the path properties. These path 6386 * properties are finally copied out to userland.
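 *
 * A hypothetical 64-bit caller would prime 'ret_buf' along these
 * lines before issuing the ioctl (NPATHS and 'actual' are made-up
 * names; illustration only):
 *
 *	sv_path_info_t pi[NPATHS];
 *	uint_t actual[NPATHS];
 *	for (i = 0; i < NPATHS; i++) {
 *		pi[i].ret_prop.buf_size = SV_PROP_MAX_BUF_SIZE;
 *		pi[i].ret_prop.buf = malloc(SV_PROP_MAX_BUF_SIZE);
 *		pi[i].ret_prop.ret_buf_size = &actual[i];
 *	}
 *	ioc.ret_buf = pi;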
6387 */ 6388 /* ARGSUSED */ 6389 static int 6390 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf, 6391 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6392 { 6393 sv_path_info_t *pi; 6394 uint_t bufsize; 6395 int retval = 0; 6396 int index; 6397 6398 /* Allocate memory */ 6399 *upibuf = (sv_path_info_t *) 6400 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6401 ASSERT(*upibuf != NULL); 6402 *kpibuf = (sv_path_info_t *) 6403 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6404 ASSERT(*kpibuf != NULL); 6405 6406 /* 6407 * Get the path info structure from the user space. 6408 * We are interested in the following fields: 6409 * - user size of buffer for per path properties. 6410 * - user address of buffer for path info properties. 6411 * - user pointer for returning actual buffer size 6412 * Keep these fields in the 'upibuf' structures. 6413 * Allocate buffer for per path info properties in kernel 6414 * structure ('kpibuf'). 6415 * Size of these buffers will be equal to the size of buffers 6416 * in the user space. 6417 */ 6418 #ifdef _MULTI_DATAMODEL 6419 switch (ddi_model_convert_from(mode & FMODELS)) { 6420 case DDI_MODEL_ILP32: 6421 { 6422 sv_path_info32_t *src; 6423 sv_path_info32_t pi32; 6424 6425 src = (sv_path_info32_t *)pioc->ret_buf; 6426 pi = (sv_path_info_t *)*upibuf; 6427 for (index = 0; index < num_paths; index++, src++, pi++) { 6428 if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) { 6429 retval = EFAULT; 6430 break; 6431 } 6432 6433 pi->ret_prop.buf_size = 6434 (uint_t)pi32.ret_prop.buf_size; 6435 pi->ret_prop.ret_buf_size = 6436 (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size; 6437 pi->ret_prop.buf = 6438 (caddr_t)(uintptr_t)pi32.ret_prop.buf; 6439 } 6440 break; 6441 } 6442 6443 case DDI_MODEL_NONE: 6444 if (ddi_copyin(pioc->ret_buf, *upibuf, 6445 sizeof (sv_path_info_t) * num_paths, mode)) { 6446 retval = EFAULT; 6447 } 6448 break; 6449 } 6450 #else /* _MULTI_DATAMODEL */ 6451 if (ddi_copyin(pioc->ret_buf, *upibuf, 6452 sizeof (sv_path_info_t) * num_paths, mode)) { 6453 retval = EFAULT; 6454 } 6455 #endif /* _MULTI_DATAMODEL */ 6456 6457 if (retval != 0) { 6458 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: " 6459 "ioctl <%s> normal: path_info copyin failed", s)); 6460 kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths); 6461 kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths); 6462 *upibuf = NULL; 6463 *kpibuf = NULL; 6464 return (retval); 6465 } 6466 6467 /* 6468 * Allocate memory for per path properties. 6469 */ 6470 for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) { 6471 bufsize = (*upibuf)[index].ret_prop.buf_size; 6472 6473 if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) { 6474 pi->ret_prop.buf_size = bufsize; 6475 pi->ret_prop.buf = (caddr_t) 6476 kmem_zalloc(bufsize, KM_SLEEP); 6477 ASSERT(pi->ret_prop.buf != NULL); 6478 } else { 6479 pi->ret_prop.buf_size = 0; 6480 pi->ret_prop.buf = NULL; 6481 } 6482 6483 if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) { 6484 pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc( 6485 sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP); 6486 ASSERT(pi->ret_prop.ret_buf_size != NULL); 6487 } else { 6488 pi->ret_prop.ret_buf_size = NULL; 6489 } 6490 } 6491 6492 return (0); 6493 } 6494 6495 6496 /* 6497 * Routine to free memory for the path information structures. 6498 * This is the memory which was allocated earlier. 
6499 */ 6500 /* ARGSUSED */ 6501 static void 6502 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6503 uint_t num_paths) 6504 { 6505 sv_path_info_t *pi; 6506 int index; 6507 6508 /* Free memory for per path properties */ 6509 for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) { 6510 if (pi->ret_prop.ret_buf_size != NULL) { 6511 kmem_free(pi->ret_prop.ret_buf_size, 6512 sizeof (*pi->ret_prop.ret_buf_size)); 6513 } 6514 6515 if (pi->ret_prop.buf != NULL) { 6516 kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size); 6517 } 6518 } 6519 6520 /* Free memory for path info structures */ 6521 kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths); 6522 kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths); 6523 } 6524 6525 6526 /* 6527 * Routine to copy path information and path properties to userland. 6528 */ 6529 /* ARGSUSED */ 6530 static int 6531 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6532 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6533 { 6534 int retval = 0, index; 6535 sv_path_info_t *upi_ptr; 6536 sv_path_info32_t *upi32_ptr; 6537 6538 #ifdef _MULTI_DATAMODEL 6539 switch (ddi_model_convert_from(mode & FMODELS)) { 6540 case DDI_MODEL_ILP32: 6541 goto copy_32bit; 6542 6543 case DDI_MODEL_NONE: 6544 goto copy_normal; 6545 } 6546 #else /* _MULTI_DATAMODEL */ 6547 6548 goto copy_normal; 6549 6550 #endif /* _MULTI_DATAMODEL */ 6551 6552 copy_normal: 6553 6554 /* 6555 * Copy path information and path properties to user land. 6556 * Pointer fields inside the path property structure were 6557 * saved in the 'upibuf' structure earlier. 6558 */ 6559 upi_ptr = pioc->ret_buf; 6560 for (index = 0; index < num_paths; index++) { 6561 if (ddi_copyout(kpibuf[index].device.ret_ct, 6562 upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6563 retval = EFAULT; 6564 break; 6565 } 6566 6567 if (ddi_copyout(kpibuf[index].ret_addr, 6568 upi_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6569 retval = EFAULT; 6570 break; 6571 } 6572 6573 if (ddi_copyout(&kpibuf[index].ret_state, 6574 &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state), 6575 mode)) { 6576 retval = EFAULT; 6577 break; 6578 } 6579 6580 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6581 &upi_ptr[index].ret_ext_state, 6582 sizeof (kpibuf[index].ret_ext_state), mode)) { 6583 retval = EFAULT; 6584 break; 6585 } 6586 6587 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6588 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6589 upibuf[index].ret_prop.ret_buf_size, 6590 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6591 retval = EFAULT; 6592 break; 6593 } 6594 6595 if ((kpibuf[index].ret_prop.buf != NULL) && 6596 ddi_copyout(kpibuf[index].ret_prop.buf, 6597 upibuf[index].ret_prop.buf, 6598 upibuf[index].ret_prop.buf_size, mode)) { 6599 retval = EFAULT; 6600 break; 6601 } 6602 } 6603 6604 #ifdef DEBUG 6605 if (retval) { 6606 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6607 "normal: path_info copyout failed", s)); 6608 } 6609 #endif 6610 6611 return (retval); 6612 6613 copy_32bit: 6614 /* 6615 * Copy path information and path properties to user land. 6616 * Pointer fields inside the path property structure were 6617 * saved in the 'upibuf' structure earlier. 
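 *
 * (Fixed-size fields are copied straight into the caller's
 * sv_path_info_t array; property data is copied into the per-path
 * user buffers whose addresses were captured in 'upibuf' by
 * vhci_ioc_alloc_pathinfo().)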
6618 */ 6619 upi32_ptr = (sv_path_info32_t *)pioc->ret_buf; 6620 for (index = 0; index < num_paths; index++) { 6621 if (ddi_copyout(kpibuf[index].device.ret_ct, 6622 upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6623 retval = EFAULT; 6624 break; 6625 } 6626 6627 if (ddi_copyout(kpibuf[index].ret_addr, 6628 upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6629 retval = EFAULT; 6630 break; 6631 } 6632 6633 if (ddi_copyout(&kpibuf[index].ret_state, 6634 &upi32_ptr[index].ret_state, 6635 sizeof (kpibuf[index].ret_state), mode)) { 6636 retval = EFAULT; 6637 break; 6638 } 6639 6640 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6641 &upi32_ptr[index].ret_ext_state, 6642 sizeof (kpibuf[index].ret_ext_state), mode)) { 6643 retval = EFAULT; 6644 break; 6645 } 6646 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6647 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6648 upibuf[index].ret_prop.ret_buf_size, 6649 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6650 retval = EFAULT; 6651 break; 6652 } 6653 6654 if ((kpibuf[index].ret_prop.buf != NULL) && 6655 ddi_copyout(kpibuf[index].ret_prop.buf, 6656 upibuf[index].ret_prop.buf, 6657 upibuf[index].ret_prop.buf_size, mode)) { 6658 retval = EFAULT; 6659 break; 6660 } 6661 } 6662 6663 #ifdef DEBUG 6664 if (retval) { 6665 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6666 "32bit: path_info copyout failed", s)); 6667 } 6668 #endif 6669 6670 return (retval); 6671 } 6672 6673 6674 /* 6675 * vhci_failover() 6676 * This routine expects VHCI_HOLD_LUN before being invoked. It can be invoked 6677 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC. For asynchronous failovers 6678 * this routine will VHCI_RELEASE_LUN on exit. For synchronous failovers 6679 * it is the caller's responsibility to release the lun. 6680 */ 6681 6682 /* ARGSUSED */ 6683 static int 6684 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags) 6685 { 6686 char *guid; 6687 scsi_vhci_lun_t *vlun = NULL; 6688 struct scsi_vhci *vhci; 6689 mdi_pathinfo_t *pip, *npip; 6690 char *s_pclass, *pclass1, *pclass2, *pclass; 6691 char active_pclass_copy[255], *active_pclass_ptr; 6692 char *ptr1, *ptr2; 6693 mdi_pathinfo_state_t pi_state; 6694 uint32_t pi_ext_state; 6695 scsi_vhci_priv_t *svp; 6696 struct scsi_device *sd; 6697 struct scsi_failover_ops *sfo; 6698 int sps; /* mdi_select_path() status */ 6699 int activation_done = 0; 6700 int rval, retval = MDI_FAILURE; 6701 int reserve_pending, check_condition, UA_condition; 6702 struct scsi_pkt *pkt; 6703 struct buf *bp; 6704 6705 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 6706 sd = ddi_get_driver_private(cdip); 6707 vlun = ADDR2VLUN(&sd->sd_address); 6708 ASSERT(vlun != 0); 6709 ASSERT(VHCI_LUN_IS_HELD(vlun)); 6710 guid = vlun->svl_lun_wwn; 6711 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid)); 6712 vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s " 6713 "(GUID %s)", ddi_node_name(cdip), guid); 6714 6715 /* 6716 * Let's maintain a local copy of vlun->svl_active_pclass 6717 * for the rest of the processing. Accessing the field 6718 * directly in the loop below breaks the loop logic 6719 * when the field gets updated by other threads 6720 * (path status updates, etc.) and causes the 'paths are not currently 6721 * available' condition to be declared prematurely.
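 *
 * (The copy is taken under svl_mutex and then used without the
 * lock for the remainder of the failover, so concurrent updates to
 * svl_active_pclass cannot perturb the pathclass walk below.)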
6722 */ 6723 mutex_enter(&vlun->svl_mutex); 6724 if (vlun->svl_active_pclass != NULL) { 6725 (void) strlcpy(active_pclass_copy, vlun->svl_active_pclass, 6726 sizeof (active_pclass_copy)); 6727 active_pclass_ptr = &active_pclass_copy[0]; 6728 mutex_exit(&vlun->svl_mutex); 6729 if (vhci_quiesce_paths(vdip, cdip, vlun, guid, 6730 active_pclass_ptr) != 0) { 6731 retval = MDI_FAILURE; 6732 } 6733 } else { 6734 /* 6735 * This can happen only when the available path to the device 6736 * discovered is a STANDBY path. 6737 */ 6738 mutex_exit(&vlun->svl_mutex); 6739 active_pclass_copy[0] = '\0'; 6740 active_pclass_ptr = NULL; 6741 } 6742 6743 sfo = vlun->svl_fops; 6744 ASSERT(sfo != NULL); 6745 pclass1 = s_pclass = active_pclass_ptr; 6746 VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid, 6747 (s_pclass == NULL ? "<none>" : s_pclass))); 6748 6749 next_pathclass: 6750 6751 rval = sfo->sfo_pathclass_next(pclass1, &pclass2, 6752 vlun->svl_fops_ctpriv); 6753 if (rval == ENOENT) { 6754 if (s_pclass == NULL) { 6755 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): " 6756 "failed, no more pathclasses\n", guid)); 6757 goto done; 6758 } else { 6759 (void) sfo->sfo_pathclass_next(NULL, &pclass2, 6760 vlun->svl_fops_ctpriv); 6761 } 6762 } else if (rval == EINVAL) { 6763 vhci_log(CE_NOTE, vdip, "!Failover operation failed for " 6764 "device %s (GUID %s): Invalid path-class %s", 6765 ddi_node_name(cdip), guid, 6766 ((pclass1 == NULL) ? "<none>" : pclass1)); 6767 goto done; 6768 } 6769 if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) { 6770 /* 6771 * paths are not currently available 6772 */ 6773 vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable" 6774 " for device %s (GUID %s)", 6775 ddi_node_name(cdip), guid); 6776 goto done; 6777 } 6778 pip = npip = NULL; 6779 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering " 6780 "%s as failover destination\n", guid, pclass2)); 6781 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip); 6782 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 6783 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no " 6784 "STANDBY paths found (status:%x)!\n", guid, sps)); 6785 pclass1 = pclass2; 6786 goto next_pathclass; 6787 } 6788 do { 6789 pclass = NULL; 6790 if ((mdi_prop_lookup_string(npip, "path-class", 6791 &pclass) != MDI_SUCCESS) || (strcmp(pclass2, 6792 pclass) != 0)) { 6793 VHCI_DEBUG(1, (CE_NOTE, NULL, 6794 "!vhci_failover(5.5)(%s): skipping path " 6795 "%p(%s)...\n", guid, (void *)npip, pclass)); 6796 pip = npip; 6797 sps = mdi_select_path(cdip, NULL, 6798 MDI_SELECT_STANDBY_PATH, pip, &npip); 6799 mdi_rele_path(pip); 6800 (void) mdi_prop_free(pclass); 6801 continue; 6802 } 6803 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 6804 6805 /* 6806 * Issue a READ at a non-zero block on this STANDBY path. 6807 * Purple returns 6808 * 1. RESERVATION_CONFLICT if a reservation is pending 6809 * 2. a POR check condition if a reset happened. 6810 * 3. failover check conditions if one is already in progress.
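 *
 * (The probe outcome maps onto the flags set below:
 *	STATUS_GOOD			-> activation_done
 *	STATUS_RESERVATION_CONFLICT	-> reserve_pending
 *	STATUS_CHECK			-> check_condition
 *	sense asc 29h, POR unit attention -> UA_condition; the probe
 *	is retried once on that path.)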
6811 */ 6812 reserve_pending = 0; 6813 check_condition = 0; 6814 UA_condition = 0; 6815 6816 bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address, 6817 (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL); 6818 if (!bp) { 6819 VHCI_DEBUG(1, (CE_NOTE, NULL, 6820 "vhci_failover !No resources (buf)\n")); 6821 mdi_rele_path(npip); 6822 goto done; 6823 } 6824 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, 6825 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 6826 PKT_CONSISTENT, NULL, NULL); 6827 if (pkt) { 6828 (void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t) 6829 pkt->pkt_cdbp, SCMD_READ, 1, 1, 0); 6830 pkt->pkt_flags = FLAG_NOINTR; 6831 check_path_again: 6832 pkt->pkt_path_instance = mdi_pi_get_path_instance(npip); 6833 pkt->pkt_time = 3*30; 6834 6835 if (scsi_transport(pkt) == TRAN_ACCEPT) { 6836 switch (pkt->pkt_reason) { 6837 case CMD_CMPLT: 6838 switch (SCBP_C(pkt)) { 6839 case STATUS_GOOD: 6840 /* Already failed over */ 6841 activation_done = 1; 6842 break; 6843 case STATUS_RESERVATION_CONFLICT: 6844 reserve_pending = 1; 6845 break; 6846 case STATUS_CHECK: 6847 check_condition = 1; 6848 break; 6849 } 6850 } 6851 } 6852 if (check_condition && 6853 (pkt->pkt_state & STATE_ARQ_DONE)) { 6854 uint8_t *sns, skey, asc, ascq; 6855 sns = (uint8_t *) 6856 &(((struct scsi_arq_status *)(uintptr_t) 6857 (pkt->pkt_scbp))->sts_sensedata); 6858 skey = scsi_sense_key(sns); 6859 asc = scsi_sense_asc(sns); 6860 ascq = scsi_sense_ascq(sns); 6861 if (skey == KEY_UNIT_ATTENTION && 6862 asc == 0x29) { 6863 /* Already failed over */ 6864 VHCI_DEBUG(1, (CE_NOTE, NULL, 6865 "!vhci_failover(7)(%s): " 6866 "path 0x%p POR UA condition\n", 6867 guid, (void *)npip)); 6868 if (UA_condition == 0) { 6869 UA_condition = 1; 6870 goto check_path_again; 6871 } 6872 } else { 6873 activation_done = 0; 6874 VHCI_DEBUG(1, (CE_NOTE, NULL, 6875 "!vhci_failover(%s): path 0x%p " 6876 "unhandled chkcond %x %x %x\n", 6877 guid, (void *)npip, skey, 6878 asc, ascq)); 6879 } 6880 } 6881 scsi_destroy_pkt(pkt); 6882 } 6883 scsi_free_consistent_buf(bp); 6884 6885 if (activation_done) { 6886 mdi_rele_path(npip); 6887 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6888 "path 0x%p already failedover\n", guid, 6889 (void *)npip)); 6890 break; 6891 } 6892 if (reserve_pending && (vlun->svl_xlf_capable == 0)) { 6893 (void) vhci_recovery_reset(vlun, 6894 &svp->svp_psd->sd_address, 6895 FALSE, VHCI_DEPTH_ALL); 6896 } 6897 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): " 6898 "activating path 0x%p(psd:%p)\n", guid, (void *)npip, 6899 (void *)svp->svp_psd)); 6900 if (sfo->sfo_path_activate(svp->svp_psd, pclass2, 6901 vlun->svl_fops_ctpriv) == 0) { 6902 activation_done = 1; 6903 mdi_rele_path(npip); 6904 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6905 "path 0x%p successfully activated\n", guid, 6906 (void *)npip)); 6907 break; 6908 } 6909 pip = npip; 6910 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, 6911 pip, &npip); 6912 mdi_rele_path(pip); 6913 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 6914 if (activation_done == 0) { 6915 pclass1 = pclass2; 6916 goto next_pathclass; 6917 } 6918 6919 /* 6920 * if we are here, we have succeeded in activating path npip of 6921 * pathclass pclass2; let us validate all paths of pclass2 by 6922 * "ping"-ing each one and mark the good ones ONLINE 6923 * Also, set the state of the paths belonging to the previously 6924 * active pathclass to STANDBY 6925 */ 6926 pip = npip = NULL; 6927 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 6928 
MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH), 6929 NULL, &npip); 6930 if (npip == NULL || sps != MDI_SUCCESS) { 6931 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for " 6932 "device %s (GUID %s): paths may be busy\n", 6933 ddi_node_name(cdip), guid)); 6934 goto done; 6935 } 6936 do { 6937 (void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state); 6938 if (mdi_prop_lookup_string(npip, "path-class", &pclass) 6939 != MDI_SUCCESS) { 6940 pip = npip; 6941 sps = mdi_select_path(cdip, NULL, 6942 (MDI_SELECT_ONLINE_PATH | 6943 MDI_SELECT_STANDBY_PATH | 6944 MDI_SELECT_USER_DISABLE_PATH), 6945 pip, &npip); 6946 mdi_rele_path(pip); 6947 continue; 6948 } 6949 if (strcmp(pclass, pclass2) == 0) { 6950 if (pi_state == MDI_PATHINFO_STATE_STANDBY) { 6951 svp = (scsi_vhci_priv_t *) 6952 mdi_pi_get_vhci_private(npip); 6953 VHCI_DEBUG(1, (CE_NOTE, NULL, 6954 "!vhci_failover(8)(%s): " 6955 "pinging path 0x%p\n", 6956 guid, (void *)npip)); 6957 if (sfo->sfo_path_ping(svp->svp_psd, 6958 vlun->svl_fops_ctpriv) == 1) { 6959 mdi_pi_set_state(npip, 6960 MDI_PATHINFO_STATE_ONLINE); 6961 VHCI_DEBUG(1, (CE_NOTE, NULL, 6962 "!vhci_failover(9)(%s): " 6963 "path 0x%p ping successful, " 6964 "marked online\n", guid, 6965 (void *)npip)); 6966 MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO); 6967 } 6968 } 6969 } else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass) 6970 == 0)) { 6971 if (pi_state == MDI_PATHINFO_STATE_ONLINE) { 6972 mdi_pi_set_state(npip, 6973 MDI_PATHINFO_STATE_STANDBY); 6974 VHCI_DEBUG(1, (CE_NOTE, NULL, 6975 "!vhci_failover(10)(%s): path 0x%p marked " 6976 "STANDBY\n", guid, (void *)npip)); 6977 MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM); 6978 } 6979 } 6980 (void) mdi_prop_free(pclass); 6981 pip = npip; 6982 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 6983 MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH), 6984 pip, &npip); 6985 mdi_rele_path(pip); 6986 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 6987 6988 /* 6989 * Update the AccessState of related MP-API TPGs 6990 */ 6991 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 6992 6993 vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully " 6994 "for device %s (GUID %s): failed over from %s to %s", 6995 ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" : 6996 s_pclass), pclass2); 6997 ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP); 6998 (void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1)); 6999 mutex_enter(&vlun->svl_mutex); 7000 ptr2 = vlun->svl_active_pclass; 7001 vlun->svl_active_pclass = ptr1; 7002 mutex_exit(&vlun->svl_mutex); 7003 if (ptr2) { 7004 kmem_free(ptr2, strlen(ptr2)+1); 7005 } 7006 mutex_enter(&vhci->vhci_mutex); 7007 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 7008 &vhci->vhci_reset_notify_listf); 7009 /* All reservations are cleared upon these resets. */ 7010 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 7011 mutex_exit(&vhci->vhci_mutex); 7012 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active " 7013 "pathclass for %s is now %s\n", guid, pclass2)); 7014 retval = MDI_SUCCESS; 7015 7016 done: 7017 vlun->svl_failover_status = retval; 7018 if (flags == MDI_FAILOVER_ASYNC) { 7019 VHCI_RELEASE_LUN(vlun); 7020 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! " 7021 "releasing lun, as failover was ASYNC\n")); 7022 } else { 7023 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! 
" 7024 "NOT releasing lun, as failover was SYNC\n")); 7025 } 7026 return (retval); 7027 } 7028 7029 /* 7030 * vhci_client_attached is called after the successful attach of a 7031 * client devinfo node. 7032 */ 7033 static void 7034 vhci_client_attached(dev_info_t *cdip) 7035 { 7036 mdi_pathinfo_t *pip; 7037 int circular; 7038 7039 /* 7040 * At this point the client has attached and it's instance number is 7041 * valid, so we can set up kstats. We need to do this here because it 7042 * is possible for paths to go online prior to client attach, in which 7043 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online 7044 * was a noop. 7045 */ 7046 ndi_devi_enter(cdip, &circular); 7047 for (pip = mdi_get_next_phci_path(cdip, NULL); pip; 7048 pip = mdi_get_next_phci_path(cdip, pip)) 7049 vhci_kstat_create_pathinfo(pip); 7050 ndi_devi_exit(cdip, circular); 7051 } 7052 7053 /* 7054 * quiesce all of the online paths 7055 */ 7056 static int 7057 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun, 7058 char *guid, char *active_pclass_ptr) 7059 { 7060 scsi_vhci_priv_t *svp; 7061 char *s_pclass = NULL; 7062 mdi_pathinfo_t *npip, *pip; 7063 int sps; 7064 7065 /* quiesce currently active paths */ 7066 s_pclass = NULL; 7067 pip = npip = NULL; 7068 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip); 7069 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 7070 return (1); 7071 } 7072 do { 7073 if (mdi_prop_lookup_string(npip, "path-class", 7074 &s_pclass) != MDI_SUCCESS) { 7075 mdi_rele_path(npip); 7076 vhci_log(CE_NOTE, vdip, "!Failover operation failed " 7077 "for device %s (GUID %s) due to an internal " 7078 "error", ddi_node_name(cdip), guid); 7079 return (1); 7080 } 7081 if (strcmp(s_pclass, active_pclass_ptr) == 0) { 7082 /* 7083 * quiesce path. Free s_pclass since 7084 * we don't need it anymore 7085 */ 7086 VHCI_DEBUG(1, (CE_NOTE, NULL, 7087 "!vhci_failover(2)(%s): failing over " 7088 "from %s; quiescing path %p\n", 7089 guid, s_pclass, (void *)npip)); 7090 (void) mdi_prop_free(s_pclass); 7091 svp = (scsi_vhci_priv_t *) 7092 mdi_pi_get_vhci_private(npip); 7093 if (svp == NULL) { 7094 VHCI_DEBUG(1, (CE_NOTE, NULL, 7095 "!vhci_failover(2.5)(%s): no " 7096 "client priv! 
%p offlined?\n", 7097 guid, (void *)npip)); 7098 pip = npip; 7099 sps = mdi_select_path(cdip, NULL, 7100 MDI_SELECT_ONLINE_PATH, pip, &npip); 7101 mdi_rele_path(pip); 7102 continue; 7103 } 7104 if (scsi_abort(&svp->svp_psd->sd_address, NULL) 7105 == 0) { 7106 (void) vhci_recovery_reset(vlun, 7107 &svp->svp_psd->sd_address, FALSE, 7108 VHCI_DEPTH_TARGET); 7109 } 7110 mutex_enter(&svp->svp_mutex); 7111 if (svp->svp_cmds == 0) { 7112 VHCI_DEBUG(1, (CE_NOTE, NULL, 7113 "!vhci_failover(3)(%s):" 7114 "quiesced path %p\n", guid, (void *)npip)); 7115 } else { 7116 while (svp->svp_cmds != 0) { 7117 cv_wait(&svp->svp_cv, &svp->svp_mutex); 7118 VHCI_DEBUG(1, (CE_NOTE, NULL, 7119 "!vhci_failover(3.cv)(%s):" 7120 "quiesced path %p\n", guid, 7121 (void *)npip)); 7122 } 7123 } 7124 mutex_exit(&svp->svp_mutex); 7125 } else { 7126 /* 7127 * make sure we freeup the memory 7128 */ 7129 (void) mdi_prop_free(s_pclass); 7130 } 7131 pip = npip; 7132 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, 7133 pip, &npip); 7134 mdi_rele_path(pip); 7135 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 7136 return (0); 7137 } 7138 7139 static struct scsi_vhci_lun * 7140 vhci_lun_lookup(dev_info_t *tgt_dip) 7141 { 7142 return ((struct scsi_vhci_lun *) 7143 mdi_client_get_vhci_private(tgt_dip)); 7144 } 7145 7146 static struct scsi_vhci_lun * 7147 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc) 7148 { 7149 struct scsi_vhci_lun *svl; 7150 7151 if (svl = vhci_lun_lookup(tgt_dip)) { 7152 return (svl); 7153 } 7154 7155 svl = kmem_zalloc(sizeof (*svl), KM_SLEEP); 7156 svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP); 7157 (void) strcpy(svl->svl_lun_wwn, guid); 7158 mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL); 7159 cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL); 7160 sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL); 7161 svl->svl_waiting_for_activepath = 1; 7162 svl->svl_sector_size = 1; 7163 mdi_client_set_vhci_private(tgt_dip, svl); 7164 *didalloc = 1; 7165 VHCI_DEBUG(1, (CE_NOTE, NULL, 7166 "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n", 7167 guid, (void *)svl)); 7168 return (svl); 7169 } 7170 7171 static void 7172 vhci_lun_free(dev_info_t *tgt_dip) 7173 { 7174 struct scsi_vhci_lun *dvlp; 7175 char *guid; 7176 struct scsi_device *sd; 7177 7178 /* 7179 * The scsi_device was set to driver private during child node 7180 * initialization in the scsi_hba_bus_ctl(). 
7181 */ 7182 sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip); 7183 7184 dvlp = (struct scsi_vhci_lun *) 7185 mdi_client_get_vhci_private(tgt_dip); 7186 ASSERT(dvlp != NULL); 7187 7188 mdi_client_set_vhci_private(tgt_dip, NULL); 7189 7190 guid = dvlp->svl_lun_wwn; 7191 ASSERT(guid != NULL); 7192 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid)); 7193 7194 mutex_enter(&dvlp->svl_mutex); 7195 if (dvlp->svl_active_pclass != NULL) { 7196 kmem_free(dvlp->svl_active_pclass, 7197 strlen(dvlp->svl_active_pclass)+1); 7198 } 7199 dvlp->svl_active_pclass = NULL; 7200 mutex_exit(&dvlp->svl_mutex); 7201 7202 if (dvlp->svl_lun_wwn != NULL) { 7203 kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1); 7204 } 7205 dvlp->svl_lun_wwn = NULL; 7206 7207 if (dvlp->svl_fops_name) { 7208 kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1); 7209 } 7210 dvlp->svl_fops_name = NULL; 7211 7212 if (dvlp->svl_fops_ctpriv != NULL && 7213 dvlp->svl_fops != NULL) { 7214 dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv); 7215 } 7216 7217 if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG) 7218 taskq_destroy(dvlp->svl_taskq); 7219 7220 mutex_destroy(&dvlp->svl_mutex); 7221 cv_destroy(&dvlp->svl_cv); 7222 sema_destroy(&dvlp->svl_pgr_sema); 7223 kmem_free(dvlp, sizeof (*dvlp)); 7224 /* 7225 * vhci_lun_free may be called before the tgt_dip 7226 * initialization so check if the sd is NULL. 7227 */ 7228 if (sd != NULL) 7229 scsi_device_hba_private_set(sd, NULL); 7230 } 7231 7232 int 7233 vhci_do_scsi_cmd(struct scsi_pkt *pkt) 7234 { 7235 int err = 0; 7236 int retry_cnt = 0; 7237 uint8_t *sns, skey; 7238 7239 #ifdef DEBUG 7240 if (vhci_debug > 5) { 7241 vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip, 7242 CE_WARN, "Vhci command", pkt->pkt_cdbp); 7243 } 7244 #endif 7245 7246 retry: 7247 err = scsi_poll(pkt); 7248 if (err) { 7249 if (pkt->pkt_cdbp[0] == SCMD_RELEASE) { 7250 if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) { 7251 VHCI_DEBUG(1, (CE_NOTE, NULL, 7252 "!v_s_do_s_c: RELEASE conflict\n")); 7253 return (0); 7254 } 7255 } 7256 if (retry_cnt++ < 6) { 7257 VHCI_DEBUG(1, (CE_WARN, NULL, 7258 "!v_s_do_s_c:retry packet 0x%p " 7259 "status 0x%x reason %s", 7260 (void *)pkt, SCBP_C(pkt), 7261 scsi_rname(pkt->pkt_reason))); 7262 if ((pkt->pkt_reason == CMD_CMPLT) && 7263 (SCBP_C(pkt) == STATUS_CHECK) && 7264 (pkt->pkt_state & STATE_ARQ_DONE)) { 7265 sns = (uint8_t *) 7266 &(((struct scsi_arq_status *)(uintptr_t) 7267 (pkt->pkt_scbp))->sts_sensedata); 7268 skey = scsi_sense_key(sns); 7269 VHCI_DEBUG(1, (CE_WARN, NULL, 7270 "!v_s_do_s_c:retry " 7271 "packet 0x%p sense data %s", (void *)pkt, 7272 scsi_sname(skey))); 7273 } 7274 goto retry; 7275 } 7276 VHCI_DEBUG(1, (CE_WARN, NULL, 7277 "!v_s_do_s_c: failed transport 0x%p 0x%x", 7278 (void *)pkt, SCBP_C(pkt))); 7279 return (0); 7280 } 7281 7282 switch (pkt->pkt_reason) { 7283 case CMD_TIMEOUT: 7284 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed " 7285 "out (pkt 0x%p)", (void *)pkt)); 7286 return (0); 7287 case CMD_CMPLT: 7288 switch (SCBP_C(pkt)) { 7289 case STATUS_GOOD: 7290 break; 7291 case STATUS_CHECK: 7292 if (pkt->pkt_state & STATE_ARQ_DONE) { 7293 sns = (uint8_t *)&((( 7294 struct scsi_arq_status *) 7295 (uintptr_t) 7296 (pkt->pkt_scbp))-> 7297 sts_sensedata); 7298 skey = scsi_sense_key(sns); 7299 if ((skey == 7300 KEY_UNIT_ATTENTION) || 7301 (skey == 7302 KEY_NOT_READY)) { 7303 /* 7304 * clear unit attn. 
7305 */ 7306 7307 VHCI_DEBUG(1, 7308 (CE_WARN, NULL, 7309 "!v_s_do_s_c: " 7310 "retry " 7311 "packet 0x%p sense " 7312 "data %s", 7313 (void *)pkt, 7314 scsi_sname 7315 (skey))); 7316 goto retry; 7317 } 7318 VHCI_DEBUG(4, (CE_WARN, NULL, 7319 "!ARQ while " 7320 "transporting " 7321 "(pkt 0x%p)", 7322 (void *)pkt)); 7323 return (0); 7324 } 7325 return (0); 7326 default: 7327 VHCI_DEBUG(1, (CE_WARN, NULL, 7328 "!Bad status returned " 7329 "(pkt 0x%p, status %x)", 7330 (void *)pkt, SCBP_C(pkt))); 7331 return (0); 7332 } 7333 break; 7334 case CMD_INCOMPLETE: 7335 case CMD_RESET: 7336 case CMD_ABORTED: 7337 case CMD_TRAN_ERR: 7338 if (retry_cnt++ < 1) { 7339 VHCI_DEBUG(1, (CE_WARN, NULL, 7340 "!v_s_do_s_c: retry packet 0x%p %s", 7341 (void *)pkt, scsi_rname(pkt->pkt_reason))); 7342 goto retry; 7343 } 7344 /* FALLTHROUGH */ 7345 default: 7346 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not " 7347 "complete successfully (pkt 0x%p," 7348 "reason %x)", (void *)pkt, pkt->pkt_reason)); 7349 return (0); 7350 } 7351 return (1); 7352 } 7353 7354 static int 7355 vhci_quiesce_lun(struct scsi_vhci_lun *vlun) 7356 { 7357 mdi_pathinfo_t *pip, *spip; 7358 dev_info_t *cdip; 7359 struct scsi_vhci_priv *svp; 7360 mdi_pathinfo_state_t pstate; 7361 uint32_t p_ext_state; 7362 int circular; 7363 7364 cdip = vlun->svl_dip; 7365 pip = spip = NULL; 7366 ndi_devi_enter(cdip, &circular); 7367 pip = mdi_get_next_phci_path(cdip, NULL); 7368 while (pip != NULL) { 7369 (void) mdi_pi_get_state2(pip, &pstate, &p_ext_state); 7370 if (pstate != MDI_PATHINFO_STATE_ONLINE) { 7371 spip = pip; 7372 pip = mdi_get_next_phci_path(cdip, spip); 7373 continue; 7374 } 7375 mdi_hold_path(pip); 7376 ndi_devi_exit(cdip, circular); 7377 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 7378 mutex_enter(&svp->svp_mutex); 7379 while (svp->svp_cmds != 0) { 7380 if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex, 7381 drv_usectohz(vhci_path_quiesce_timeout * 1000000), 7382 TR_CLOCK_TICK) == -1) { 7383 mutex_exit(&svp->svp_mutex); 7384 mdi_rele_path(pip); 7385 VHCI_DEBUG(1, (CE_WARN, NULL, 7386 "Quiesce of lun is not successful " 7387 "vlun: 0x%p.", (void *)vlun)); 7388 return (0); 7389 } 7390 } 7391 mutex_exit(&svp->svp_mutex); 7392 ndi_devi_enter(cdip, &circular); 7393 spip = pip; 7394 pip = mdi_get_next_phci_path(cdip, spip); 7395 mdi_rele_path(spip); 7396 } 7397 ndi_devi_exit(cdip, circular); 7398 return (1); 7399 } 7400 7401 static int 7402 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp) 7403 { 7404 scsi_vhci_lun_t *vlun; 7405 vhci_prout_t *prout; 7406 int rval, success; 7407 mdi_pathinfo_t *pip, *npip; 7408 scsi_vhci_priv_t *osvp; 7409 dev_info_t *cdip; 7410 uchar_t cdb_1; 7411 uchar_t temp_res_key[MHIOC_RESV_KEY_SIZE]; 7412 7413 7414 /* 7415 * see if there are any other paths available; if none, 7416 * then there is nothing to do. 7417 */ 7418 cdip = svp->svp_svl->svl_dip; 7419 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7420 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7421 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7422 VHCI_DEBUG(4, (CE_NOTE, NULL, 7423 "%s%d: vhci_pgr_validate_and_register: first path\n", 7424 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7425 return (1); 7426 } 7427 7428 vlun = svp->svp_svl; 7429 prout = &vlun->svl_prout; 7430 ASSERT(vlun->svl_pgr_active != 0); 7431 7432 /* 7433 * When the path was busy/offlined, some other host might have 7434 * cleared this key. Validate key on some other path first. 7435 * If it fails, return failure. 
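 *
 * (A note on the cdb manipulation below: per SPC, byte 1 bits 4:0
 * of a PERSISTENT RESERVE OUT CDB carry the service action, which
 * is why svl_cdb[1] is masked with 0xe0, dropping a caller-supplied
 * Register_And_Ignore service action before validating the key on
 * other paths.)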
7436 */ 7437 7438 npip = pip; 7439 pip = NULL; 7440 success = 0; 7441 7442 /* Save the res key */ 7443 bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE); 7444 7445 /* 7446 * Sometimes the CDB from the application can be a Register_And_Ignore. 7447 * Instead of validating, such a cdb would force a registration. 7448 * Convert it to a normal cdb for validation. 7449 * After that be sure to restore the cdb. 7450 */ 7451 cdb_1 = vlun->svl_cdb[1]; 7452 vlun->svl_cdb[1] &= 0xe0; 7453 7454 do { 7455 osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 7456 if (osvp == NULL) { 7457 VHCI_DEBUG(4, (CE_NOTE, NULL, 7458 "vhci_pgr_validate_and_register: no " 7459 "client priv! 0x%p offlined?\n", 7460 (void *)npip)); 7461 goto next_path_1; 7462 } 7463 7464 if (osvp == svp) { 7465 VHCI_DEBUG(4, (CE_NOTE, NULL, 7466 "vhci_pgr_validate_and_register: same svp 0x%p" 7467 " npip 0x%p vlun 0x%p\n", 7468 (void *)svp, (void *)npip, (void *)vlun)); 7469 goto next_path_1; 7470 } 7471 7472 VHCI_DEBUG(4, (CE_NOTE, NULL, 7473 "vhci_pgr_validate_and_register: First validate on" 7474 " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy" 7475 " cdb1 %x\n", (void *)osvp, (void *)vlun, 7476 (void *)curthread, vlun->svl_cdb[1])); 7477 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:"); 7478 7479 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7480 7481 VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy", 7482 (void *)vlun)); 7483 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7484 7485 rval = vhci_do_prout(osvp); 7486 if (rval == 1) { 7487 VHCI_DEBUG(4, (CE_NOTE, NULL, 7488 "%s%d: vhci_pgr_validate_and_register: key" 7489 " validated thread 0x%p\n", ddi_driver_name(cdip), 7490 ddi_get_instance(cdip), (void *)curthread)); 7491 pip = npip; 7492 success = 1; 7493 break; 7494 } else { 7495 VHCI_DEBUG(4, (CE_NOTE, NULL, 7496 "vhci_pgr_validate_and_register: First validation" 7497 " on osvp 0x%p failed %x\n", (void *)osvp, rval)); 7498 vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:"); 7499 } 7500 7501 /* 7502 * Try other paths 7503 */ 7504 next_path_1: 7505 pip = npip; 7506 rval = mdi_select_path(cdip, NULL, 7507 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7508 pip, &npip); 7509 mdi_rele_path(pip); 7510 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7511 7512 7513 /* Be sure to restore original cdb */ 7514 vlun->svl_cdb[1] = cdb_1; 7515 7516 /* Restore the res_key */ 7517 bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7518 7519 /* 7520 * If the key could not be validated on any other path, this must be 7521 * the first registration; return success so the online can still continue.
7522 */ 7523 if (success == 0) { 7524 return (1); 7525 } 7526 7527 ASSERT(pip != NULL); 7528 7529 /* 7530 * Force register on new path 7531 */ 7532 cdb_1 = vlun->svl_cdb[1]; /* store the cdb */ 7533 7534 vlun->svl_cdb[1] &= 0xe0; 7535 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 7536 7537 vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: "); 7538 7539 bcopy(prout->active_service_key, prout->service_key, 7540 MHIOC_RESV_KEY_SIZE); 7541 bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7542 7543 vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: "); 7544 7545 rval = vhci_do_prout(svp); 7546 vlun->svl_cdb[1] = cdb_1; /* restore the cdb */ 7547 if (rval != 1) { 7548 VHCI_DEBUG(4, (CE_NOTE, NULL, 7549 "vhci_pgr_validate_and_register: register on new" 7550 " path 0x%p svp 0x%p failed %x\n", 7551 (void *)pip, (void *)svp, rval)); 7552 vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: "); 7553 mdi_rele_path(pip); 7554 return (0); 7555 } 7556 7557 if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) { 7558 VHCI_DEBUG(4, (CE_NOTE, NULL, 7559 "vhci_pgr_validate_and_register: zero service key\n")); 7560 mdi_rele_path(pip); 7561 return (rval); 7562 } 7563 7564 /* 7565 * While the key was force registered, some other host might have 7566 * cleared the key. Re-validate key on another pre-existing path 7567 * before declaring success. 7568 */ 7569 npip = pip; 7570 pip = NULL; 7571 7572 /* 7573 * Sometimes CDB from application can be Register and Ignore. 7574 * Instead of validation, it would result in force registration. 7575 * Convert it to normal cdb for validation. 7576 * After that be sure to restore the cdb. 7577 */ 7578 cdb_1 = vlun->svl_cdb[1]; 7579 vlun->svl_cdb[1] &= 0xe0; 7580 success = 0; 7581 7582 do { 7583 osvp = (scsi_vhci_priv_t *) 7584 mdi_pi_get_vhci_private(npip); 7585 if (osvp == NULL) { 7586 VHCI_DEBUG(4, (CE_NOTE, NULL, 7587 "vhci_pgr_validate_and_register: no " 7588 "client priv! 0x%p offlined?\n", 7589 (void *)npip)); 7590 goto next_path_2; 7591 } 7592 7593 if (osvp == svp) { 7594 VHCI_DEBUG(4, (CE_NOTE, NULL, 7595 "vhci_pgr_validate_and_register: same osvp 0x%p" 7596 " npip 0x%p vlun 0x%p\n", 7597 (void *)svp, (void *)npip, (void *)vlun)); 7598 goto next_path_2; 7599 } 7600 7601 VHCI_DEBUG(4, (CE_NOTE, NULL, 7602 "vhci_pgr_validate_and_register: Re-validation on" 7603 " osvp 0x%p being done. 
vlun 0x%p Before bcopy cdb1 %x\n", 7604 (void *)osvp, (void *)vlun, vlun->svl_cdb[1])); 7605 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7606 7607 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7608 7609 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7610 7611 rval = vhci_do_prout(osvp); 7612 if (rval == 1) { 7613 VHCI_DEBUG(4, (CE_NOTE, NULL, 7614 "%s%d: vhci_pgr_validate_and_register: key" 7615 " validated thread 0x%p\n", ddi_driver_name(cdip), 7616 ddi_get_instance(cdip), (void *)curthread)); 7617 pip = npip; 7618 success = 1; 7619 break; 7620 } else { 7621 VHCI_DEBUG(4, (CE_NOTE, NULL, 7622 "vhci_pgr_validate_and_register: Re-validation on" 7623 " osvp 0x%p failed %x\n", (void *)osvp, rval)); 7624 vhci_print_prout_keys(vlun, 7625 "v_pgr_val_reg: reval failed: "); 7626 } 7627 7628 /* 7629 * Try other paths 7630 */ 7631 next_path_2: 7632 pip = npip; 7633 rval = mdi_select_path(cdip, NULL, 7634 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7635 pip, &npip); 7636 mdi_rele_path(pip); 7637 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7638 7639 /* Be sure to restore original cdb */ 7640 vlun->svl_cdb[1] = cdb_1; 7641 7642 if (success == 1) { 7643 /* Successfully validated registration */ 7644 mdi_rele_path(pip); 7645 return (1); 7646 } 7647 7648 VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed")); 7649 7650 /* 7651 * key invalid, back out by registering key value of 0 7652 */ 7653 VHCI_DEBUG(4, (CE_NOTE, NULL, 7654 "vhci_pgr_validate_and_register: backout on" 7655 " svp 0x%p being done\n", (void *)svp)); 7656 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7657 7658 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7659 bzero(prout->service_key, MHIOC_RESV_KEY_SIZE); 7660 7661 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bzero: "); 7662 7663 /* 7664 * Get a new path 7665 */ 7666 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7667 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7668 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7669 VHCI_DEBUG(4, (CE_NOTE, NULL, 7670 "%s%d: vhci_pgr_validate_and_register: no valid pip\n", 7671 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7672 return (0); 7673 } 7674 7675 if ((rval = vhci_do_prout(svp)) != 1) { 7676 VHCI_DEBUG(4, (CE_NOTE, NULL, 7677 "vhci_pgr_validate_and_register: backout on" 7678 " svp 0x%p failed\n", (void *)svp)); 7679 vhci_print_prout_keys(vlun, "backout failed"); 7680 7681 VHCI_DEBUG(4, (CE_WARN, NULL, 7682 "%s%d: vhci_pgr_validate_and_register: key" 7683 " validation and backout failed", ddi_driver_name(cdip), 7684 ddi_get_instance(cdip))); 7685 if (rval == VHCI_PGR_ILLEGALOP) { 7686 VHCI_DEBUG(4, (CE_WARN, NULL, 7687 "%s%d: vhci_pgr_validate_and_register: key" 7688 " already cleared", ddi_driver_name(cdip), 7689 ddi_get_instance(cdip))); 7690 rval = 1; 7691 } else 7692 rval = 0; 7693 } else { 7694 VHCI_DEBUG(4, (CE_NOTE, NULL, 7695 "%s%d: vhci_pgr_validate_and_register: key" 7696 " validation failed, key backed out\n", 7697 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7698 vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: "); 7699 } 7700 mdi_rele_path(pip); 7701 7702 return (rval); 7703 } 7704 7705 /* 7706 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures 7707 * that vhci_scsi_start is not called in interrupt context. 7708 * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we 7709 * need to complete the command if something goes wrong.
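 *
 * (On failure the target pkt is completed below with CMD_ABORTED
 * and STAT_ABORTED, which lets the target driver retry the command
 * itself without escalating to a reset.)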
static void
vhci_initiate_auto_failback(void *arg)
{
	struct scsi_vhci_lun	*vlun = (struct scsi_vhci_lun *)arg;
	dev_info_t		*vdip, *cdip;
	int			held;

	cdip = vlun->svl_dip;
	vdip = ddi_get_parent(cdip);

	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);

	/*
	 * Perform a final check that the active path class is indeed not
	 * the preferred path class. In the time since the auto failback
	 * was dispatched, an external failover could have been detected;
	 * some other host could have noticed this condition and already
	 * triggered the failback. In such a case, going ahead with the
	 * failover here would negate the whole purpose of auto failback.
	 */
	mutex_enter(&vlun->svl_mutex);
	if (vlun->svl_active_pclass != NULL) {
		char			*best_pclass;
		struct scsi_failover_ops *fo;

		fo = vlun->svl_fops;

		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
		    vlun->svl_fops_ctpriv);
		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
			mutex_exit(&vlun->svl_mutex);
			VHCI_RELEASE_LUN(vlun);
			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
			    "auto failback for %s as %s pathclass already "
			    "active.\n", vlun->svl_lun_wwn, best_pclass));
			return;
		}
	}
	mutex_exit(&vlun->svl_mutex);
	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
	    == MDI_SUCCESS) {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "succeeded for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	} else {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "failed for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	}
	VHCI_RELEASE_LUN(vlun);
}

#ifdef DEBUG
static void
vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
{
	vhci_clean_print(NULL, 5, "Current PGR Keys",
	    (uchar_t *)prin, numkeys * 8);
}
#endif
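
/*
 * Debug aid: dump the four PROUT key buffers of a LUN (res_key,
 * service_key, active_res_key and active_service_key) as hex strings,
 * prefixed with a caller-supplied message.
 */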
static void
vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
{
	int		i;
	vhci_prout_t	*prout;
	char		buf1[4 * MHIOC_RESV_KEY_SIZE + 1];
	char		buf2[4 * MHIOC_RESV_KEY_SIZE + 1];
	char		buf3[4 * MHIOC_RESV_KEY_SIZE + 1];
	char		buf4[4 * MHIOC_RESV_KEY_SIZE + 1];

	prout = &vlun->svl_prout;

	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf1[4 * i], "[%02x]", prout->res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf2[4 * i], "[%02x]", prout->service_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf3[4 * i], "[%02x]",
		    prout->active_res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf4[4 * i], "[%02x]",
		    prout->active_service_key[i]);

	/* Print everything in one call so the lines do not interleave */
	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
	    "res_key:            %s\n"
	    "service_key:        %s\n"
	    "active_res_key:     %s\n"
	    "active_service_key: %s\n",
	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
}

/*
 * Called from vhci_scsi_start to update the pHCI pkt with target packet.
 */
static void
vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
{
	ASSERT(vpkt->vpkt_hba_pkt);

	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;

	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
		/*
		 * Polled Command is requested or HBA is in
		 * suspended state
		 */
		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
	} else {
		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
	}
	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
	    vpkt->vpkt_tgt_init_cdblen);
	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;

	/* Re-initialize the following pHCI packet state information */
	vpkt->vpkt_hba_pkt->pkt_state = 0;
	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
	vpkt->vpkt_hba_pkt->pkt_reason = 0;
}
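
/*
 * bus_power(9E) entry point. Power management is handled generically by
 * the MPxIO framework via mdi_bus_power(); the MDI return codes are
 * mapped to DDI return codes here.
 */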
static int
vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
    void *arg, void *result)
{
	int ret = DDI_SUCCESS;

	/*
	 * Generic processing in MPxIO framework
	 */
	ret = mdi_bus_power(parent, impl_arg, op, arg, result);

	switch (ret) {
	case MDI_SUCCESS:
		ret = DDI_SUCCESS;
		break;
	case MDI_FAILURE:
		ret = DDI_FAILURE;
		break;
	default:
		break;
	}

	return (ret);
}
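
/*
 * Set a SCSI capability on the pHCI path(s) of a client device. When a
 * specific pip is supplied (the call came from vhci_pathinfo_state_change
 * for a path that was busy and is becoming ready), only that path is
 * updated; otherwise the capability is applied to every online and
 * standby path in turn.
 */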
static int
vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip)
{
	dev_info_t		*cdip;
	mdi_pathinfo_t		*npip = NULL;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	int			mps_flag;
	int			rval = 0;

	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
	if (pip) {
		/*
		 * If the call is from vhci_pathinfo_state_change,
		 * then this path was busy and is becoming ready to accept IO.
		 */
		ASSERT(ap != NULL);
		hba = ap->a_hba_tran;
		ASSERT(hba != NULL);
		rval = scsi_ifsetcap(ap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		return (rval);
	}

	/*
	 * Set capability on all the pHCIs.
	 * If any path is busy, then the capability would be set by
	 * vhci_pathinfo_state_change.
	 */

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL,
		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
		    (void *)cdip));
		return (0);
	}

again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (rval);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (rval);
	}

	pap = &svp->svp_psd->sd_address;
	ASSERT(pap != NULL);
	hba = pap->a_hba_tran;
	ASSERT(hba != NULL);

	if (hba->tran_setcap != NULL) {
		rval = scsi_ifsetcap(pap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		/*
		 * Select next path and issue the setcap, repeat
		 * until all paths are exhausted
		 */
		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
			mdi_rele_path(pip);
			return (1);
		}
		mdi_rele_path(pip);
		pip = npip;
		goto again;
	}
	mdi_rele_path(pip);
	return (rval);
}
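
/*
 * bus_config(9E) entry point. For BUS_CONFIG_ONE and BUS_UNCONFIG_ONE the
 * device name is translated to a GUID, and the actual enumeration is
 * delegated to the MDI framework via mdi_vhci_bus_config().
 * vhci_scsi_bus_unconfig() below simply delegates to
 * ndi_busop_bus_unconfig().
 */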
static int
vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **child)
{
	char *guid;

	if (vhci_bus_config_debug)
		flags |= NDI_DEVI_DEBUG;

	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
		guid = vhci_devnm_to_guid((char *)arg);
	else
		guid = NULL;

	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
	    == MDI_SUCCESS)
		return (NDI_SUCCESS);
	else
		return (NDI_FAILURE);
}

static int
vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
	if (vhci_bus_config_debug)
		flags |= NDI_DEVI_DEBUG;

	return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
}

/*
 * Take the original vhci_pkt, create a duplicate of the pkt for resending
 * as though it originated in ssd.
 */
static struct scsi_pkt *
vhci_create_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt	*new_vpkt = NULL;
	struct scsi_pkt	*pkt = NULL;

	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	/*
	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
	 */
	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
	    vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
	if (pkt != NULL) {
		new_vpkt = TGTPKT2VHCIPKT(pkt);

		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;

		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;

		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);

		/*
		 * Save a pointer to the original vhci_pkt
		 */
		new_vpkt->vpkt_org_vpkt = vpkt;
	}

	return (pkt);
}

/*
 * Copy the successful completion information from the hba packet into
 * the original target pkt from the upper layer. Returns the original
 * vpkt and destroys the new vpkt from the internal retry.
 */
static struct vhci_pkt *
vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt	*ret_vpkt = NULL;
	struct scsi_pkt	*tpkt = NULL;
	struct scsi_pkt	*hba_pkt = NULL;
	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	ASSERT(vpkt->vpkt_org_vpkt != NULL);
	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
	    "completed successfully!\n"));

	ret_vpkt = vpkt->vpkt_org_vpkt;
	tpkt = ret_vpkt->vpkt_tgt_pkt;
	hba_pkt = vpkt->vpkt_hba_pkt;

	/*
	 * Copy the good status into the target driver's packet
	 */
	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
	tpkt->pkt_resid = hba_pkt->pkt_resid;
	tpkt->pkt_state = hba_pkt->pkt_state;
	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
	tpkt->pkt_reason = hba_pkt->pkt_reason;

	/*
	 * Destroy the internally created vpkt for the retry
	 */
	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
	    vpkt->vpkt_tgt_pkt);

	return (ret_vpkt);
}

/* restart the request sense request */
static void
vhci_uscsi_restart_sense(void *arg)
{
	struct buf	*rqbp;
	struct buf	*bp;
	struct scsi_pkt	*rqpkt = (struct scsi_pkt *)arg;
	mp_uscsi_cmd_t	*mp_uscmdp;

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));

	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
		/* if it fails - need to wakeup the original command */
		mp_uscmdp = rqpkt->pkt_private;
		bp = mp_uscmdp->cmdbp;
		rqbp = mp_uscmdp->rqbp;
		ASSERT(mp_uscmdp && bp && rqbp);
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		bp->b_resid = bp->b_bcount;
		bioerror(bp, EIO);
		biodone(bp);
	}
}

/*
 * auto-rqsense is not enabled, so we have to retrieve the request sense
 * manually.
 */
static int
vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
{
	struct buf	*rqbp, *cmdbp;
	struct scsi_pkt	*rqpkt;
	int		rval = 0;

	cmdbp = mp_uscmdp->cmdbp;
	ASSERT(cmdbp != NULL);

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
	/* set up the packet information and cdb */
	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
		return (-1);
	}

	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
		scsi_free_consistent_buf(rqbp);
		return (-1);
	}

	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);

	mp_uscmdp->rqbp = rqbp;
	rqbp->b_private = mp_uscmdp;
	rqpkt->pkt_flags |= FLAG_SENSING;
	rqpkt->pkt_time = 60;
	rqpkt->pkt_comp = vhci_uscsi_iodone;
	rqpkt->pkt_private = mp_uscmdp;

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
	 */
	if (scsi_pkt_allocated_correctly(rqpkt))
		rqpkt->pkt_path_instance = 0;

	/* get her done */
	switch (scsi_transport(rqpkt)) {
	case TRAN_ACCEPT:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport accepted."));
		break;
	case TRAN_BUSY:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport busy, setting timeout."));
		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
		    (drv_usectohz(5 * 1000000)));
		break;
	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport failed"));
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		rval = -1;
	}

	return (rval);
}

/*
 * done routine for the mpapi uscsi command - this behaves as though
 * FLAG_DIAGNOSE is set, meaning there are no retries except for a manual
 * request sense.
 */
void
vhci_uscsi_iodone(struct scsi_pkt *pkt)
{
	struct buf		*bp;
	mp_uscsi_cmd_t		*mp_uscmdp;
	struct uscsi_cmd	*uscmdp;
	struct scsi_arq_status	*arqstat;
	int			err;

	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
	uscmdp = mp_uscmdp->uscmdp;
	bp = mp_uscmdp->cmdbp;
	ASSERT(bp != NULL);
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
	/* Save the status and the residual into the uscsi_cmd struct */
	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
	uscmdp->uscsi_resid = bp->b_resid;

	/* return on a very successful command */
	if (pkt->pkt_reason == CMD_CMPLT &&
	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
	    pkt->pkt_resid == 0) {
		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		return;
	}
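
	/*
	 * The command requires further processing. The cases handled
	 * below are: transport-level failure, auto request sense data
	 * returned, completion of a manual request sense, and a check
	 * condition that requires a manual request sense to be issued.
	 */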
	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
	    pkt->pkt_reason, pkt->pkt_resid,
	    pkt->pkt_state, bp->b_bcount, bp->b_resid));

	err = EIO;

	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
	if (pkt->pkt_reason != CMD_CMPLT) {
		/*
		 * The command did not complete.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command did not complete."
		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
		if (pkt->pkt_flags & FLAG_SENSING) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
			err = ETIMEDOUT;
		}
	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
		/*
		 * The auto-rqsense happened, and the packet has a filled-in
		 * scsi_arq_status structure, pointed to by pkt_scbp.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: received auto-requested sense"));
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			/* get the amount of data to copy into rqbuf */
			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus =
			    *((char *)&arqstat->sts_rqpkt_status);
			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
			    rqlen != 0) {
				bcopy(&(arqstat->sts_sensedata),
				    uscmdp->uscsi_rqbuf, rqlen);
			}
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_uscsi_iodone: ARQ "
			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
			    "xfer: %d rqpkt_resid: %d\n",
			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
			    uscmdp->uscsi_rqlen, rqlen,
			    arqstat->sts_rqpkt_resid));
		}
	} else if (pkt->pkt_flags & FLAG_SENSING) {
		struct buf *rqbp;
		struct scsi_status *rqstatus;

		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
		/* a manual request sense was done - get the information */
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			int rqlen = SENSE_LENGTH - pkt->pkt_resid;

			rqbp = mp_uscmdp->rqbp;
			/* get the amount of data to copy into rqbuf */
			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
				    rqlen);
			}
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
			scsi_free_consistent_buf(rqbp);
		}
		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING"
		    " uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
	} else {
		struct scsi_status *status =
		    (struct scsi_status *)pkt->pkt_scbp;
		/*
		 * Command completed and we're not getting sense. Check for
		 * errors and decide what to do next.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command appears complete: reason: %x",
		    pkt->pkt_reason));
		if (status->sts_chk) {
			/* need to manually get the request sense */
			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
				scsi_destroy_pkt(pkt);
				return;
			}
		} else {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_uscsi_iodone: appears complete"));
			err = 0;
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			if (pkt->pkt_resid) {
				bp->b_resid += pkt->pkt_resid;
			}
		}
	}

	if (err) {
		if (bp->b_resid == 0)
			bp->b_resid = bp->b_bcount;
		bioerror(bp, err);
		bp->b_flags |= B_ERROR;
	}

	scsi_destroy_pkt(pkt);
	biodone(bp);

	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
}
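
/*
 * Overview of the MPAPI uscsi(7I) path: vhci_uscsi_iostart() builds a
 * scsi_pkt for the user command and transports it, retrying TRAN_BUSY up
 * to vhci_uscsi_retry_count times with vhci_uscsi_delay microseconds
 * between attempts. Completion is handled by vhci_uscsi_iodone() above,
 * with vhci_uscsi_send_sense()/vhci_uscsi_restart_sense() used when a
 * manual REQUEST SENSE is required.
 */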

/*
 * start routine for the mpapi uscsi command
 */
int
vhci_uscsi_iostart(struct buf *bp)
{
	struct scsi_pkt		*pkt;
	struct uscsi_cmd	*uscmdp;
	mp_uscsi_cmd_t		*mp_uscmdp;
	int			stat_size, rval;
	int			retry = 0;

	ASSERT(bp->b_private != NULL);

	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
	uscmdp = mp_uscmdp->uscmdp;
	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
		stat_size = SENSE_LENGTH;
	} else {
		stat_size = 1;
	}

	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
	    stat_size, 0, 0, SLEEP_FUNC, NULL);
	if (pkt == NULL) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: EINVAL"));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EINVAL);
		biodone(bp);
		return (EINVAL);
	}

	pkt->pkt_time = uscmdp->uscsi_timeout;
	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
	pkt->pkt_comp = vhci_uscsi_iodone;
	pkt->pkt_private = mp_uscmdp;
	if (uscmdp->uscsi_flags & USCSI_SILENT)
		pkt->pkt_flags |= FLAG_SILENT;
	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
		pkt->pkt_flags |= FLAG_ISOLATE;
	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
		pkt->pkt_flags |= FLAG_DIAGNOSE;
	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
	}
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
	    " stat_size: %d",
	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
	 */
	if (scsi_pkt_allocated_correctly(pkt))
		pkt->pkt_path_instance = 0;

	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
	    retry < vhci_uscsi_retry_count) {
		delay(drv_usectohz(vhci_uscsi_delay));
		retry++;
	}
	if (retry >= vhci_uscsi_retry_count) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
	}
	switch (rval) {
	case TRAN_ACCEPT:
		rval = 0;
		break;

	default:
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
		    rval, bp->b_bcount, bp->b_resid));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EIO);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		rval = EIO;
		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		break;
	}
	VHCI_DEBUG(4, (CE_NOTE, NULL,
	    "vhci_uscsi_iostart: exit: rval: %d", rval));
	return (rval);
}

/* ARGSUSED */
static struct scsi_failover_ops *
vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd,
    void **ctprivp, char **fo_namep)
{
	struct scsi_failover_ops	*sfo;
	char				*sfo_name;
	char				*override;
	struct scsi_failover		*sf;

	ASSERT(psd && psd->sd_inq);
	if ((psd == NULL) || (psd->sd_inq == NULL)) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_dev_fo: return NULL, no scsi_device or inquiry"));
		return (NULL);
	}

	/*
	 * Determine if device is supported under scsi_vhci, and select
	 * failover module.
	 *
	 * See if there is a scsi_vhci.conf file override for this device's
	 * VID/PID. The following values can be returned:
	 *
	 * NULL		If NULL is returned then there is no scsi_vhci.conf
	 *		override. For NULL, we determine the failover_ops for
	 *		this device by checking the sfo_device_probe entry
	 *		point for each 'fops' module, in order.
	 *
	 *		NOTE: Correct operation may depend on module ordering
	 *		of 'specific' (failover modules that are completely
	 *		VID/PID table based) to 'generic' (failover modules
	 *		that are based on T10 standards like TPGS). Currently,
	 *		the value of 'ddi-forceload' in scsi_vhci.conf is used
	 *		to establish the module list and probe order.
	 *
	 * "NONE"	If the value "NONE" is returned then there is a
	 *		scsi_vhci.conf VID/PID override to indicate that the
	 *		device should not be supported under scsi_vhci (even
	 *		if there is an 'fops' module supporting the device).
	 *
	 * "<other>"	If another value is returned then that value is the
	 *		name of the 'fops' module that should be used.
	 */
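	/*
	 * For illustration, an override entry in scsi_vhci.conf takes the
	 * form below (the VID/PID strings here are hypothetical, not from
	 * a shipped configuration):
	 *
	 *	scsi-vhci-failover-override =
	 *		"VENDOR  PRODUCT         ",	"f_sym",
	 *		"VENDOR  OTHERPRODUCT    ",	"NONE";
	 */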
	sfo = NULL;	/* "NONE" */
	override = scsi_get_device_type_string(
	    "scsi-vhci-failover-override", vdip, psd);
	if (override == NULL) {
		/* NULL: default: select based on sfo_device_probe results */
		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
			    ctprivp) == SFO_DEVICE_PROBE_PHCI)
				continue;

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			if (fo_namep && (*fo_namep == NULL)) {
				sfo_name = i_ddi_strdup(sfo->sfo_name,
				    KM_SLEEP);
				*fo_namep = sfo_name;
			}
			break;
		}
	} else if (strcasecmp(override, "NONE")) {
		/* !"NONE": select based on driver.conf specified name */
		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    (sf->sf_sfo->sfo_name == NULL) ||
			    strcmp(override, sf->sf_sfo->sfo_name))
				continue;

			/*
			 * NOTE: If sfo_device_probe() has side-effects,
			 * including setting *ctprivp, these are not going
			 * to occur with override config.
			 */

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			if (fo_namep && (*fo_namep == NULL)) {
				sfo_name = kmem_alloc(strlen("conf ") +
				    strlen(sfo->sfo_name) + 1, KM_SLEEP);
				(void) sprintf(sfo_name, "conf %s",
				    sfo->sfo_name);
				*fo_namep = sfo_name;
			}
			break;
		}
	}
	if (override)
		kmem_free(override, strlen(override) + 1);
	return (sfo);
}

/*
 * Determine whether the device described by cinfo should be enumerated
 * under the vHCI or the pHCI - if there is a failover ops then the device
 * is supported under the vHCI. By agreement with SCSA, cinfo is a pointer
 * to a scsi_device structure associated with a decorated pHCI probe node.
 */
/* ARGSUSED */
int
vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo)
{
	struct scsi_device *psd = (struct scsi_device *)cinfo;

	return (vhci_dev_fo(vdip, psd, NULL, NULL) ?
	    MDI_SUCCESS : MDI_FAILURE);
}

#ifdef DEBUG
extern struct scsi_key_strings scsi_cmds[];

static char *
vhci_print_scsi_cmd(char cmd)
{
	char tmp[64];
	char *cpnt;

	cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
	/* tmp goes out of scope on return and caller sees garbage */
	if (cpnt == tmp) {
		cpnt = "Unknown Command";
	}
	return (cpnt);
}

extern uchar_t scsi_cdb_size[];

static void
vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
{
	int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
	char buf[256];

	if (level == CE_NOTE) {
		vhci_log(level, dip, "path cmd %s\n",
		    vhci_print_scsi_cmd(*cdb));
		return;
	}

	(void) sprintf(buf, "%s for cmd(%s)", title,
	    vhci_print_scsi_cmd(*cdb));
	vhci_clean_print(dip, level, buf, cdb, len);
}

static void
vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
    int len)
{
	int	i;
	int	c;
	char	*format;
	char	buf[256];
	uchar_t	byte;

	(void) sprintf(buf, "%s:\n", title);
	vhci_log(level, dev, "%s", buf);
	level = CE_CONT;
	for (i = 0; i < len; ) {
		buf[0] = 0;
		for (c = 0; c < 8 && i < len; c++, i++) {
			byte = (uchar_t)data[i];
			if (byte < 0x10)
				format = "0x0%x ";
			else
				format = "0x%x ";
			(void) sprintf(&buf[(int)strlen(buf)], format, byte);
		}
		(void) sprintf(&buf[(int)strlen(buf)], "\n");

		vhci_log(level, dev, "%s\n", buf);
	}
}
#endif
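
/*
 * Mark the MPAPI multipath-LU item that matches this vlun's WWN as
 * invalid, so that stale data is not handed back to MPAPI clients. A
 * debug message is logged if no matching LU object is found.
 */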
static void
vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun)
{
	char			*svl_wwn;
	mpapi_item_list_t	*ilist;
	mpapi_lu_data_t		*ld;

	if (vlun == NULL) {
		return;
	}

	svl_wwn = vlun->svl_lun_wwn;

	ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;

	while (ilist != NULL) {
		ld = (mpapi_lu_data_t *)(ilist->item->idata);
		if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn,
		    strlen(svl_wwn)) == 0)) {
			ld->valid = 0;
			VHCI_DEBUG(6, (CE_WARN, NULL,
			    "vhci_invalidate_mpapi_lu: "
			    "Invalidated LU(%s)", svl_wwn));
			return;
		}
		ilist = ilist->next;
	}
	VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: "
	    "Could not find LU(%s) to invalidate.", svl_wwn));
}