/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Multiplexed I/O SCSI vHCI implementation
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/scsi/impl/services.h>
#include <sys/sunmdi.h>
#include <sys/mdi_impldefs.h>
#include <sys/scsi/adapters/scsi_vhci.h>
#include <sys/disp.h>
#include <sys/byteorder.h>

extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;

#ifdef DEBUG
int vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
#endif

/* retry for the vhci_do_prout command when a not ready is returned */
int vhci_prout_not_ready_retry = 180;

/*
 * These values are defined to support the internal retry of
 * SCSI packets for better sense code handling.
 */
#define	VHCI_CMD_CMPLT	0
#define	VHCI_CMD_RETRY	1
#define	VHCI_CMD_ERROR	-1

#define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
#define	VHCI_SCSI_PERR		0x47
#define	VHCI_PGR_ILLEGALOP	-2
#define	VHCI_NUM_UPDATE_TASKQ	8
/* changed to 132 to accommodate HDS */

/*
 * Version Macros
 */
#define	VHCI_NAME_VERSION	"SCSI VHCI Driver"
char vhci_version_name[] = VHCI_NAME_VERSION;

int vhci_first_time = 0;
clock_t vhci_to_ticks = 0;
int vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
kcondvar_t vhci_cv;
kmutex_t vhci_global_mutex;
void *vhci_softstate = NULL;		/* for soft state */

/*
 * Flag to delay the retry of the reserve command
 */
int vhci_reserve_delay = 100000;
static int vhci_path_quiesce_timeout = 60;
static uchar_t zero_key[MHIOC_RESV_KEY_SIZE];

/* uscsi delay for a TRAN_BUSY */
static int vhci_uscsi_delay = 100000;
static int vhci_uscsi_retry_count = 180;
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static timeout_id_t vhci_restart_timeid = 0;

static int vhci_bus_config_debug = 0;

/*
 * Bidirectional map of 'target-port' to port id <pid> for support of
 * iostat(1M) '-Xx' and '-Yx' output.
 */
static kmutex_t vhci_targetmap_mutex;
static uint_t vhci_targetmap_pid = 1;
static mod_hash_t *vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
static mod_hash_t *vhci_targetmap_byport;	/* 'target-port' -> <pid> */

/*
 * functions exported by scsi_vhci struct cb_ops
 */
static int vhci_open(dev_t *, int, int, cred_t *);
static int vhci_close(dev_t, int, int, cred_t *);
static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * functions exported by scsi_vhci struct dev_ops
 */
static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * functions exported by scsi_vhci scsi_hba_tran_t transport table
 */
static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
    struct scsi_device *);
static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_target(struct scsi_address *, int level,
    uint8_t select_path);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip);
static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
    caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
    void *, void *);
static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);
static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *);
static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *,
    void **, char **);

/*
 * functions registered with the mpxio framework via mdi_vhci_ops_t
 */
static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
    mdi_pathinfo_state_t, uint32_t, int);
static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_failover(dev_info_t *, dev_info_t *, int);
static void vhci_client_attached(dev_info_t *);
static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *);

static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
    int, caddr_t);
static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
    uint_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
    sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
    struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
    scsi_vhci_lun_t *, char *, char *);

static char *vhci_devnm_to_guid(char *);
static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
    int, int (*func)(caddr_t));
static void vhci_intr(struct scsi_pkt *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt **);
static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
static void vhci_lun_free(dev_info_t *);
static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
    uint8_t, uint8_t);
void vhci_update_pathstates(void *);

#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
static void vhci_print_cdb(dev_info_t *dip, uint_t level,
    char *title, uchar_t *cdb);
static void vhci_clean_print(dev_info_t *dev, uint_t level,
    char *title, uchar_t *data, int len);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);

/*
 * MP-API related functions
 */
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
    scsi_vhci_lun_t *, mdi_pathinfo_t *);
extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
    uint8_t, void*);
extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
    scsi_vhci_lun_t *);

#define	VHCI_DMA_MAX_XFER_CAP	INT_MAX

#define	VHCI_MAX_PGR_RETRIES	3

/*
 * Macros for the device-type mpxio options
 */
#define	LOAD_BALANCE_OPTIONS		"load-balance-options"
#define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
#define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
#define	DEVICE_TYPE_STR			"device-type"
#define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')

static struct cb_ops vhci_cb_ops = {
	vhci_open,			/* open */
	vhci_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	vhci_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops vhci_ops = {
	DEVO_REV,
	0,
	vhci_getinfo,
	nulldev,			/* identify */
	nulldev,			/* probe */
	vhci_attach,			/* attach and detach are mandatory */
	vhci_detach,
	nodev,				/* reset */
	&vhci_cb_ops,			/* cb_ops */
	NULL,				/* bus_ops */
	NULL,				/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	vhci_version_name,		/* module name */
	&vhci_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static mdi_vhci_ops_t vhci_opinfo = {
	MDI_VHCI_OPS_REV,
	vhci_pathinfo_init,		/* Pathinfo node init callback */
	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
	vhci_pathinfo_state_change,	/* Pathinfo node state change */
	vhci_failover,			/* failover callback */
	vhci_client_attached,		/* client attached callback */
	vhci_is_dev_supported		/* is device supported by mdi */
};

/*
 * The scsi_failover table defines an ordered set of 'fops' modules supported
 * by scsi_vhci. Currently, initialize this table from the 'ddi-forceload'
 * property specified in scsi_vhci.conf.
 */
static struct scsi_failover {
	ddi_modhandle_t			sf_mod;
	struct scsi_failover_ops	*sf_sfo;
} *scsi_failover_table;
static uint_t scsi_nfailover;

int
_init(void)
{
	int rval;

	/*
	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
	 * before registering with the transport.
	 */
	if ((rval = ddi_soft_state_init(&vhci_softstate,
	    sizeof (struct scsi_vhci), 1)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init:soft state init failed\n"));
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: scsi hba init failed\n"));
		ddi_soft_state_fini(&vhci_softstate);
		return (rval);
	}

	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
	vhci_targetmap_byport = mod_hash_create_strhash(
	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
	vhci_targetmap_bypid = mod_hash_create_idhash(
	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);

	if ((rval = mod_install(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
		if (vhci_targetmap_bypid)
			mod_hash_destroy_idhash(vhci_targetmap_bypid);
		if (vhci_targetmap_byport)
			mod_hash_destroy_strhash(vhci_targetmap_byport);
		mutex_destroy(&vhci_targetmap_mutex);
		cv_destroy(&vhci_cv);
		mutex_destroy(&vhci_global_mutex);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&vhci_softstate);
	}
	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
		return (rval);
	}

	if (vhci_targetmap_bypid)
		mod_hash_destroy_idhash(vhci_targetmap_bypid);
	if (vhci_targetmap_byport)
		mod_hash_destroy_strhash(vhci_targetmap_byport);
	mutex_destroy(&vhci_targetmap_mutex);
	cv_destroy(&vhci_cv);
	mutex_destroy(&vhci_global_mutex);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&vhci_softstate);

	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Lookup scsi_failover by "short name" of failover module.
 */
struct scsi_failover_ops *
vhci_failover_ops_by_name(char *name)
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
			return (sf->sf_sfo);
	}
	return (NULL);
}

/*
 * Load all scsi_failover_ops 'fops' modules.
 */
static void
vhci_failover_modopen(struct scsi_vhci *vhci)
{
	char			**module;
	int			i;
	struct scsi_failover	*sf;
	char			**dt;
	int			e;

	if (scsi_failover_table)
		return;

	/* Get the list of modules from scsi_vhci.conf */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf is missing 'ddi-forceload'");
		return;
	}
	if (scsi_nfailover == 0) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf has empty 'ddi-forceload'");
		ddi_prop_free(module);
		return;
	}

	/* allocate failover table based on number of modules */
	scsi_failover_table = (struct scsi_failover *)
	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
	    KM_SLEEP);

	/* loop over modules specified in scsi_vhci.conf and open each module */
	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
		if (module[i] == NULL)
			continue;

		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
		if (sf->sf_mod == NULL) {
			/*
			 * A module returns EEXIST if other software is
			 * supporting the intended function: for example
			 * the scsi_vhci_f_sum_emc module returns EEXIST
			 * from _init if EMC powerpath software is installed.
			 */
			if (e != EEXIST)
				cmn_err(CE_WARN, "scsi_vhci: unable to open "
				    "module '%s', error %d", module[i], e);
			continue;
		}
		sf->sf_sfo = ddi_modsym(sf->sf_mod,
		    "scsi_vhci_failover_ops", &e);
		if (sf->sf_sfo == NULL) {
			cmn_err(CE_WARN, "scsi_vhci: "
			    "unable to import 'scsi_failover_ops' from '%s', "
			    "error %d", module[i], e);
			(void) ddi_modclose(sf->sf_mod);
			sf->sf_mod = NULL;
			continue;
		}

		/* register vid/pid of devices supported with mpapi */
		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
			vhci_mpapi_add_dev_prod(vhci, *dt);
		sf++;
	}

	/* verify that at least the "well-known" modules were there */
	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");
	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");

	/* call sfo_init for modules that need it */
	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
			sf->sf_sfo->sfo_init();
	}

	ddi_prop_free(module);
}

/*
 * unload all loaded scsi_failover_ops modules
 */
static void
vhci_failover_modclose()
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
			continue;
		(void) ddi_modclose(sf->sf_mod);
		sf->sf_mod = NULL;
		sf->sf_sfo = NULL;
	}

	if (scsi_failover_table && scsi_nfailover)
		kmem_free(scsi_failover_table,
		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
	scsi_failover_table = NULL;
	scsi_nfailover = 0;
}

/* ARGSUSED */
static int
vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
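	/*
	 * A NULL soft state means no scsi_vhci instance has attached for
	 * this minor number; reject the open with ENXIO below.
	 */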
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
		mutex_exit(&vhci->vhci_mutex);
		vhci_log(CE_NOTE, vhci->vhci_dip,
		    "!vhci%d: Already open\n", getminor(*devp));
		return (EBUSY);
	}

	vhci->vhci_state |= VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);
	return (0);
}


/* ARGSUSED */
static int
vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	vhci->vhci_state &= ~VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);

	return (0);
}

/* ARGSUSED */
static int
vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	if (IS_DEVCTL(cmd)) {
		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
	} else if (cmd == MP_CMD) {
		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
	} else {
		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
	}
}

/*
 * attach the module
 */
static int
vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			rval = DDI_FAILURE;
	int			scsi_hba_attached = 0;
	int			vhci_attached = 0;
	int			mutex_initted = 0;
	int			instance;
	struct scsi_vhci	*vhci;
	scsi_hba_tran_t		*tran;
	char			cache_name_buf[64];
	char			*data;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet"
		    "implemented\n"));
		return (rval);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_attach: unknown ddi command\n"));
		return (rval);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "soft state alloc failed\n"));
		return (DDI_FAILURE);
	}

	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "bad soft state\n"));
		ddi_soft_state_free(vhci_softstate, instance);
		return (DDI_FAILURE);
	}

	/* Allocate packet cache */
	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
	    "vhci%d_cache", instance);

	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	ASSERT(tran != NULL);

	vhci->vhci_tran		= tran;
	vhci->vhci_dip		= dip;
	vhci->vhci_instance	= instance;

	tran->tran_hba_private	= vhci;
	tran->tran_tgt_init	= vhci_scsi_tgt_init;
	tran->tran_tgt_probe	= NULL;
	tran->tran_tgt_free	= vhci_scsi_tgt_free;

	tran->tran_start	= vhci_scsi_start;
	tran->tran_abort	= vhci_scsi_abort;
	tran->tran_reset	= vhci_scsi_reset;
	tran->tran_getcap	= vhci_scsi_getcap;
	tran->tran_setcap	= vhci_scsi_setcap;
	tran->tran_init_pkt	= vhci_scsi_init_pkt;
	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
	tran->tran_dmafree	= vhci_scsi_dmafree;
	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
	tran->tran_reset_notify = vhci_scsi_reset_notify;

	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
	tran->tran_get_name	= vhci_scsi_get_name;
	tran->tran_bus_reset	= NULL;
	tran->tran_quiesce	= NULL;
	tran->tran_unquiesce	= NULL;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_post_event = NULL;

	tran->tran_bus_power = vhci_scsi_bus_power;

	tran->tran_bus_config = vhci_scsi_bus_config;
	tran->tran_bus_unconfig = vhci_scsi_bus_unconfig;

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
	    != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "mdi_vhci_register failed\n"));
		goto attach_fail;
	}
	vhci_attached++;

	/*
	 * Attach this instance of the hba.
	 *
	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
	 * driver, it has nothing to do with DMA. However, when calling
	 * scsi_hba_attach_setup() we need to pass something valid in the
	 * dma attributes parameter. So we just use scsi_alloc_attr.
	 * SCSA itself seems to care only for dma_attr_minxfer and
	 * dma_attr_burstsizes fields of the dma attributes structure.
	 * It expects those fields to be non-zero.
	 */
	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
	    SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "hba attach failed\n"));
		goto attach_fail;
	}
	scsi_hba_attached++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    " ddi_create_minor_node failed\n"));
		goto attach_fail;
	}

	/*
	 * Set pm-want-child-notification property for
	 * power management of the phci and client
	 */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d fail to create pm-want-child-notification? prop",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto attach_fail;
	}

	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
	vhci->vhci_update_pathstates_taskq =
	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
	    MINCLSYSPRI, 1, 4, 0);
	ASSERT(vhci->vhci_taskq);
	ASSERT(vhci->vhci_update_pathstates_taskq);

	/*
	 * Set appropriate configuration flags based on options set in
	 * conf file.
	 */
	vhci->vhci_conf_flags = 0;
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
	    "auto-failback", &data) == DDI_SUCCESS) {
		if (strcmp(data, "enable") == 0)
			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
		ddi_prop_free(data);
	}

	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
		    "disabled through scsi_vhci.conf file.");

	/*
	 * Allocate an mpapi private structure
	 */
	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
	if (vhci_mpapi_init(vhci) != 0) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
		    "vhci_mpapi_init() failed"));
	}

	vhci_failover_modopen(vhci);		/* load failover modules */

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_attached)
		(void) mdi_vhci_unregister(dip, 0);

	if (scsi_hba_attached)
		(void) scsi_hba_detach(dip);

	if (vhci->vhci_tran)
		scsi_hba_tran_free(vhci->vhci_tran);

	if (mutex_initted) {
		mutex_destroy(&vhci->vhci_mutex);
	}

	ddi_soft_state_free(vhci_softstate, instance);
	return (DDI_FAILURE);
}


/*ARGSUSED*/
static int
vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance = ddi_get_instance(dip);
	scsi_hba_tran_t		*tran;
	struct scsi_vhci	*vhci;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	vhci = TRAN2HBAPRIVATE(tran);
	if (!vhci) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not yet"
		    "implemented\n"));
		return (DDI_FAILURE);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_detach: unknown ddi command\n"));
		return (DDI_FAILURE);
	}

	(void) mdi_vhci_unregister(dip, 0);
	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d unable to remove prop pm-want_child_notification?",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	if (vhci_restart_timeid != 0) {
		(void) untimeout(vhci_restart_timeid);
	}
	vhci_restart_timeid = 0;

	mutex_destroy(&vhci->vhci_mutex);
	vhci->vhci_dip = NULL;
	vhci->vhci_tran = NULL;
	taskq_destroy(vhci->vhci_taskq);
	taskq_destroy(vhci->vhci_update_pathstates_taskq);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(vhci_softstate, instance);

	vhci_failover_modclose();		/* unload failover modules */
	return (DDI_SUCCESS);
}

/*
 * vhci_getinfo()
 * Given the device number, return the devinfo pointer or the
 * instance number.
 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
 */

/*ARGSUSED*/
static int
vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct scsi_vhci	*vhcip;
	int			instance = MINOR2INST(getminor((dev_t)arg));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		vhcip = ddi_get_soft_state(vhci_softstate, instance);
		if (vhcip != NULL)
			*result = vhcip->vhci_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	char			*guid;
	scsi_vhci_lun_t		*vlun;
	struct scsi_vhci	*vhci;
	clock_t			from_ticks;
	mdi_pathinfo_t		*pip;
	int			rval;

	ASSERT(hba_dip != NULL);
	ASSERT(tgt_dip != NULL);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		/*
		 * This must be the .conf node without GUID property.
		 * The node under fp already inserts a delay, so we
		 * just return from here. We rely on this delay to have
		 * all dips be posted to the ndi hotplug thread's newdev
		 * list. This is necessary for the deferred attach
		 * mechanism to work and opens() done soon after boot to
		 * succeed.
		 */
		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
		    "property failed"));
		return (DDI_NOT_WELL_FORMED);
	}

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		/*
		 * This must be .conf node with the GUID property. We don't
		 * merge property by ndi_merge_node() here because the
		 * devi_addr_buf of .conf node is "" always according to the
		 * implementation of vhci_scsi_get_name_bus_addr().
		 */
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
	ASSERT(vhci != NULL);

	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
	    "!tgt_init: called for %s (instance %d)\n",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));

	vlun = vhci_lun_lookup(tgt_dip);

	mutex_enter(&vhci_global_mutex);

	from_ticks = ddi_get_lbolt();
	if (vhci_to_ticks == 0) {
		vhci_to_ticks = from_ticks +
		    drv_usectohz(vhci_init_wait_timeout);
	}

#if DEBUG
	if (vlun) {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
		    "from_ticks %lx to_ticks %lx",
		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
	} else {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : vlun not found "
		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
		    vhci_to_ticks));
	}
#endif

	rval = mdi_select_path(tgt_dip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_rele_path(pip);
	}

	/*
	 * Wait for the following conditions :
	 *	1. no vlun available yet
	 *	2. no path established
	 *	3. timer did not expire
	 */
	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
	    (rval != MDI_SUCCESS)) {
		if (vlun && vlun->svl_not_supported) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
			    "vlun 0x%p lun guid %s not supported!",
			    (void *)vlun, guid));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
			vhci_first_time = 1;
		}
		if (vhci_first_time == 1) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
			    "no wait for %s. from_tick %lx, to_tick %lx",
			    guid, from_ticks, vhci_to_ticks));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}

		if (cv_timedwait(&vhci_cv,
		    &vhci_global_mutex, vhci_to_ticks) == -1) {
			/* Timed out */
#ifdef DEBUG
			if (vlun == NULL) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: no vlun for %s!", guid));
			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path count is "
				    "zero for %s!", guid));
			} else {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path not "
				    "available yet for %s!", guid));
			}
#endif /* DEBUG */
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		vlun = vhci_lun_lookup(tgt_dip);
		rval = mdi_select_path(tgt_dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if (rval == MDI_SUCCESS) {
			mdi_rele_path(pip);
		}
		from_ticks = ddi_get_lbolt();
	}
	mutex_exit(&vhci_global_mutex);

	ASSERT(vlun != NULL);
	ddi_prop_free(guid);

	scsi_device_hba_private_set(sd, vlun);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
}

/*
 * a PGR register command has started; copy the info we need
 */
int
vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
{
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	void			*addr;

	if (!vpkt->vpkt_tgt_init_bp)
		return (TRAN_BADPKT);

	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
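	/*
	 * bp_mapin_common() may fail and return NULL in the VM_NOSLEEP
	 * case; report TRAN_BUSY so the target driver can retry the
	 * register command later.
	 */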
	if (addr == NULL)
		return (TRAN_BUSY);

	mutex_enter(&vlun->svl_mutex);

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");

	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");

	vlun->svl_time = pkt->pkt_time;
	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
	vlun->svl_first_path = vpkt->vpkt_path;
	mutex_exit(&vlun->svl_mutex);
	return (0);
}

/*
 * Function name : vhci_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR - vhci has been shutdown
 *                                    or other fatal failure
 *                                    preventing packet transportation
 *                 TRAN_BUSY        - request queue is full
 *                 TRAN_ACCEPT      - pkt has been submitted to phci
 *                                    (or is held in the waitQ)
 * Description   : Implements SCSA's tran_start() entry point for
 *                 packet transport
 *
 */
static int
vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int			rval = TRAN_ACCEPT;
	int			instance, held;
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	int			flags = 0;
	scsi_vhci_priv_t	*svp;
	dev_info_t		*cdip;
	client_lb_t		lbp;
	int			restore_lbp = 0;
	/* set if pkt is SCSI-II RESERVE cmd */
	int			pkt_reserve_cmd = 0;
	int			reserve_failed = 0;

	ASSERT(vhci != NULL);
	ASSERT(vpkt != NULL);
	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
	cdip = ADDR2DIP(ap);

	/*
	 * Block IOs if LUN is held or QUIESCED for IOs.
	 */
	if ((VHCI_LUN_IS_HELD(vlun)) ||
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		return (TRAN_BUSY);
	}

	/*
	 * vhci_lun needs to be quiesced before SCSI-II RESERVE command
	 * can be issued. This may require a cv_timedwait, which is
	 * dangerous to perform in an interrupt context. So if this
	 * is a RESERVE command a taskq is dispatched to service it.
	 * This taskq shall again call vhci_scsi_start, but we shall be
	 * sure it's not in an interrupt context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
			if (taskq_dispatch(vhci->vhci_taskq,
			    vhci_dispatch_scsi_start, (void *) vpkt,
			    KM_NOSLEEP)) {
				return (TRAN_ACCEPT);
			} else {
				return (TRAN_BUSY);
			}
		}

		/*
		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
		 * get serviced for a lun.
		 */
		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
		if (!held) {
			return (TRAN_BUSY);
		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
		    VLUN_QUIESCED_FLG) {
			VHCI_RELEASE_LUN(vlun);
			return (TRAN_BUSY);
		}

		/*
		 * To ensure that no IOs occur for this LUN for the duration
		 * of this pkt set the VLUN_QUIESCED_FLG.
		 * In case this routine needs to exit on error make sure that
		 * this flag is cleared.
		 */
		vlun->svl_flags |= VLUN_QUIESCED_FLG;
		pkt_reserve_cmd = 1;

		/*
		 * if this is a SCSI-II RESERVE command, set load balancing
		 * policy to be ALTERNATE PATH to ensure that all subsequent
		 * IOs are routed on the same path. This is because if commands
		 * are routed across multiple paths then IOs on paths other
		 * than the one on which the RESERVE was executed will get a
		 * RESERVATION CONFLICT
		 */
		lbp = mdi_get_lb_policy(cdip);
		if (lbp != LOAD_BALANCE_NONE) {
			if (vhci_quiesce_lun(vlun) != 1) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			vlun->svl_lb_policy_save = lbp;
			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
			    MDI_SUCCESS) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			restore_lbp = 1;
		}
		/*
		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
		 * To narrow this window where a reserve command may be sent
		 * down an inactive path the path states first need to be
		 * updated. Before calling vhci_update_pathstates reset
		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
		 * for this lun. This shall prevent an unnecessary reset
		 * from being sent out.
		 */
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		vhci_update_pathstates((void *)vlun);
	}

	instance = ddi_get_instance(vhci->vhci_dip);

	/*
	 * If the command is PRIN with action of zero, then the cmd
	 * is reading PR keys which requires filtering on completion.
	 * Data cache sync must be guaranteed.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
	    (vpkt->vpkt_org_vpkt == NULL)) {
		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
	}

	/*
	 * Do not defer bind for PKT_DMA_PARTIAL
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {

		/* This is a non pkt_dma_partial case */
		if ((rval = vhci_bind_transport(
		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
		    != TRAN_ACCEPT) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci%d %x: failed to bind transport: "
			    "vlun 0x%p pkt_reserved %x restore_lbp %x,"
			    "lbp %x", instance, rval, (void *)vlun,
			    pkt_reserve_cmd, restore_lbp, lbp));
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
			if (pkt_reserve_cmd)
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			return (rval);
		}
		VHCI_DEBUG(8, (CE_NOTE, NULL,
		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
	}
	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	ASSERT(vpkt->vpkt_path != NULL);

	/*
	 * This is the chance to adjust the pHCI's pkt and other information
	 * from target driver's pkt.
	 */
	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
	    (void *)vpkt));
	vhci_update_pHCI_pkt(vpkt, pkt);

	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
			    (void *)vlun, (void *)vpkt->vpkt_path,
			    (void *)vlun->svl_resrv_pip,
			    mdi_get_lb_policy(cdip)));
			reserve_failed = 1;
		}
	}

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
	if (svp == NULL || reserve_failed) {
		if (pkt_reserve_cmd) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind returned null svp vlun 0x%p",
			    (void *)vlun));
			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
		}
pkt_cleanup:
		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
			vpkt->vpkt_hba_pkt = NULL;
			if (vpkt->vpkt_path) {
				mdi_rele_path(vpkt->vpkt_path);
				vpkt->vpkt_path = NULL;
			}
		}
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			sema_v(&vlun->svl_pgr_sema);
		}
		return (TRAN_BUSY);
	}

	VHCI_INCR_PATH_CMDCOUNT(svp);

	/*
	 * Ensure that no other IOs raced ahead, while a RESERVE cmd was
	 * QUIESCING the same lun.
	 */
	if ((!pkt_reserve_cmd) &&
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		VHCI_DECR_PATH_CMDCOUNT(svp);
		goto pkt_cleanup;
	}

	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
		/*
		 * currently this thread only handles running PGR
		 * commands, so don't bother creating it unless
		 * something interesting is going to happen (like
		 * either a PGR out, or a PGR in with enough space
		 * to hold the keys that are getting returned)
		 */
		mutex_enter(&vlun->svl_mutex);
		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
			    1, MINCLSYSPRI, 1, 4, 0);
			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
		}
		mutex_exit(&vlun->svl_mutex);
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			if (rval = vhci_pgr_register_start(vlun, pkt)) {
				/* an error */
				sema_v(&vlun->svl_pgr_sema);
				return (rval);
			}
		}
	}

	/*
	 * SCSI-II RESERVE cmd is not expected in polled mode.
	 * If this changes it needs to be handled for the polled scenario.
	 */
	flags = vpkt->vpkt_hba_pkt->pkt_flags;

	/*
	 * Set the path_instance *before* sending the scsi_pkt down the path
	 * to mpxio's pHCI so that additional path abstractions at a pHCI
	 * level (like maybe iSCSI at some point in the future) can update
	 * the path_instance.
	 */
	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
		vpkt->vpkt_hba_pkt->pkt_path_instance =
		    mdi_pi_get_path_instance(vpkt->vpkt_path);

	rval = scsi_transport(vpkt->vpkt_hba_pkt);
	if (rval == TRAN_ACCEPT) {
		if (flags & FLAG_NOINTR) {
			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;

			ASSERT(tpkt != NULL);
			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
			tpkt->pkt_resid = pkt->pkt_resid;
			tpkt->pkt_state = pkt->pkt_state;
			tpkt->pkt_statistics = pkt->pkt_statistics;
			tpkt->pkt_reason = pkt->pkt_reason;

			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
				    vpkt->vpkt_tgt_init_scblen);
			}

			VHCI_DECR_PATH_CMDCOUNT(svp);
			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
				vpkt->vpkt_hba_pkt = NULL;
				if (vpkt->vpkt_path) {
					mdi_rele_path(vpkt->vpkt_path);
					vpkt->vpkt_path = NULL;
				}
			}
			/*
			 * This path will not automatically retry pkts
			 * internally, therefore, vpkt_org_vpkt should
			 * never be set.
			 */
			ASSERT(vpkt->vpkt_org_vpkt == NULL);
			scsi_hba_pkt_comp(tpkt);
		}
		return (rval);
	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (pkt_reserve_cmd) {
		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
		    "!vhci_scsi_start: reserve failed vlun 0x%p",
		    (void *)vlun));
		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
		if (restore_lbp)
			(void) mdi_set_lb_policy(cdip, lbp);
	}

	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	VHCI_DECR_PATH_CMDCOUNT(svp);

	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
		vpkt->vpkt_hba_pkt = NULL;
		if (vpkt->vpkt_path) {
			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
			mdi_rele_path(vpkt->vpkt_path);
			vpkt->vpkt_path = NULL;
		}
	}
	return (TRAN_BUSY);
}

/*
 * Function name : vhci_scsi_reset()
 *
 * Return Values : 0 - reset failed
 *                 1 - reset succeeded
 */

/* ARGSUSED */
static int
vhci_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = 0;

	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
		return (vhci_scsi_reset_target(ap, level, TRUE));
	} else if (level == RESET_ALL) {
		return (vhci_scsi_reset_bus(ap));
	}

	return (rval);
}

/*
 * vhci_recovery_reset:
 *	Issues reset to the device
 * Input:
 *	vlun - vhci lun pointer of the device
 *	ap - address of the device
 *	select_path:
 *		If select_path is FALSE, then the address specified in ap is
 *		the path on which reset will be issued.
 *		If select_path is TRUE, then path is obtained by calling
 *		mdi_select_path.
 *
 *	recovery_depth:
 *		Caller can specify the level of reset.
 *		VHCI_DEPTH_LUN -
 *			Issues LUN RESET if device supports lun reset.
 *		VHCI_DEPTH_TARGET -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET
 *		VHCI_DEPTH_ALL -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET.
 *			If TARGET RESET does not succeed, issues Bus Reset.
 */

static int
vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
    uint8_t select_path, uint8_t recovery_depth)
{
	int	ret = 0;

	ASSERT(ap != NULL);

	if (vlun && vlun->svl_support_lun_reset == 1) {
		ret = vhci_scsi_reset_target(ap, RESET_LUN,
		    select_path);
	}

	recovery_depth--;

	if ((ret == 0) && recovery_depth) {
		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
		    select_path);
		recovery_depth--;
	}

	if ((ret == 0) && recovery_depth) {
		(void) scsi_reset(ap, RESET_ALL);
	}

	return (ret);
}

/*
 * Note: The scsi_address passed to this routine could be the scsi_address
 * for the virtual device or the physical device. No assumptions should be
 * made in this routine about the contents of the ap structure.
 * Further, note that the child dip would be the dip of the ssd node regardless
 * of the scsi_address passed in.
 */
static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
	dev_info_t		*vdip, *pdip, *cdip;
	mdi_pathinfo_t		*pip = NULL;
	mdi_pathinfo_t		*npip = NULL;
	int			rval = -1;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	struct scsi_vhci	*vhci = NULL;

	if (select_path != TRUE) {
		ASSERT(ap != NULL);
		if (level == RESET_LUN) {
			hba = ap->a_hba_tran;
			ASSERT(hba != NULL);
			return (hba->tran_reset(ap, RESET_LUN));
		}
		return (scsi_reset(ap, level));
	}

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	vdip = ddi_get_parent(cdip);
	ASSERT(vdip != NULL);
	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "Unable to get a path, dip 0x%p", (void *)cdip));
		return (0);
	}
again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (0);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (0);
	}

	pap = &svp->svp_psd->sd_address;
	hba = pap->a_hba_tran;

	ASSERT(pap != NULL);
	ASSERT(hba != NULL);

	if (hba->tran_reset != NULL) {
		if (hba->tran_reset(pap, level) == 0) {
			pdip = mdi_pi_get_phci(pip);
			vhci_log(CE_WARN, vdip, "!(%s%d):"
			    " path (%s%d), reset %d failed",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    ddi_driver_name(pdip), ddi_get_instance(pdip),
			    level);

			/*
			 * Select next path and issue the reset, repeat
			 * until all paths are exhausted
			 */
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_ONLINE_PATH, pip, &npip);
			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
				mdi_rele_path(pip);
				return (0);
			}
			mdi_rele_path(pip);
			pip = npip;
			goto again;
		}
		mdi_rele_path(pip);
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
		    "reset %d sent down pip:%p for cdip:%p\n", level,
		    (void *)pip, (void *)cdip));
		return (1);
	}
	mdi_rele_path(pip);
	return (0);
}


/* ARGSUSED */
static int
vhci_scsi_reset_bus(struct scsi_address *ap)
{
	return (1);
}


/*
 * called by vhci_getcap and vhci_setcap to get and set (respectively)
 * SCSI capabilities
 */
/* ARGSUSED */
static int
vhci_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	int			cidx;
	int			rval = 0;

	if (cap == (char *)0) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: invalid arg"));
		return (rval);
	}

	if (vlun == NULL) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: vlun is null"));
		return (rval);
	}

	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		return (UNDEFINED);
	}

	/*
	 * Process setcap request.
	 */
	if (doset) {
		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:
			if (val == 0) {
				rval = 0;
			} else {
				rval = 1;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			if (tgtonly == 0) {
				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
				    "scsi_vhci_setcap: "
				    "Returning error since whom = 0"));
				rval = -1;
				break;
			}
			/*
			 * Set the capability accordingly.
			 */
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_support_lun_reset = val;
			rval = val;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_sector_size = val;
			vlun->svl_setcap_done = 1;
			mutex_exit(&vlun->svl_mutex);
			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);

			/* Always return success */
			rval = 1;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_setcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			/*
			 * For X86 this capability is caught in scsi_ifgetcap().
			 * XXX Should this be getting the value from the pHCI?
			 */
			rval = (int)VHCI_DMA_MAX_XFER_CAP;
			break;

		case SCSI_CAP_INITIATOR_ID:
			rval = 0x00;
			break;

		case SCSI_CAP_ARQ:
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = 1;
			break;

		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;

		case SCSI_CAP_INTERCONNECT_TYPE:
			rval = INTERCONNECT_FABRIC;
			break;

		case SCSI_CAP_LUN_RESET:
			/*
			 * scsi_vhci will always return success for LUN reset.
			 * When request for doing LUN reset comes
			 * through scsi_reset entry point, at that time attempt
			 * will be made to do reset through all the possible
			 * paths.
			 */
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_support_lun_reset;
			mutex_exit(&vlun->svl_mutex);
			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
			    "scsi_vhci_getcap:"
			    "Getting the Lun reset capability %d", rval));
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_sector_size;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_CDB_LEN:
			rval = VHCI_SCSI_CDB_SIZE;
			break;

		case SCSI_CAP_DMA_MAX_ARCH:
			/*
			 * For X86 this capability is caught in scsi_ifgetcap().
			 * XXX Should this be getting the value from the pHCI?
			 */
			rval = 0;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_getcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));
	}
	return (rval);
}


/*
 * Function name : vhci_scsi_getcap()
 *
 */
static int
vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	return (vhci_commoncap(ap, cap, 0, whom, 0));
}

static int
vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	return (vhci_commoncap(ap, cap, value, whom, 1));
}

/*
 * Function name : vhci_scsi_abort()
 */
/* ARGSUSED */
static int
vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	return (0);
}

/*
 * Function name : vhci_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 */
/* ARGSUSED */
static struct scsi_pkt *
vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(caddr_t), caddr_t arg)
{
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct vhci_pkt		*vpkt;
	int			rval;
	int			newpkt = 0;
	struct scsi_pkt		*pktp;


	if (pkt == NULL) {
		if (cmdlen > VHCI_SCSI_CDB_SIZE) {
			if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) ||
			    ((flags & VHCI_SCSI_OSD_PKT_FLAGS) !=
			    VHCI_SCSI_OSD_PKT_FLAGS)) {
				VHCI_DEBUG(1, (CE_NOTE, NULL,
				    "!init pkt: cdb size not supported\n"));
				return (NULL);
			}
		}

		pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
		    ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
		    arg);

		if (pktp == NULL) {
			return (NULL);
		}

		/* Get the vhci's private structure */
		vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
		ASSERT(vpkt);

		/* Save the target driver's packet */
		vpkt->vpkt_tgt_pkt = pktp;

pkt_tgt_init_pkt fields if deferred binding 1858 * is needed or for other purposes. 1859 */ 1860 vpkt->vpkt_tgt_init_pkt_flags = flags; 1861 vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0; 1862 vpkt->vpkt_state = VHCI_PKT_IDLE; 1863 vpkt->vpkt_tgt_init_cdblen = cmdlen; 1864 vpkt->vpkt_tgt_init_scblen = statuslen; 1865 newpkt = 1; 1866 } else { /* pkt not NULL */ 1867 vpkt = pkt->pkt_ha_private; 1868 } 1869 1870 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt " 1871 "vpkt %p flags %x\n", (void *)vpkt, flags)); 1872 1873 /* Clear any stale error flags */ 1874 if (bp) { 1875 bioerror(bp, 0); 1876 } 1877 1878 vpkt->vpkt_tgt_init_bp = bp; 1879 1880 if (flags & PKT_DMA_PARTIAL) { 1881 1882 /* 1883 * Immediate binding is needed. 1884 * Target driver may not set this flag in next invocation. 1885 * vhci has to remember this flag was set during first 1886 * invocation of vhci_scsi_init_pkt. 1887 */ 1888 vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL; 1889 } 1890 1891 if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) { 1892 1893 /* 1894 * Re-initialize some of the target driver packet state 1895 * information. 1896 */ 1897 vpkt->vpkt_tgt_pkt->pkt_state = 0; 1898 vpkt->vpkt_tgt_pkt->pkt_statistics = 0; 1899 vpkt->vpkt_tgt_pkt->pkt_reason = 0; 1900 1901 /* 1902 * Binding a vpkt->vpkt_path for this IO at init_time. 1903 * If an IO error happens later, target driver will clear 1904 * this vpkt->vpkt_path binding before re-init IO again. 1905 */ 1906 VHCI_DEBUG(8, (CE_NOTE, NULL, 1907 "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n", 1908 (void *)vpkt, newpkt)); 1909 if (pkt && vpkt->vpkt_hba_pkt) { 1910 VHCI_DEBUG(4, (CE_NOTE, NULL, 1911 "v_s_i_p calling update_pHCI_pkt resid %ld\n", 1912 pkt->pkt_resid)); 1913 vhci_update_pHCI_pkt(vpkt, pkt); 1914 } 1915 if (callback == SLEEP_FUNC) { 1916 rval = vhci_bind_transport( 1917 ap, vpkt, flags, callback); 1918 } else { 1919 rval = vhci_bind_transport( 1920 ap, vpkt, flags, NULL_FUNC); 1921 } 1922 VHCI_DEBUG(8, (CE_NOTE, NULL, 1923 "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n", 1924 (void *)vpkt, rval)); 1925 if (bp) { 1926 if (rval == TRAN_FATAL_ERROR) { 1927 /* 1928 * No paths available. Could not bind 1929 * any pHCI. Setting EFAULT as a way 1930 * to indicate no DMA is mapped. 1931 */ 1932 bioerror(bp, EFAULT); 1933 } else { 1934 /* 1935 * Do not indicate any pHCI errors to 1936 * target driver otherwise. 
1937 */ 1938 bioerror(bp, 0); 1939 } 1940 } 1941 if (rval != TRAN_ACCEPT) { 1942 VHCI_DEBUG(8, (CE_NOTE, NULL, 1943 "vhci_scsi_init_pkt: " 1944 "v_b_t failed 0x%p newpkt %x\n", 1945 (void *)vpkt, newpkt)); 1946 if (newpkt) { 1947 scsi_hba_pkt_free(ap, 1948 vpkt->vpkt_tgt_pkt); 1949 } 1950 return (NULL); 1951 } 1952 ASSERT(vpkt->vpkt_hba_pkt != NULL); 1953 ASSERT(vpkt->vpkt_path != NULL); 1954 1955 /* Update the resid for the target driver */ 1956 vpkt->vpkt_tgt_pkt->pkt_resid = 1957 vpkt->vpkt_hba_pkt->pkt_resid; 1958 } 1959 1960 return (vpkt->vpkt_tgt_pkt); 1961 } 1962 1963 /* 1964 * Function name : vhci_scsi_destroy_pkt 1965 * 1966 * Return Values : none 1967 */ 1968 static void 1969 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 1970 { 1971 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 1972 1973 VHCI_DEBUG(8, (CE_NOTE, NULL, 1974 "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt)); 1975 1976 vpkt->vpkt_tgt_init_pkt_flags = 0; 1977 if (vpkt->vpkt_hba_pkt) { 1978 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1979 vpkt->vpkt_hba_pkt = NULL; 1980 } 1981 if (vpkt->vpkt_path) { 1982 mdi_rele_path(vpkt->vpkt_path); 1983 vpkt->vpkt_path = NULL; 1984 } 1985 1986 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED); 1987 scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt); 1988 } 1989 1990 /* 1991 * Function name : vhci_scsi_dmafree() 1992 * 1993 * Return Values : none 1994 */ 1995 /*ARGSUSED*/ 1996 static void 1997 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 1998 { 1999 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 2000 2001 VHCI_DEBUG(6, (CE_NOTE, NULL, 2002 "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt)); 2003 2004 ASSERT(vpkt != NULL); 2005 if (vpkt->vpkt_hba_pkt) { 2006 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 2007 vpkt->vpkt_hba_pkt = NULL; 2008 } 2009 if (vpkt->vpkt_path) { 2010 mdi_rele_path(vpkt->vpkt_path); 2011 vpkt->vpkt_path = NULL; 2012 } 2013 } 2014 2015 /* 2016 * Function name : vhci_scsi_sync_pkt() 2017 * 2018 * Return Values : none 2019 */ 2020 /*ARGSUSED*/ 2021 static void 2022 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 2023 { 2024 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 2025 2026 ASSERT(vpkt != NULL); 2027 if (vpkt->vpkt_hba_pkt) { 2028 scsi_sync_pkt(vpkt->vpkt_hba_pkt); 2029 } 2030 } 2031 2032 /* 2033 * routine for reset notification setup, to register or cancel. 
2034 */ 2035 static int 2036 vhci_scsi_reset_notify(struct scsi_address *ap, int flag, 2037 void (*callback)(caddr_t), caddr_t arg) 2038 { 2039 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2040 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg, 2041 &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf)); 2042 } 2043 2044 static int 2045 vhci_scsi_get_name_bus_addr(struct scsi_device *sd, 2046 char *name, int len, int bus_addr) 2047 { 2048 dev_info_t *cdip; 2049 char *guid; 2050 scsi_vhci_lun_t *vlun; 2051 2052 ASSERT(sd != NULL); 2053 ASSERT(name != NULL); 2054 2055 cdip = sd->sd_dev; 2056 2057 ASSERT(cdip != NULL); 2058 2059 if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS) { 2060 name[0] = '\0'; 2061 return (1); 2062 } 2063 2064 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS, 2065 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 2066 name[0] = '\0'; 2067 return (1); 2068 } 2069 2070 vlun = ADDR2VLUN(&sd->sd_address); 2071 if (bus_addr && vlun && vlun->svl_fops_name) { 2072 /* report the guid and the name of the failover module */ 2073 (void) snprintf(name, len, "g%s %s", guid, vlun->svl_fops_name); 2074 } else { 2075 /* report the guid */ 2076 (void) snprintf(name, len, "g%s", guid); 2077 } 2078 2079 ddi_prop_free(guid); 2080 return (1); 2081 } 2082 2083 static int 2084 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len) 2085 { 2086 return (vhci_scsi_get_name_bus_addr(sd, name, len, 1)); 2087 } 2088 2089 static int 2090 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len) 2091 { 2092 return (vhci_scsi_get_name_bus_addr(sd, name, len, 0)); 2093 } 2094 2095 /* 2096 * Return a pointer to the guid part of the devnm. 2097 * devnm format is "nodename@busaddr", busaddr format is "gGUID". 2098 */ 2099 static char * 2100 vhci_devnm_to_guid(char *devnm) 2101 { 2102 char *cp = devnm; 2103 2104 if (devnm == NULL) 2105 return (NULL); 2106 2107 while (*cp != '\0' && *cp != '@') 2108 cp++; 2109 if (*cp == '@' && *(cp + 1) == 'g') 2110 return (cp + 2); 2111 return (NULL); 2112 } 2113 2114 static int 2115 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags, 2116 int (*func)(caddr_t)) 2117 { 2118 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2119 dev_info_t *cdip = ADDR2DIP(ap); 2120 mdi_pathinfo_t *pip = NULL; 2121 mdi_pathinfo_t *npip = NULL; 2122 scsi_vhci_priv_t *svp = NULL; 2123 struct scsi_device *psd = NULL; 2124 struct scsi_address *address = NULL; 2125 struct scsi_pkt *pkt = NULL; 2126 int rval = -1; 2127 int pgr_sema_held = 0; 2128 int held; 2129 int mps_flag = MDI_SELECT_ONLINE_PATH; 2130 struct scsi_vhci_lun *vlun; 2131 time_t tnow; 2132 int path_instance = 0; 2133 2134 vlun = ADDR2VLUN(ap); 2135 ASSERT(vlun != 0); 2136 2137 if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) && 2138 (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2139 VHCI_PROUT_REGISTER) || 2140 ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2141 VHCI_PROUT_R_AND_IGNORE))) { 2142 if (!sema_tryp(&vlun->svl_pgr_sema)) 2143 return (TRAN_BUSY); 2144 pgr_sema_held = 1; 2145 if (vlun->svl_first_path != NULL) { 2146 rval = mdi_select_path(cdip, NULL, 2147 MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH, 2148 NULL, &pip); 2149 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 2150 VHCI_DEBUG(4, (CE_NOTE, NULL, 2151 "vhci_bind_transport: path select fail\n")); 2152 } else { 2153 npip = pip; 2154 do { 2155 if (npip == vlun->svl_first_path) { 2156 VHCI_DEBUG(4, (CE_NOTE, NULL, 2157 "vhci_bind_transport: " 2158 "valid first path 0x%p\n", 2159 (void *) 2160 vlun->svl_first_path)); 
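						/*
						 * svl_first_path is still among
						 * the selectable paths, so send
						 * this PROUT down it rather
						 * than picking a new path.
						 */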
2161 pip = vlun->svl_first_path; 2162 goto bind_path; 2163 } 2164 pip = npip; 2165 rval = mdi_select_path(cdip, NULL, 2166 MDI_SELECT_ONLINE_PATH | 2167 MDI_SELECT_STANDBY_PATH, 2168 pip, &npip); 2169 mdi_rele_path(pip); 2170 } while ((rval == MDI_SUCCESS) && 2171 (npip != NULL)); 2172 } 2173 } 2174 2175 if (vlun->svl_first_path) { 2176 VHCI_DEBUG(4, (CE_NOTE, NULL, 2177 "vhci_bind_transport: invalid first path 0x%p\n", 2178 (void *)vlun->svl_first_path)); 2179 vlun->svl_first_path = NULL; 2180 } 2181 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) { 2182 if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) { 2183 if (!sema_tryp(&vlun->svl_pgr_sema)) 2184 return (TRAN_BUSY); 2185 } 2186 pgr_sema_held = 1; 2187 } 2188 2189 /* 2190 * If the path is already bound for PKT_PARTIAL_DMA case, 2191 * try to use the same path. 2192 */ 2193 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) { 2194 VHCI_DEBUG(4, (CE_NOTE, NULL, 2195 "vhci_bind_transport: PKT_PARTIAL_DMA " 2196 "vpkt 0x%p, path 0x%p\n", 2197 (void *)vpkt, (void *)vpkt->vpkt_path)); 2198 pip = vpkt->vpkt_path; 2199 goto bind_path; 2200 } 2201 2202 /* 2203 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set 2204 * indicates that mdi_select_path should be called to select a 2205 * specific instance. 2206 * 2207 * NB: Condition pkt_path_instance reference on proper allocation. 2208 */ 2209 if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) && 2210 scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) { 2211 path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance; 2212 } 2213 2214 /* 2215 * If reservation is active bind the transport directly to the pip 2216 * with the reservation. 2217 */ 2218 if (vpkt->vpkt_hba_pkt == NULL) { 2219 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 2220 if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) { 2221 pip = vlun->svl_resrv_pip; 2222 mdi_hold_path(pip); 2223 vlun->svl_waiting_for_activepath = 0; 2224 rval = MDI_SUCCESS; 2225 goto bind_path; 2226 } else { 2227 if (pgr_sema_held) { 2228 sema_v(&vlun->svl_pgr_sema); 2229 } 2230 return (TRAN_BUSY); 2231 } 2232 } 2233 try_again: 2234 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp, 2235 path_instance ? MDI_SELECT_PATH_INSTANCE : 0, 2236 (void *)(intptr_t)path_instance, &pip); 2237 if (rval == MDI_BUSY) { 2238 if (pgr_sema_held) { 2239 sema_v(&vlun->svl_pgr_sema); 2240 } 2241 return (TRAN_BUSY); 2242 } else if (rval == MDI_DEVI_ONLINING) { 2243 /* 2244 * if we are here then we are in the midst of 2245 * an attach/probe of the client device. 2246 * We attempt to bind to ONLINE path if available, 2247 * else it is OK to bind to a STANDBY path (instead 2248 * of triggering a failover) because IO associated 2249 * with attach/probe (eg. INQUIRY, block 0 read) 2250 * are completed by targets even on passive paths 2251 * If no ONLINE paths available, it is important 2252 * to set svl_waiting_for_activepath for two 2253 * reasons: (1) avoid sense analysis in the 2254 * "external failure detection" codepath in 2255 * vhci_intr(). Failure to do so will result in 2256 * infinite loop (unless an ONLINE path becomes 2257 * available at some point) (2) avoid 2258 * unnecessary failover (see "---Waiting For Active 2259 * Path---" comment below). 
2260			 */
2261			VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2262			    "state\n", (void *)cdip));
2263			pip = NULL;
2264			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2265			    mps_flag, NULL, &pip);
2266			if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2267				if (vlun->svl_waiting_for_activepath == 0) {
2268					vlun->svl_waiting_for_activepath = 1;
2269					vlun->svl_wfa_time = ddi_get_time();
2270				}
2271				mps_flag |= MDI_SELECT_STANDBY_PATH;
2272				rval = mdi_select_path(cdip,
2273				    vpkt->vpkt_tgt_init_bp,
2274				    mps_flag, NULL, &pip);
2275				if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2276					if (pgr_sema_held) {
2277						sema_v(&vlun->svl_pgr_sema);
2278					}
2279					return (TRAN_FATAL_ERROR);
2280				}
2281				goto bind_path;
2282			}
2283		} else if (rval == MDI_FAILURE) {
2284			if (pgr_sema_held) {
2285				sema_v(&vlun->svl_pgr_sema);
2286			}
2287			return (TRAN_FATAL_ERROR);
2288		}
2289
2290		if ((pip == NULL) || (rval == MDI_NOPATH)) {
2291			while (vlun->svl_waiting_for_activepath) {
2292				/*
2293				 * ---Waiting For Active Path---
2294				 * This device was discovered across a
2295				 * passive path; let's wait for a little
2296				 * bit, hopefully an active path will
2297				 * show up, obviating the need for a
2298				 * failover
2299				 */
2300				tnow = ddi_get_time();
2301				if (tnow - vlun->svl_wfa_time >= 60) {
2302					vlun->svl_waiting_for_activepath = 0;
2303				} else {
2304					drv_usecwait(1000);
2305					if (vlun->svl_waiting_for_activepath
2306					    == 0) {
2307						/*
2308						 * an active path has come
2309						 * online!
2310						 */
2311						goto try_again;
2312					}
2313				}
2314			}
2315			VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2316			if (!held) {
2317				VHCI_DEBUG(4, (CE_NOTE, NULL,
2318				    "!Lun not held\n"));
2319				if (pgr_sema_held) {
2320					sema_v(&vlun->svl_pgr_sema);
2321				}
2322				return (TRAN_BUSY);
2323			}
2324			/*
2325			 * now that the LUN is stable, one last check
2326			 * to make sure no other changes sneaked in
2327			 * (like a path coming online or a
2328			 * failover initiated by another thread)
2329			 */
2330			pip = NULL;
2331			rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2332			    0, NULL, &pip);
2333			if (pip != NULL) {
2334				VHCI_RELEASE_LUN(vlun);
2335				vlun->svl_waiting_for_activepath = 0;
2336				goto bind_path;
2337			}
2338
2339			/*
2340			 * Check if there is an ONLINE path OR a STANDBY path
2341			 * available. If none is available, do not attempt
2342			 * to do a failover, just return a fatal error at this
2343			 * point.
2344			 */
2345			npip = NULL;
2346			rval = mdi_select_path(cdip, NULL,
2347			    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2348			    NULL, &npip);
2349			if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2350				/*
2351				 * No paths available, just return FATAL error.
2352 */ 2353 VHCI_RELEASE_LUN(vlun); 2354 if (pgr_sema_held) { 2355 sema_v(&vlun->svl_pgr_sema); 2356 } 2357 return (TRAN_FATAL_ERROR); 2358 } 2359 mdi_rele_path(npip); 2360 VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking " 2361 "mdi_failover\n")); 2362 rval = mdi_failover(vhci->vhci_dip, cdip, 2363 MDI_FAILOVER_ASYNC); 2364 if (rval == MDI_FAILURE) { 2365 VHCI_RELEASE_LUN(vlun); 2366 if (pgr_sema_held) { 2367 sema_v(&vlun->svl_pgr_sema); 2368 } 2369 return (TRAN_FATAL_ERROR); 2370 } else if (rval == MDI_BUSY) { 2371 VHCI_RELEASE_LUN(vlun); 2372 if (pgr_sema_held) { 2373 sema_v(&vlun->svl_pgr_sema); 2374 } 2375 return (TRAN_BUSY); 2376 } else { 2377 if (pgr_sema_held) { 2378 sema_v(&vlun->svl_pgr_sema); 2379 } 2380 return (TRAN_BUSY); 2381 } 2382 } 2383 vlun->svl_waiting_for_activepath = 0; 2384 bind_path: 2385 vpkt->vpkt_path = pip; 2386 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 2387 ASSERT(svp != NULL); 2388 2389 psd = svp->svp_psd; 2390 ASSERT(psd != NULL); 2391 address = &psd->sd_address; 2392 } else { 2393 pkt = vpkt->vpkt_hba_pkt; 2394 address = &pkt->pkt_address; 2395 } 2396 2397 /* Verify match of specified path_instance and selected path_instance */ 2398 ASSERT((path_instance == 0) || 2399 (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path))); 2400 2401 /* 2402 * For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever 2403 * target driver calls vhci_scsi_init_pkt. 2404 */ 2405 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && 2406 vpkt->vpkt_path && vpkt->vpkt_hba_pkt) { 2407 VHCI_DEBUG(4, (CE_NOTE, NULL, 2408 "vhci_bind_transport: PKT_PARTIAL_DMA " 2409 "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n", 2410 (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt)); 2411 pkt = vpkt->vpkt_hba_pkt; 2412 address = &pkt->pkt_address; 2413 } 2414 2415 if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) { 2416 pkt = scsi_init_pkt(address, pkt, 2417 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen, 2418 vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL); 2419 2420 if (pkt == NULL) { 2421 VHCI_DEBUG(4, (CE_NOTE, NULL, 2422 "!bind transport: 0x%p 0x%p 0x%p\n", 2423 (void *)vhci, (void *)psd, (void *)vpkt)); 2424 if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) { 2425 MDI_PI_ERRSTAT(vpkt->vpkt_path, 2426 MDI_PI_TRANSERR); 2427 mdi_rele_path(vpkt->vpkt_path); 2428 vpkt->vpkt_path = NULL; 2429 } 2430 if (pgr_sema_held) { 2431 sema_v(&vlun->svl_pgr_sema); 2432 } 2433 /* 2434 * Looks like a fatal error. 2435 * May be device disappeared underneath. 2436 * Give another chance to target driver for a retry to 2437 * get another path. 2438 */ 2439 return (TRAN_BUSY); 2440 } 2441 } 2442 2443 pkt->pkt_private = vpkt; 2444 vpkt->vpkt_hba_pkt = pkt; 2445 return (TRAN_ACCEPT); 2446 } 2447 2448 2449 /*PRINTFLIKE3*/ 2450 void 2451 vhci_log(int level, dev_info_t *dip, const char *fmt, ...) 
2452 { 2453 char buf[256]; 2454 va_list ap; 2455 2456 va_start(ap, fmt); 2457 (void) vsprintf(buf, fmt, ap); 2458 va_end(ap); 2459 2460 scsi_log(dip, "scsi_vhci", level, buf); 2461 } 2462 2463 /* do a PGR out with the information we've saved away */ 2464 static int 2465 vhci_do_prout(scsi_vhci_priv_t *svp) 2466 { 2467 2468 struct scsi_pkt *new_pkt; 2469 struct buf *bp; 2470 scsi_vhci_lun_t *vlun = svp->svp_svl; 2471 int rval, retry, nr_retry, ua_retry; 2472 uint8_t *sns, skey; 2473 2474 bp = getrbuf(KM_SLEEP); 2475 bp->b_flags = B_WRITE; 2476 bp->b_resid = 0; 2477 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout; 2478 bp->b_bcount = vlun->svl_bcount; 2479 2480 VHCI_INCR_PATH_CMDCOUNT(svp); 2481 2482 new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, 2483 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0, 2484 SLEEP_FUNC, NULL); 2485 if (new_pkt == NULL) { 2486 VHCI_DECR_PATH_CMDCOUNT(svp); 2487 freerbuf(bp); 2488 cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed"); 2489 return (0); 2490 } 2491 mutex_enter(&vlun->svl_mutex); 2492 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout; 2493 bp->b_bcount = vlun->svl_bcount; 2494 bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp, 2495 sizeof (vlun->svl_cdb)); 2496 new_pkt->pkt_time = vlun->svl_time; 2497 mutex_exit(&vlun->svl_mutex); 2498 new_pkt->pkt_flags = FLAG_NOINTR; 2499 2500 ua_retry = nr_retry = retry = 0; 2501 again: 2502 rval = vhci_do_scsi_cmd(new_pkt); 2503 if (rval != 1) { 2504 if ((new_pkt->pkt_reason == CMD_CMPLT) && 2505 (SCBP_C(new_pkt) == STATUS_CHECK) && 2506 (new_pkt->pkt_state & STATE_ARQ_DONE)) { 2507 sns = (uint8_t *) 2508 &(((struct scsi_arq_status *)(uintptr_t) 2509 (new_pkt->pkt_scbp))->sts_sensedata); 2510 skey = scsi_sense_key(sns); 2511 if ((skey == KEY_UNIT_ATTENTION) || 2512 (skey == KEY_NOT_READY)) { 2513 int max_retry; 2514 struct scsi_failover_ops *fops; 2515 fops = vlun->svl_fops; 2516 rval = fops->sfo_analyze_sense(svp->svp_psd, 2517 sns, vlun->svl_fops_ctpriv); 2518 if (rval == SCSI_SENSE_NOT_READY) { 2519 max_retry = vhci_prout_not_ready_retry; 2520 retry = nr_retry++; 2521 delay(1*drv_usectohz(1000000)); 2522 } else { 2523 /* chk for state change and update */ 2524 if (rval == SCSI_SENSE_STATE_CHANGED) { 2525 int held; 2526 VHCI_HOLD_LUN(vlun, 2527 VH_NOSLEEP, held); 2528 if (!held) { 2529 rval = TRAN_BUSY; 2530 } else { 2531 /* chk for alua first */ 2532 vhci_update_pathstates( 2533 (void *)vlun); 2534 } 2535 } 2536 retry = ua_retry++; 2537 max_retry = VHCI_MAX_PGR_RETRIES; 2538 } 2539 if (retry < max_retry) { 2540 VHCI_DEBUG(4, (CE_WARN, NULL, 2541 "!vhci_do_prout retry 0x%x " 2542 "(0x%x 0x%x 0x%x)", 2543 SCBP_C(new_pkt), 2544 new_pkt->pkt_cdbp[0], 2545 new_pkt->pkt_cdbp[1], 2546 new_pkt->pkt_cdbp[2])); 2547 goto again; 2548 } 2549 rval = 0; 2550 VHCI_DEBUG(4, (CE_WARN, NULL, 2551 "!vhci_do_prout 0x%x " 2552 "(0x%x 0x%x 0x%x)", 2553 SCBP_C(new_pkt), 2554 new_pkt->pkt_cdbp[0], 2555 new_pkt->pkt_cdbp[1], 2556 new_pkt->pkt_cdbp[2])); 2557 } else if (skey == KEY_ILLEGAL_REQUEST) 2558 rval = VHCI_PGR_ILLEGALOP; 2559 } 2560 } else { 2561 rval = 1; 2562 } 2563 scsi_destroy_pkt(new_pkt); 2564 VHCI_DECR_PATH_CMDCOUNT(svp); 2565 freerbuf(bp); 2566 return (rval); 2567 } 2568 2569 static void 2570 vhci_run_cmd(void *arg) 2571 { 2572 struct scsi_pkt *pkt = (struct scsi_pkt *)arg; 2573 struct scsi_pkt *tpkt; 2574 scsi_vhci_priv_t *svp; 2575 mdi_pathinfo_t *pip, *npip; 2576 scsi_vhci_lun_t *vlun; 2577 dev_info_t *cdip; 2578 scsi_vhci_priv_t *nsvp; 2579 int fail = 0; 2580 int rval; 2581 struct vhci_pkt *vpkt; 2582 uchar_t cdb_1; 
2583 vhci_prout_t *prout; 2584 2585 vpkt = (struct vhci_pkt *)pkt->pkt_private; 2586 tpkt = vpkt->vpkt_tgt_pkt; 2587 pip = vpkt->vpkt_path; 2588 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 2589 if (svp == NULL) { 2590 tpkt->pkt_reason = CMD_TRAN_ERR; 2591 tpkt->pkt_statistics = STAT_ABORTED; 2592 goto done; 2593 } 2594 vlun = svp->svp_svl; 2595 prout = &vlun->svl_prout; 2596 if (SCBP_C(pkt) != STATUS_GOOD) 2597 fail++; 2598 cdip = vlun->svl_dip; 2599 pip = npip = NULL; 2600 rval = mdi_select_path(cdip, NULL, 2601 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip); 2602 if ((rval != MDI_SUCCESS) || (npip == NULL)) { 2603 VHCI_DEBUG(4, (CE_NOTE, NULL, 2604 "vhci_run_cmd: no path! 0x%p\n", (void *)svp)); 2605 tpkt->pkt_reason = CMD_TRAN_ERR; 2606 tpkt->pkt_statistics = STAT_ABORTED; 2607 goto done; 2608 } 2609 2610 cdb_1 = vlun->svl_cdb[1]; 2611 vlun->svl_cdb[1] &= 0xe0; 2612 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 2613 2614 do { 2615 nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 2616 if (nsvp == NULL) { 2617 VHCI_DEBUG(4, (CE_NOTE, NULL, 2618 "vhci_run_cmd: no " 2619 "client priv! 0x%p offlined?\n", 2620 (void *)npip)); 2621 goto next_path; 2622 } 2623 if (vlun->svl_first_path == npip) { 2624 goto next_path; 2625 } else { 2626 if (vhci_do_prout(nsvp) != 1) 2627 fail++; 2628 } 2629 next_path: 2630 pip = npip; 2631 rval = mdi_select_path(cdip, NULL, 2632 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 2633 pip, &npip); 2634 mdi_rele_path(pip); 2635 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 2636 2637 vlun->svl_cdb[1] = cdb_1; 2638 2639 if (fail) { 2640 VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, " 2641 "couldn't be replicated on all paths", 2642 ddi_driver_name(cdip), ddi_get_instance(cdip))); 2643 vhci_print_prout_keys(vlun, "vhci_run_cmd: "); 2644 2645 if (SCBP_C(pkt) != STATUS_GOOD) { 2646 tpkt->pkt_reason = CMD_TRAN_ERR; 2647 tpkt->pkt_statistics = STAT_ABORTED; 2648 } 2649 } else { 2650 vlun->svl_pgr_active = 1; 2651 vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:"); 2652 2653 bcopy((const void *)prout->service_key, 2654 (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE); 2655 bcopy((const void *)prout->res_key, 2656 (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE); 2657 2658 vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:"); 2659 } 2660 done: 2661 if (SCBP_C(pkt) == STATUS_GOOD) 2662 vlun->svl_first_path = NULL; 2663 2664 if (svp) 2665 VHCI_DECR_PATH_CMDCOUNT(svp); 2666 2667 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 2668 scsi_destroy_pkt(pkt); 2669 vpkt->vpkt_hba_pkt = NULL; 2670 if (vpkt->vpkt_path) { 2671 mdi_rele_path(vpkt->vpkt_path); 2672 vpkt->vpkt_path = NULL; 2673 } 2674 } 2675 2676 sema_v(&vlun->svl_pgr_sema); 2677 /* 2678 * The PROUT commands are not included in the automatic retry 2679 * mechanism, therefore, vpkt_org_vpkt should never be set here. 2680 */ 2681 ASSERT(vpkt->vpkt_org_vpkt == NULL); 2682 scsi_hba_pkt_comp(tpkt); 2683 } 2684 2685 /* 2686 * Get the keys registered with this target. Since we will have 2687 * registered the same key with multiple initiators, strip out 2688 * any duplicate keys. 2689 * 2690 * The pointers which will be used to filter the registered keys from 2691 * the device will be stored in filter_prin and filter_pkt. 
If the 2692 * allocation length of the buffer was sufficient for the number of 2693 * parameter data bytes available to be returned by the device then the 2694 * key filtering will use the keylist returned from the original 2695 * request. If the allocation length of the buffer was not sufficient, 2696 * then the filtering will use the keylist returned from the request 2697 * that is resent below. 2698 * 2699 * If the device returns an additional length field that is greater than 2700 * the allocation length of the buffer, then allocate a new buffer which 2701 * can accommodate the number of parameter data bytes available to be 2702 * returned. Resend the scsi PRIN command, filter out the duplicate 2703 * keys and return as many of the unique keys found that was originally 2704 * requested and set the additional length field equal to the data bytes 2705 * of unique reservation keys available to be returned. 2706 * 2707 * If the device returns an additional length field that is less than or 2708 * equal to the allocation length of the buffer, then all the available 2709 * keys registered were returned by the device. Filter out the 2710 * duplicate keys and return all of the unique keys found and set the 2711 * additional length field equal to the data bytes of the reservation 2712 * keys to be returned. 2713 */ 2714 2715 #define VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation)) 2716 2717 static int 2718 vhci_do_prin(struct vhci_pkt **intr_vpkt) 2719 { 2720 scsi_vhci_priv_t *svp; 2721 struct vhci_pkt *vpkt = *intr_vpkt; 2722 vhci_prin_readkeys_t *prin; 2723 scsi_vhci_lun_t *vlun; 2724 struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address); 2725 2726 struct buf *new_bp = NULL; 2727 struct scsi_pkt *new_pkt = NULL; 2728 struct vhci_pkt *new_vpkt = NULL; 2729 uint32_t needed_length; 2730 int rval = VHCI_CMD_CMPLT; 2731 uint32_t prin_length = 0; 2732 uint32_t svl_prin_length = 0; 2733 2734 ASSERT(vpkt->vpkt_path); 2735 svp = mdi_pi_get_vhci_private(vpkt->vpkt_path); 2736 ASSERT(svp); 2737 vlun = svp->svp_svl; 2738 ASSERT(vlun); 2739 2740 /* 2741 * If the caller only asked for an amount of data that would not 2742 * be enough to include any key data it is likely that they will 2743 * send the next command with a buffer size based on the information 2744 * from this header. Doing recovery on this would be a duplication 2745 * of efforts. 2746 */ 2747 if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) { 2748 rval = VHCI_CMD_CMPLT; 2749 goto exit; 2750 } 2751 2752 if (vpkt->vpkt_org_vpkt == NULL) { 2753 /* 2754 * Can fail as sleep is not allowed. 2755 */ 2756 prin = (vhci_prin_readkeys_t *) 2757 bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP); 2758 } else { 2759 /* 2760 * The retry buf doesn't need to be mapped in. 2761 */ 2762 prin = (vhci_prin_readkeys_t *) 2763 vpkt->vpkt_tgt_init_bp->b_un.b_daddr; 2764 } 2765 2766 if (prin == NULL) { 2767 VHCI_DEBUG(5, (CE_WARN, NULL, 2768 "vhci_do_prin: bp_mapin_common failed.")); 2769 rval = VHCI_CMD_ERROR; 2770 goto fail; 2771 } 2772 2773 prin_length = BE_32(prin->length); 2774 2775 /* 2776 * According to SPC-3r22, sec 4.3.4.6: "If the amount of 2777 * information to be transferred exceeds the maximum value 2778 * that the ALLOCATION LENGTH field is capable of specifying, 2779 * the device server shall...terminate the command with CHECK 2780 * CONDITION status". The ALLOCATION LENGTH field of the 2781 * PERSISTENT RESERVE IN command is 2 bytes. 
We should never 2782 * get here with an ADDITIONAL LENGTH greater than 0xFFFF 2783 * so if we do, then it is an error! 2784 */ 2785 2786 2787 if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) { 2788 VHCI_DEBUG(5, (CE_NOTE, NULL, 2789 "vhci_do_prin: Device returned invalid " 2790 "length 0x%x\n", prin_length)); 2791 rval = VHCI_CMD_ERROR; 2792 goto fail; 2793 } 2794 needed_length = prin_length + VHCI_PRIN_HEADER_SZ; 2795 2796 /* 2797 * If prin->length is greater than the byte count allocated in the 2798 * original buffer, then resend the request with enough buffer 2799 * allocated to get all of the available registered keys. 2800 */ 2801 if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) && 2802 (vpkt->vpkt_org_vpkt == NULL)) { 2803 2804 new_pkt = vhci_create_retry_pkt(vpkt); 2805 if (new_pkt == NULL) { 2806 rval = VHCI_CMD_ERROR; 2807 goto fail; 2808 } 2809 new_vpkt = TGTPKT2VHCIPKT(new_pkt); 2810 2811 /* 2812 * This is the buf with buffer pointer 2813 * where the prin readkeys will be 2814 * returned from the device 2815 */ 2816 new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address, 2817 NULL, needed_length, B_READ, NULL_FUNC, NULL); 2818 if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) { 2819 if (new_bp) { 2820 scsi_free_consistent_buf(new_bp); 2821 } 2822 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt); 2823 rval = VHCI_CMD_ERROR; 2824 goto fail; 2825 } 2826 new_bp->b_bcount = needed_length; 2827 new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8); 2828 new_pkt->pkt_cdbp[8] = (uchar_t)needed_length; 2829 2830 rval = VHCI_CMD_RETRY; 2831 2832 new_vpkt->vpkt_tgt_init_bp = new_bp; 2833 } 2834 2835 if (rval == VHCI_CMD_RETRY) { 2836 2837 /* 2838 * There were more keys then the original request asked for. 2839 */ 2840 mdi_pathinfo_t *path_holder = vpkt->vpkt_path; 2841 2842 /* 2843 * Release the old path because it does not matter which path 2844 * this command is sent down. This allows the normal bind 2845 * transport mechanism to be used. 2846 */ 2847 if (vpkt->vpkt_path != NULL) { 2848 mdi_rele_path(vpkt->vpkt_path); 2849 vpkt->vpkt_path = NULL; 2850 } 2851 2852 /* 2853 * Dispatch the retry command 2854 */ 2855 if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start, 2856 (void *) new_vpkt, KM_NOSLEEP) == NULL) { 2857 if (path_holder) { 2858 vpkt->vpkt_path = path_holder; 2859 mdi_hold_path(path_holder); 2860 } 2861 scsi_free_consistent_buf(new_bp); 2862 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt); 2863 rval = VHCI_CMD_ERROR; 2864 goto fail; 2865 } 2866 2867 /* 2868 * If we return VHCI_CMD_RETRY, that means the caller 2869 * is going to bail and wait for the reissued command 2870 * to complete. In that case, we need to decrement 2871 * the path command count right now. In any other 2872 * case, it'll be decremented by the caller. 2873 */ 2874 VHCI_DECR_PATH_CMDCOUNT(svp); 2875 goto exit; 2876 2877 } 2878 2879 if (rval == VHCI_CMD_CMPLT) { 2880 /* 2881 * The original request got all of the keys or the recovery 2882 * packet returns. 
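		 * Either way 'prin' now points at a buffer holding the
		 * complete key list, so the duplicate filtering below can
		 * operate on every registered key.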
2883 */ 2884 int new; 2885 int old; 2886 int num_keys = prin_length / MHIOC_RESV_KEY_SIZE; 2887 2888 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n", 2889 num_keys)); 2890 2891 #ifdef DEBUG 2892 VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n")); 2893 if (vhci_debug == 5) 2894 vhci_print_prin_keys(prin, num_keys); 2895 VHCI_DEBUG(5, (CE_NOTE, NULL, 2896 "vhci_do_prin: MPxIO old keys:\n")); 2897 if (vhci_debug == 5) 2898 vhci_print_prin_keys(&vlun->svl_prin, num_keys); 2899 #endif 2900 2901 /* 2902 * Filter out all duplicate keys returned from the device 2903 * We know that we use a different key for every host, so we 2904 * can simply strip out duplicates. Otherwise we would need to 2905 * do more bookkeeping to figure out which keys to strip out. 2906 */ 2907 2908 new = 0; 2909 2910 /* 2911 * If we got at least 1 key copy it. 2912 */ 2913 if (num_keys > 0) { 2914 vlun->svl_prin.keylist[0] = prin->keylist[0]; 2915 new++; 2916 } 2917 2918 /* 2919 * find next unique key. 2920 */ 2921 for (old = 1; old < num_keys; old++) { 2922 int j; 2923 int match = 0; 2924 2925 if (new >= VHCI_NUM_RESV_KEYS) 2926 break; 2927 for (j = 0; j < new; j++) { 2928 if (bcmp(&prin->keylist[old], 2929 &vlun->svl_prin.keylist[j], 2930 sizeof (mhioc_resv_key_t)) == 0) { 2931 match = 1; 2932 break; 2933 } 2934 } 2935 if (!match) { 2936 vlun->svl_prin.keylist[new] = 2937 prin->keylist[old]; 2938 new++; 2939 } 2940 } 2941 2942 /* Stored Big Endian */ 2943 vlun->svl_prin.generation = prin->generation; 2944 svl_prin_length = new * sizeof (mhioc_resv_key_t); 2945 /* Stored Big Endian */ 2946 vlun->svl_prin.length = BE_32(svl_prin_length); 2947 svl_prin_length += VHCI_PRIN_HEADER_SZ; 2948 2949 /* 2950 * If we arrived at this point after issuing a retry, make sure 2951 * that we put everything back the way it originally was so 2952 * that the target driver can complete the command correctly. 2953 */ 2954 if (vpkt->vpkt_org_vpkt != NULL) { 2955 new_bp = vpkt->vpkt_tgt_init_bp; 2956 2957 scsi_free_consistent_buf(new_bp); 2958 2959 vpkt = vhci_sync_retry_pkt(vpkt); 2960 *intr_vpkt = vpkt; 2961 2962 /* 2963 * Make sure the original buffer is mapped into kernel 2964 * space before we try to copy the filtered keys into 2965 * it. 2966 */ 2967 prin = (vhci_prin_readkeys_t *)bp_mapin_common( 2968 vpkt->vpkt_tgt_init_bp, VM_NOSLEEP); 2969 } 2970 2971 /* 2972 * Now copy the desired number of prin keys into the original 2973 * target buffer. 2974 */ 2975 if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) { 2976 /* 2977 * It is safe to return all of the available unique 2978 * keys 2979 */ 2980 bcopy(&vlun->svl_prin, prin, svl_prin_length); 2981 } else { 2982 /* 2983 * Not all of the available keys were requested by the 2984 * original command. 2985 */ 2986 bcopy(&vlun->svl_prin, prin, 2987 vpkt->vpkt_tgt_init_bp->b_bcount); 2988 } 2989 #ifdef DEBUG 2990 VHCI_DEBUG(5, (CE_NOTE, NULL, 2991 "vhci_do_prin: To Application:\n")); 2992 if (vhci_debug == 5) 2993 vhci_print_prin_keys(prin, new); 2994 VHCI_DEBUG(5, (CE_NOTE, NULL, 2995 "vhci_do_prin: MPxIO new keys:\n")); 2996 if (vhci_debug == 5) 2997 vhci_print_prin_keys(&vlun->svl_prin, new); 2998 #endif 2999 } 3000 fail: 3001 if (rval == VHCI_CMD_ERROR) { 3002 /* 3003 * If we arrived at this point after issuing a 3004 * retry, make sure that we put everything back 3005 * the way it originally was so that ssd can 3006 * complete the command correctly. 
3007 */ 3008 3009 if (vpkt->vpkt_org_vpkt != NULL) { 3010 new_bp = vpkt->vpkt_tgt_init_bp; 3011 if (new_bp != NULL) { 3012 scsi_free_consistent_buf(new_bp); 3013 } 3014 3015 new_vpkt = vpkt; 3016 vpkt = vpkt->vpkt_org_vpkt; 3017 3018 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 3019 new_vpkt->vpkt_tgt_pkt); 3020 } 3021 3022 /* 3023 * Mark this command completion as having an error so that 3024 * ssd will retry the command. 3025 */ 3026 3027 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 3028 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 3029 3030 rval = VHCI_CMD_CMPLT; 3031 } 3032 exit: 3033 /* 3034 * Make sure that the semaphore is only released once. 3035 */ 3036 if (rval == VHCI_CMD_CMPLT) { 3037 sema_v(&vlun->svl_pgr_sema); 3038 } 3039 3040 return (rval); 3041 } 3042 3043 static void 3044 vhci_intr(struct scsi_pkt *pkt) 3045 { 3046 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 3047 struct scsi_pkt *tpkt; 3048 scsi_vhci_priv_t *svp; 3049 scsi_vhci_lun_t *vlun; 3050 int rval, held; 3051 struct scsi_failover_ops *fops; 3052 uint8_t *sns, skey, asc, ascq; 3053 mdi_pathinfo_t *lpath; 3054 static char *timeout_err = "Command Timeout"; 3055 static char *parity_err = "Parity Error"; 3056 char *err_str = NULL; 3057 dev_info_t *vdip, *cdip, *pdip; 3058 char *cpath, *dpath; 3059 3060 ASSERT(vpkt != NULL); 3061 tpkt = vpkt->vpkt_tgt_pkt; 3062 ASSERT(tpkt != NULL); 3063 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3064 ASSERT(svp != NULL); 3065 vlun = svp->svp_svl; 3066 ASSERT(vlun != NULL); 3067 lpath = vpkt->vpkt_path; 3068 3069 /* 3070 * sync up the target driver's pkt with the pkt that 3071 * we actually used 3072 */ 3073 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp); 3074 tpkt->pkt_resid = pkt->pkt_resid; 3075 tpkt->pkt_state = pkt->pkt_state; 3076 tpkt->pkt_statistics = pkt->pkt_statistics; 3077 tpkt->pkt_reason = pkt->pkt_reason; 3078 3079 /* Return path_instance information back to the target driver. */ 3080 if (scsi_pkt_allocated_correctly(tpkt)) { 3081 if (scsi_pkt_allocated_correctly(pkt)) { 3082 /* 3083 * If both packets were correctly allocated, 3084 * return path returned by pHCI. 
3085			 */
3086			tpkt->pkt_path_instance = pkt->pkt_path_instance;
3087		} else {
3088			/* Otherwise return path of pHCI we used */
3089			tpkt->pkt_path_instance =
3090			    mdi_pi_get_path_instance(lpath);
3091		}
3092	}
3093
3094	if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3095	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3096	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3097		if ((SCBP_C(pkt) != STATUS_GOOD) ||
3098		    (pkt->pkt_reason != CMD_CMPLT)) {
3099			sema_v(&vlun->svl_pgr_sema);
3100		}
3101	} else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3102		if (pkt->pkt_reason != CMD_CMPLT ||
3103		    (SCBP_C(pkt) != STATUS_GOOD)) {
3104			sema_v(&vlun->svl_pgr_sema);
3105		}
3106	}
3107
3108	switch (pkt->pkt_reason) {
3109	case CMD_CMPLT:
3110		/*
3111		 * cmd completed successfully, check for scsi errors
3112		 */
3113		switch (*(pkt->pkt_scbp)) {
3114		case STATUS_CHECK:
3115			if (pkt->pkt_state & STATE_ARQ_DONE) {
3116				sns = (uint8_t *)
3117				    &(((struct scsi_arq_status *)(uintptr_t)
3118				    (pkt->pkt_scbp))->sts_sensedata);
3119				skey = scsi_sense_key(sns);
3120				asc = scsi_sense_asc(sns);
3121				ascq = scsi_sense_ascq(sns);
3122				fops = vlun->svl_fops;
3123				ASSERT(fops != NULL);
3124				VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3125				    "Received sns key %x asc %x ascq %x\n",
3126				    skey, asc, ascq));
3127
3128				if (vlun->svl_waiting_for_activepath == 1) {
3129					/*
3130					 * if we are here it means we are
3131					 * in the midst of a probe/attach
3132					 * through a passive path; this
3133					 * case is exempt from sense analysis
3134					 * for detection of ext. failover
3135					 * because that would unnecessarily
3136					 * increase attach time.
3137					 */
3138					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3139					    vpkt->vpkt_tgt_init_scblen);
3140					break;
3141				}
3142				if (asc == VHCI_SCSI_PERR) {
3143					/*
3144					 * parity error
3145					 */
3146					err_str = parity_err;
3147					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3148					    vpkt->vpkt_tgt_init_scblen);
3149					break;
3150				}
3151				rval = fops->sfo_analyze_sense(svp->svp_psd,
3152				    sns, vlun->svl_fops_ctpriv);
3153				if ((rval == SCSI_SENSE_NOFAILOVER) ||
3154				    (rval == SCSI_SENSE_UNKNOWN) ||
3155				    (rval == SCSI_SENSE_NOT_READY)) {
3156					bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3157					    vpkt->vpkt_tgt_init_scblen);
3158					break;
3159				} else if (rval == SCSI_SENSE_STATE_CHANGED) {
3160					struct scsi_vhci *vhci;
3161					vhci = ADDR2VHCI(&tpkt->pkt_address);
3162					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3163					if (!held) {
3164						/*
3165						 * looks like some other thread
3166						 * has already detected this
3167						 * condition
3168						 */
3169						tpkt->pkt_state &=
3170						    ~STATE_ARQ_DONE;
3171						*(tpkt->pkt_scbp) =
3172						    STATUS_BUSY;
3173						break;
3174					}
3175					(void) taskq_dispatch(
3176					    vhci->vhci_update_pathstates_taskq,
3177					    vhci_update_pathstates,
3178					    (void *)vlun, KM_SLEEP);
3179				} else {
3180					/*
3181					 * externally initiated failover
3182					 * has occurred or is in progress
3183					 */
3184					VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3185					if (!held) {
3186						/*
3187						 * looks like some other thread
3188						 * has already detected this
3189						 * condition
3190						 */
3191						tpkt->pkt_state &=
3192						    ~STATE_ARQ_DONE;
3193						*(tpkt->pkt_scbp) =
3194						    STATUS_BUSY;
3195						break;
3196					} else {
3197						rval = vhci_handle_ext_fo
3198						    (pkt, rval);
3199						if (rval == BUSY_RETURN) {
3200							tpkt->pkt_state &=
3201							    ~STATE_ARQ_DONE;
3202							*(tpkt->pkt_scbp) =
3203							    STATUS_BUSY;
3204							break;
3205						}
3206						bcopy(pkt->pkt_scbp,
3207						    tpkt->pkt_scbp,
3208						    vpkt->vpkt_tgt_init_scblen);
3209						break;
3210					}
3211				}
3212			}
3213			break;
3214
3215		/*
3216		 * If this is a good SCSI-II RELEASE cmd completion then restore
3217		 * the load balancing policy and reset
VLUN_RESERVE_ACTIVE_FLG. 3218 * If this is a good SCSI-II RESERVE cmd completion then set 3219 * VLUN_RESERVE_ACTIVE_FLG. 3220 */ 3221 case STATUS_GOOD: 3222 if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) || 3223 (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) { 3224 (void) mdi_set_lb_policy(vlun->svl_dip, 3225 vlun->svl_lb_policy_save); 3226 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 3227 VHCI_DEBUG(1, (CE_WARN, NULL, 3228 "!vhci_intr: vlun 0x%p release path 0x%p", 3229 (void *)vlun, (void *)vpkt->vpkt_path)); 3230 } 3231 3232 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) || 3233 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) { 3234 vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG; 3235 vlun->svl_resrv_pip = vpkt->vpkt_path; 3236 VHCI_DEBUG(1, (CE_WARN, NULL, 3237 "!vhci_intr: vlun 0x%p reserved path 0x%p", 3238 (void *)vlun, (void *)vpkt->vpkt_path)); 3239 } 3240 break; 3241 3242 case STATUS_RESERVATION_CONFLICT: 3243 VHCI_DEBUG(1, (CE_WARN, NULL, 3244 "!vhci_intr: vlun 0x%p " 3245 "reserve conflict on path 0x%p", 3246 (void *)vlun, (void *)vpkt->vpkt_path)); 3247 /* FALLTHROUGH */ 3248 default: 3249 break; 3250 } 3251 3252 /* 3253 * Update I/O completion statistics for the path 3254 */ 3255 mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp); 3256 3257 /* 3258 * Command completed successfully, release the dma binding and 3259 * destroy the transport side of the packet. 3260 */ 3261 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) && 3262 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 3263 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) { 3264 if (SCBP_C(pkt) == STATUS_GOOD) { 3265 ASSERT(vlun->svl_taskq); 3266 svp->svp_last_pkt_reason = pkt->pkt_reason; 3267 (void) taskq_dispatch(vlun->svl_taskq, 3268 vhci_run_cmd, pkt, KM_SLEEP); 3269 return; 3270 } 3271 } 3272 if ((SCBP_C(pkt) == STATUS_GOOD) && 3273 (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) { 3274 /* 3275 * If the action (value in byte 1 of the cdb) is zero, 3276 * we're reading keys, and that's the only condition 3277 * where we need to be concerned with filtering keys 3278 * and potential retries. Otherwise, we simply signal 3279 * the semaphore and move on. 3280 */ 3281 if (pkt->pkt_cdbp[1] == 0) { 3282 /* 3283 * If this is the completion of an internal 3284 * retry then we need to make sure that the 3285 * pkt and tpkt pointers are readjusted so 3286 * the calls to scsi_destroy_pkt and pkt_comp 3287 * below work * correctly. 3288 */ 3289 if (vpkt->vpkt_org_vpkt != NULL) { 3290 pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt; 3291 tpkt = vpkt->vpkt_org_vpkt-> 3292 vpkt_tgt_pkt; 3293 3294 /* 3295 * If this command was issued through 3296 * the taskq then we need to clear 3297 * this flag for proper processing in 3298 * the case of a retry from the target 3299 * driver. 3300 */ 3301 vpkt->vpkt_state &= 3302 ~VHCI_PKT_THRU_TASKQ; 3303 } 3304 3305 /* 3306 * if vhci_do_prin returns VHCI_CMD_CMPLT then 3307 * vpkt will contain the address of the 3308 * original vpkt 3309 */ 3310 if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) { 3311 /* 3312 * The command has been resent to get 3313 * all the keys from the device. Don't 3314 * complete the command with ssd until 3315 * the retry completes. 
3316					 */
3317					return;
3318				}
3319			} else {
3320				sema_v(&vlun->svl_pgr_sema);
3321			}
3322		}
3323
3324		break;
3325
3326	case CMD_TIMEOUT:
3327		if ((pkt->pkt_statistics &
3328		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
3329
3330			VHCI_DEBUG(1, (CE_NOTE, NULL,
3331			    "!scsi vhci timeout invoked\n"));
3332
3333			(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3334			    FALSE, VHCI_DEPTH_ALL);
3335		}
3336		MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3337		tpkt->pkt_statistics |= STAT_ABORTED;
3338		err_str = timeout_err;
3339		break;
3340
3341	case CMD_TRAN_ERR:
3342		/*
3343		 * This status is returned if the transport has sent the cmd
3344		 * down the link to the target and then some error occurs.
3345		 * In case of SCSI-II RESERVE cmd, we don't know if the
3346		 * reservation has been accepted by the target or not, so we
3347		 * need to clear the reservation.
3348		 */
3349		if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3350		    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3351			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3352			    " cmd_tran_err for scsi-2 reserve cmd\n"));
3353			if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3354			    TRUE, VHCI_DEPTH_TARGET)) {
3355				VHCI_DEBUG(1, (CE_WARN, NULL,
3356				    "!vhci_intr cmd_tran_err reset failed!"));
3357			}
3358		}
3359		break;
3360
3361	case CMD_DEV_GONE:
3362		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3363		    "cmd_dev_gone\n"));
3364		tpkt->pkt_reason = CMD_CMPLT;
3365		tpkt->pkt_state = STATE_GOT_BUS |
3366		    STATE_GOT_TARGET | STATE_SENT_CMD |
3367		    STATE_GOT_STATUS;
3368		*(tpkt->pkt_scbp) = STATUS_BUSY;
3369		break;
3370
3371	default:
3372		break;
3373	}
3374
3375	/*
3376	 * SCSI-II RESERVE cmd has been serviced by the lower layers, so clear
3377	 * the flag so the lun is not QUIESCED any longer.
3378	 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3379	 * is retried, a taskq shall again be dispatched to service it. Else
3380	 * it may lead to a system hang if the retry is within interrupt
3381	 * context.
3382	 */
3383	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3384	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3385		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3386		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3387	}
3388
3389	/*
3390	 * vpkt_org_vpkt should always be NULL here if the retry command
3391	 * has been successfully processed. If vpkt_org_vpkt != NULL at
3392	 * this point, it is an error so restore the original vpkt and
3393	 * return an error to the target driver so it can retry the
3394	 * command as appropriate.
3395	 */
3396	if (vpkt->vpkt_org_vpkt != NULL) {
3397		struct vhci_pkt *new_vpkt = vpkt;
3398		vpkt = vpkt->vpkt_org_vpkt;
3399
3400		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3401		    new_vpkt->vpkt_tgt_pkt);
3402
3403		/*
3404		 * Mark this command completion as having an error so that
3405		 * ssd will retry the command.
3406 */ 3407 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 3408 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 3409 3410 pkt = vpkt->vpkt_hba_pkt; 3411 tpkt = vpkt->vpkt_tgt_pkt; 3412 } 3413 3414 if ((err_str != NULL) && (pkt->pkt_reason != 3415 svp->svp_last_pkt_reason)) { 3416 cdip = vlun->svl_dip; 3417 pdip = mdi_pi_get_phci(vpkt->vpkt_path); 3418 vdip = ddi_get_parent(cdip); 3419 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3420 dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3421 vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s (%s%d)", 3422 ddi_pathname(cdip, cpath), ddi_driver_name(cdip), 3423 ddi_get_instance(cdip), err_str, 3424 ddi_pathname(pdip, dpath), ddi_driver_name(pdip), 3425 ddi_get_instance(pdip)); 3426 kmem_free(cpath, MAXPATHLEN); 3427 kmem_free(dpath, MAXPATHLEN); 3428 } 3429 svp->svp_last_pkt_reason = pkt->pkt_reason; 3430 VHCI_DECR_PATH_CMDCOUNT(svp); 3431 3432 /* 3433 * For PARTIAL_DMA, vhci should not free the path. 3434 * Target driver will call into vhci_scsi_dmafree or 3435 * destroy pkt to release this path. 3436 */ 3437 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 3438 scsi_destroy_pkt(pkt); 3439 vpkt->vpkt_hba_pkt = NULL; 3440 if (vpkt->vpkt_path) { 3441 mdi_rele_path(vpkt->vpkt_path); 3442 vpkt->vpkt_path = NULL; 3443 } 3444 } 3445 3446 scsi_hba_pkt_comp(tpkt); 3447 } 3448 3449 /* 3450 * two possibilities: (1) failover has completed 3451 * or (2) is in progress; update our path states for 3452 * the former case; for the latter case, 3453 * initiate a scsi_watch request to 3454 * determine when failover completes - vlun is HELD 3455 * until failover completes; BUSY is returned to upper 3456 * layer in both the cases 3457 */ 3458 static int 3459 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat) 3460 { 3461 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 3462 struct scsi_pkt *tpkt; 3463 scsi_vhci_priv_t *svp; 3464 scsi_vhci_lun_t *vlun; 3465 struct scsi_vhci *vhci; 3466 scsi_vhci_swarg_t *swarg; 3467 char *path; 3468 3469 ASSERT(vpkt != NULL); 3470 tpkt = vpkt->vpkt_tgt_pkt; 3471 ASSERT(tpkt != NULL); 3472 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3473 ASSERT(svp != NULL); 3474 vlun = svp->svp_svl; 3475 ASSERT(vlun != NULL); 3476 ASSERT(VHCI_LUN_IS_HELD(vlun)); 3477 3478 vhci = ADDR2VHCI(&tpkt->pkt_address); 3479 3480 if (fostat == SCSI_SENSE_INACTIVE) { 3481 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover " 3482 "detected for %s; updating path states...\n", 3483 vlun->svl_lun_wwn)); 3484 /* 3485 * set the vlun flag to indicate to the task that the target 3486 * port group needs updating 3487 */ 3488 vlun->svl_flags |= VLUN_UPDATE_TPG; 3489 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq, 3490 vhci_update_pathstates, (void *)vlun, KM_SLEEP); 3491 } else { 3492 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3493 vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip), 3494 "!%s (%s%d): Waiting for externally initiated failover " 3495 "to complete", ddi_pathname(vlun->svl_dip, path), 3496 ddi_driver_name(vlun->svl_dip), 3497 ddi_get_instance(vlun->svl_dip)); 3498 kmem_free(path, MAXPATHLEN); 3499 swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP); 3500 if (swarg == NULL) { 3501 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: " 3502 "request packet allocation for %s failed....\n", 3503 vlun->svl_lun_wwn)); 3504 VHCI_RELEASE_LUN(vlun); 3505 return (PKT_RETURN); 3506 } 3507 swarg->svs_svp = svp; 3508 swarg->svs_tos = ddi_get_time(); 3509 swarg->svs_pi = vpkt->vpkt_path; 3510 swarg->svs_release_lun = 0; 3511 swarg->svs_done = 0; 
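		/*
		 * svs_tos records when the wait began; vhci_efo_watch_cb
		 * compares it against VHCI_EXTFO_TIMEOUT to decide when to
		 * give up on the externally initiated failover.
		 */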
3512 /* 3513 * place a hold on the path...we don't want it to 3514 * vanish while scsi_watch is in progress 3515 */ 3516 mdi_hold_path(vpkt->vpkt_path); 3517 svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd, 3518 VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb, 3519 (caddr_t)swarg); 3520 } 3521 return (BUSY_RETURN); 3522 } 3523 3524 /* 3525 * vhci_efo_watch_cb: 3526 * Callback from scsi_watch request to check the failover status. 3527 * Completion is either due to successful failover or timeout. 3528 * Upon successful completion, vhci_update_path_states is called. 3529 * For timeout condition, vhci_efo_done is called. 3530 * Always returns 0 to scsi_watch to keep retrying till vhci_efo_done 3531 * terminates this request properly in a separate thread. 3532 */ 3533 3534 static int 3535 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 3536 { 3537 struct scsi_status *statusp = resultp->statusp; 3538 uint8_t *sensep = (uint8_t *)resultp->sensep; 3539 struct scsi_pkt *pkt = resultp->pkt; 3540 scsi_vhci_swarg_t *swarg; 3541 scsi_vhci_priv_t *svp; 3542 scsi_vhci_lun_t *vlun; 3543 struct scsi_vhci *vhci; 3544 dev_info_t *vdip; 3545 int rval, updt_paths; 3546 3547 swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg; 3548 svp = swarg->svs_svp; 3549 if (swarg->svs_done) { 3550 /* 3551 * Already completed failover or timedout. 3552 * Waiting for vhci_efo_done to terminate this scsi_watch. 3553 */ 3554 return (0); 3555 } 3556 3557 ASSERT(svp != NULL); 3558 vlun = svp->svp_svl; 3559 ASSERT(vlun != NULL); 3560 ASSERT(VHCI_LUN_IS_HELD(vlun)); 3561 vlun->svl_efo_update_path = 0; 3562 vdip = ddi_get_parent(vlun->svl_dip); 3563 vhci = ddi_get_soft_state(vhci_softstate, 3564 ddi_get_instance(vdip)); 3565 3566 updt_paths = 0; 3567 3568 if (pkt->pkt_reason != CMD_CMPLT) { 3569 if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) { 3570 swarg->svs_release_lun = 1; 3571 goto done; 3572 } 3573 return (0); 3574 } 3575 if (*((unsigned char *)statusp) == STATUS_CHECK) { 3576 rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep, 3577 vlun->svl_fops_ctpriv); 3578 switch (rval) { 3579 /* 3580 * Only update path states in case path is definitely 3581 * inactive, or no failover occurred. For all other 3582 * check conditions continue pinging. A unexpected 3583 * check condition shouldn't cause pinging to complete 3584 * prematurely. 
3585 */ 3586 case SCSI_SENSE_INACTIVE: 3587 case SCSI_SENSE_NOFAILOVER: 3588 updt_paths = 1; 3589 break; 3590 default: 3591 if ((ddi_get_time() - swarg->svs_tos) 3592 >= VHCI_EXTFO_TIMEOUT) { 3593 swarg->svs_release_lun = 1; 3594 goto done; 3595 } 3596 return (0); 3597 } 3598 } else if (*((unsigned char *)statusp) == 3599 STATUS_RESERVATION_CONFLICT) { 3600 updt_paths = 1; 3601 } else if ((*((unsigned char *)statusp)) & 3602 (STATUS_BUSY | STATUS_QFULL)) { 3603 return (0); 3604 } 3605 if ((*((unsigned char *)statusp) == STATUS_GOOD) || 3606 (updt_paths == 1)) { 3607 /* 3608 * we got here because we had detected an 3609 * externally initiated failover; things 3610 * have settled down now, so let's 3611 * start up a task to update the 3612 * path states and target port group 3613 */ 3614 vlun->svl_efo_update_path = 1; 3615 swarg->svs_done = 1; 3616 vlun->svl_swarg = swarg; 3617 vlun->svl_flags |= VLUN_UPDATE_TPG; 3618 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq, 3619 vhci_update_pathstates, (void *)vlun, 3620 KM_SLEEP); 3621 return (0); 3622 } 3623 if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) { 3624 swarg->svs_release_lun = 1; 3625 goto done; 3626 } 3627 return (0); 3628 done: 3629 swarg->svs_done = 1; 3630 (void) taskq_dispatch(vhci->vhci_taskq, 3631 vhci_efo_done, (void *)swarg, KM_SLEEP); 3632 return (0); 3633 } 3634 3635 /* 3636 * vhci_efo_done: 3637 * cleanly terminates scsi_watch and free up resources. 3638 * Called as taskq function in vhci_efo_watch_cb for EFO timeout condition 3639 * or by vhci_update_path_states invoked during external initiated 3640 * failover completion. 3641 */ 3642 static void 3643 vhci_efo_done(void *arg) 3644 { 3645 scsi_vhci_lun_t *vlun; 3646 scsi_vhci_swarg_t *swarg = (scsi_vhci_swarg_t *)arg; 3647 scsi_vhci_priv_t *svp = swarg->svs_svp; 3648 ASSERT(svp); 3649 3650 vlun = svp->svp_svl; 3651 ASSERT(vlun); 3652 3653 /* Wait for clean termination of scsi_watch */ 3654 (void) scsi_watch_request_terminate(svp->svp_sw_token, 3655 SCSI_WATCH_TERMINATE_ALL_WAIT); 3656 svp->svp_sw_token = NULL; 3657 3658 /* release path and freeup resources to indicate failover completion */ 3659 mdi_rele_path(swarg->svs_pi); 3660 if (swarg->svs_release_lun) { 3661 VHCI_RELEASE_LUN(vlun); 3662 } 3663 kmem_free((void *)swarg, sizeof (*swarg)); 3664 } 3665 3666 /* 3667 * Update the path states 3668 * vlun should be HELD when this is invoked. 3669 * Calls vhci_efo_done to cleanup resources allocated for EFO. 
3670 */ 3671 void 3672 vhci_update_pathstates(void *arg) 3673 { 3674 mdi_pathinfo_t *pip, *npip; 3675 dev_info_t *dip, *pdip; 3676 struct scsi_failover_ops *fo; 3677 struct scsi_vhci_priv *svp; 3678 struct scsi_device *psd; 3679 struct scsi_path_opinfo opinfo; 3680 char *pclass, *tptr; 3681 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg; 3682 int sps; /* mdi_select_path() status */ 3683 char *cpath, *dpath; 3684 struct scsi_vhci *vhci; 3685 struct scsi_pkt *pkt; 3686 struct buf *bp; 3687 int reserve_conflict = 0; 3688 3689 ASSERT(VHCI_LUN_IS_HELD(vlun)); 3690 dip = vlun->svl_dip; 3691 pip = npip = NULL; 3692 3693 vhci = ddi_get_soft_state(vhci_softstate, 3694 ddi_get_instance(ddi_get_parent(dip))); 3695 3696 sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH | 3697 MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip); 3698 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 3699 goto done; 3700 } 3701 3702 fo = vlun->svl_fops; 3703 do { 3704 pip = npip; 3705 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 3706 psd = svp->svp_psd; 3707 if (fo->sfo_path_get_opinfo(psd, &opinfo, 3708 vlun->svl_fops_ctpriv) != 0) { 3709 sps = mdi_select_path(dip, NULL, 3710 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH | 3711 MDI_SELECT_NO_PREFERRED), pip, &npip); 3712 mdi_rele_path(pip); 3713 continue; 3714 } 3715 3716 if (mdi_prop_lookup_string(pip, "path-class", &pclass) != 3717 MDI_SUCCESS) { 3718 VHCI_DEBUG(1, (CE_NOTE, NULL, 3719 "!vhci_update_pathstates: prop lookup failed for " 3720 "path 0x%p\n", (void *)pip)); 3721 sps = mdi_select_path(dip, NULL, 3722 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH | 3723 MDI_SELECT_NO_PREFERRED), pip, &npip); 3724 mdi_rele_path(pip); 3725 continue; 3726 } 3727 3728 /* 3729 * Need to update the "path-class" property 3730 * value in the device tree if different 3731 * from the existing value. 3732 */ 3733 if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) { 3734 (void) mdi_prop_update_string(pip, "path-class", 3735 opinfo.opinfo_path_attr); 3736 } 3737 3738 /* 3739 * Only change the state if needed. i.e. Don't call 3740 * mdi_pi_set_state to ONLINE a path if its already 3741 * ONLINE. Same for STANDBY paths. 
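		 * Skipping a redundant transition also skips the console
		 * logging and the reservation-conflict probe done below when
		 * a path newly goes ONLINE.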
3742 */ 3743 3744 if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE || 3745 opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) { 3746 if (!(MDI_PI_IS_ONLINE(pip))) { 3747 VHCI_DEBUG(1, (CE_NOTE, NULL, 3748 "!vhci_update_pathstates: marking path" 3749 " 0x%p as ONLINE\n", (void *)pip)); 3750 pdip = mdi_pi_get_phci(pip); 3751 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3752 dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3753 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s" 3754 " (%s%d): path %s (%s%d) target address %s" 3755 " is now ONLINE because of" 3756 " an externally initiated failover", 3757 ddi_pathname(dip, cpath), 3758 ddi_driver_name(dip), 3759 ddi_get_instance(dip), 3760 ddi_pathname(pdip, dpath), 3761 ddi_driver_name(pdip), 3762 ddi_get_instance(pdip), 3763 mdi_pi_get_addr(pip)); 3764 kmem_free(cpath, MAXPATHLEN); 3765 kmem_free(dpath, MAXPATHLEN); 3766 mdi_pi_set_state(pip, 3767 MDI_PATHINFO_STATE_ONLINE); 3768 mdi_pi_set_preferred(pip, 3769 opinfo.opinfo_preferred); 3770 tptr = kmem_alloc(strlen 3771 (opinfo.opinfo_path_attr)+1, KM_SLEEP); 3772 (void) strlcpy(tptr, opinfo.opinfo_path_attr, 3773 (strlen(opinfo.opinfo_path_attr)+1)); 3774 mutex_enter(&vlun->svl_mutex); 3775 if (vlun->svl_active_pclass != NULL) { 3776 kmem_free(vlun->svl_active_pclass, 3777 strlen(vlun->svl_active_pclass)+1); 3778 } 3779 vlun->svl_active_pclass = tptr; 3780 if (vlun->svl_waiting_for_activepath) { 3781 vlun->svl_waiting_for_activepath = 0; 3782 } 3783 mutex_exit(&vlun->svl_mutex); 3784 /* Check for Reservation Conflict */ 3785 bp = scsi_alloc_consistent_buf( 3786 &svp->svp_psd->sd_address, 3787 (struct buf *)NULL, DEV_BSIZE, B_READ, 3788 NULL, NULL); 3789 if (!bp) { 3790 VHCI_DEBUG(1, (CE_NOTE, NULL, 3791 "vhci_update_pathstates: " 3792 "!No resources (buf)\n")); 3793 mdi_rele_path(pip); 3794 goto done; 3795 } 3796 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, 3797 NULL, bp, CDB_GROUP1, 3798 sizeof (struct scsi_arq_status), 0, 3799 PKT_CONSISTENT, NULL, NULL); 3800 if (pkt) { 3801 (void) scsi_setup_cdb((union scsi_cdb *) 3802 (uintptr_t)pkt->pkt_cdbp, 3803 SCMD_READ, 1, 1, 0); 3804 pkt->pkt_time = 3*30; 3805 pkt->pkt_flags = FLAG_NOINTR; 3806 pkt->pkt_path_instance = 3807 mdi_pi_get_path_instance(pip); 3808 3809 if ((scsi_transport(pkt) == 3810 TRAN_ACCEPT) && (pkt->pkt_reason 3811 == CMD_CMPLT) && (SCBP_C(pkt) == 3812 STATUS_RESERVATION_CONFLICT)) { 3813 reserve_conflict = 1; 3814 } 3815 scsi_destroy_pkt(pkt); 3816 } 3817 scsi_free_consistent_buf(bp); 3818 } else if (MDI_PI_IS_ONLINE(pip)) { 3819 if (strcmp(pclass, opinfo.opinfo_path_attr) 3820 != 0) { 3821 mdi_pi_set_preferred(pip, 3822 opinfo.opinfo_preferred); 3823 mutex_enter(&vlun->svl_mutex); 3824 if (vlun->svl_active_pclass == NULL || 3825 strcmp(opinfo.opinfo_path_attr, 3826 vlun->svl_active_pclass) != 0) { 3827 mutex_exit(&vlun->svl_mutex); 3828 tptr = kmem_alloc(strlen 3829 (opinfo.opinfo_path_attr)+1, 3830 KM_SLEEP); 3831 (void) strlcpy(tptr, 3832 opinfo.opinfo_path_attr, 3833 (strlen 3834 (opinfo.opinfo_path_attr) 3835 +1)); 3836 mutex_enter(&vlun->svl_mutex); 3837 } else { 3838 /* 3839 * No need to update 3840 * svl_active_pclass 3841 */ 3842 tptr = NULL; 3843 mutex_exit(&vlun->svl_mutex); 3844 } 3845 if (tptr) { 3846 if (vlun->svl_active_pclass 3847 != NULL) { 3848 kmem_free(vlun-> 3849 svl_active_pclass, 3850 strlen(vlun-> 3851 svl_active_pclass) 3852 +1); 3853 } 3854 vlun->svl_active_pclass = tptr; 3855 mutex_exit(&vlun->svl_mutex); 3856 } 3857 } 3858 } 3859 } else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) && 3860 
!(MDI_PI_IS_STANDBY(pip))) { 3861 VHCI_DEBUG(1, (CE_NOTE, NULL, 3862 "!vhci_update_pathstates: marking path" 3863 " 0x%p as STANDBY\n", (void *)pip)); 3864 pdip = mdi_pi_get_phci(pip); 3865 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3866 dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3867 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s" 3868 " (%s%d): path %s (%s%d) target address %s" 3869 " is now STANDBY because of" 3870 " an externally initiated failover", 3871 ddi_pathname(dip, cpath), 3872 ddi_driver_name(dip), 3873 ddi_get_instance(dip), 3874 ddi_pathname(pdip, dpath), 3875 ddi_driver_name(pdip), 3876 ddi_get_instance(pdip), 3877 mdi_pi_get_addr(pip)); 3878 kmem_free(cpath, MAXPATHLEN); 3879 kmem_free(dpath, MAXPATHLEN); 3880 mdi_pi_set_state(pip, 3881 MDI_PATHINFO_STATE_STANDBY); 3882 mdi_pi_set_preferred(pip, 3883 opinfo.opinfo_preferred); 3884 mutex_enter(&vlun->svl_mutex); 3885 if (vlun->svl_active_pclass != NULL) { 3886 if (strcmp(vlun->svl_active_pclass, 3887 opinfo.opinfo_path_attr) == 0) { 3888 kmem_free(vlun-> 3889 svl_active_pclass, 3890 strlen(vlun-> 3891 svl_active_pclass)+1); 3892 vlun->svl_active_pclass = NULL; 3893 } 3894 } 3895 mutex_exit(&vlun->svl_mutex); 3896 } 3897 (void) mdi_prop_free(pclass); 3898 sps = mdi_select_path(dip, NULL, 3899 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH | 3900 MDI_SELECT_NO_PREFERRED), pip, &npip); 3901 mdi_rele_path(pip); 3902 3903 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 3904 3905 /* 3906 * Check to see if this vlun has an active SCSI-II RESERVE. If so 3907 * clear the reservation by sending a reset, so the host doesn't 3908 * receive a reservation conflict. 3909 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd 3910 * of the reset, explicitly. 3911 */ 3912 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 3913 if (reserve_conflict && (vlun->svl_xlf_capable == 0)) { 3914 (void) vhci_recovery_reset(vlun, 3915 &svp->svp_psd->sd_address, FALSE, 3916 VHCI_DEPTH_TARGET); 3917 } 3918 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 3919 mutex_enter(&vhci->vhci_mutex); 3920 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 3921 &vhci->vhci_reset_notify_listf); 3922 mutex_exit(&vhci->vhci_mutex); 3923 } 3924 if (vlun->svl_flags & VLUN_UPDATE_TPG) { 3925 /* 3926 * Update the AccessState of related MP-API TPGs 3927 */ 3928 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 3929 vlun->svl_flags &= ~VLUN_UPDATE_TPG; 3930 } 3931 done: 3932 if (vlun->svl_efo_update_path) { 3933 vlun->svl_efo_update_path = 0; 3934 vhci_efo_done(vlun->svl_swarg); 3935 vlun->svl_swarg = 0; 3936 } 3937 VHCI_RELEASE_LUN(vlun); 3938 } 3939 3940 /* ARGSUSED */ 3941 static int 3942 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 3943 { 3944 scsi_hba_tran_t *hba = NULL; 3945 struct scsi_device *psd = NULL; 3946 scsi_vhci_lun_t *vlun = NULL; 3947 dev_info_t *pdip = NULL; 3948 dev_info_t *tgt_dip; 3949 struct scsi_vhci *vhci; 3950 char *guid; 3951 scsi_vhci_priv_t *svp = NULL; 3952 int rval = MDI_FAILURE; 3953 int vlun_alloced = 0; 3954 3955 ASSERT(vdip != NULL); 3956 ASSERT(pip != NULL); 3957 3958 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 3959 ASSERT(vhci != NULL); 3960 3961 pdip = mdi_pi_get_phci(pip); 3962 ASSERT(pdip != NULL); 3963 3964 hba = ddi_get_driver_private(pdip); 3965 ASSERT(hba != NULL); 3966 3967 tgt_dip = mdi_pi_get_client(pip); 3968 ASSERT(tgt_dip != NULL); 3969 3970 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS, 3971 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 3972 
VHCI_DEBUG(1, (CE_WARN, NULL, 3973 "vhci_pathinfo_init: lun guid property failed")); 3974 goto failure; 3975 } 3976 3977 vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced); 3978 ddi_prop_free(guid); 3979 3980 vlun->svl_dip = tgt_dip; 3981 3982 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP); 3983 svp->svp_svl = vlun; 3984 3985 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip); 3986 mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL); 3987 cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL); 3988 3989 psd = kmem_zalloc(sizeof (*psd), KM_SLEEP); 3990 mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL); 3991 3992 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) { 3993 /* 3994 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to 3995 * scsi_device in the scsi_address structure. This allows an 3996 * an HBA driver to find its scsi_device(9S) and 3997 * per-scsi_device(9S) HBA private data given a 3998 * scsi_address(9S) by using scsi_address_device(9F) and 3999 * scsi_device_hba_private_get(9F)). 4000 */ 4001 psd->sd_address.a.a_sd = psd; 4002 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4003 /* 4004 * Clone transport structure if requested, so 4005 * Self enumerating HBAs always need to use cloning 4006 */ 4007 scsi_hba_tran_t *clone = 4008 kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP); 4009 bcopy(hba, clone, sizeof (scsi_hba_tran_t)); 4010 hba = clone; 4011 hba->tran_sd = psd; 4012 } else { 4013 /* 4014 * SPI pHCI unit-address. If we ever need to support this 4015 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo 4016 * node unit-address properties. For now we fail... 4017 */ 4018 goto failure; 4019 } 4020 4021 psd->sd_dev = tgt_dip; 4022 psd->sd_address.a_hba_tran = hba; 4023 4024 /* 4025 * Mark scsi_device as being associated with a pathinfo node. For 4026 * a scsi_device structure associated with a devinfo node, 4027 * scsi_ctlops_initchild sets this field to NULL. 4028 */ 4029 psd->sd_pathinfo = pip; 4030 4031 /* 4032 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with 4033 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all 4034 * mpxio-capable pHCI drivers use SCSA enumeration services (or at 4035 * least have been changed to use sd_pathinfo instead). 
4036 */ 4037 psd->sd_private = (caddr_t)pip; 4038 4039 /* See scsi_hba.c for info on sd_tran_safe kludge */ 4040 psd->sd_tran_safe = hba; 4041 4042 svp->svp_psd = psd; 4043 mdi_pi_set_vhci_private(pip, (caddr_t)svp); 4044 4045 /* 4046 * call hba's target init entry point if it exists 4047 */ 4048 if (hba->tran_tgt_init != NULL) { 4049 if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip, 4050 hba, psd)) != DDI_SUCCESS) { 4051 VHCI_DEBUG(1, (CE_WARN, pdip, 4052 "!vhci_pathinfo_init: tran_tgt_init failed for " 4053 "path=0x%p rval=%x", (void *)pip, rval)); 4054 goto failure; 4055 } 4056 } 4057 4058 svp->svp_new_path = 1; 4059 4060 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n", 4061 (void *)pip)); 4062 return (MDI_SUCCESS); 4063 4064 failure: 4065 if (psd) { 4066 mutex_destroy(&psd->sd_mutex); 4067 kmem_free(psd, sizeof (*psd)); 4068 } 4069 if (svp) { 4070 mdi_pi_set_vhci_private(pip, NULL); 4071 mutex_destroy(&svp->svp_mutex); 4072 cv_destroy(&svp->svp_cv); 4073 kmem_free(svp, sizeof (*svp)); 4074 } 4075 if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE)) 4076 kmem_free(hba, sizeof (scsi_hba_tran_t)); 4077 4078 if (vlun_alloced) 4079 vhci_lun_free(tgt_dip); 4080 4081 return (rval); 4082 } 4083 4084 /* ARGSUSED */ 4085 static int 4086 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 4087 { 4088 scsi_hba_tran_t *hba = NULL; 4089 struct scsi_device *psd = NULL; 4090 dev_info_t *pdip = NULL; 4091 dev_info_t *cdip = NULL; 4092 scsi_vhci_priv_t *svp = NULL; 4093 4094 ASSERT(vdip != NULL); 4095 ASSERT(pip != NULL); 4096 4097 pdip = mdi_pi_get_phci(pip); 4098 ASSERT(pdip != NULL); 4099 4100 cdip = mdi_pi_get_client(pip); 4101 ASSERT(cdip != NULL); 4102 4103 hba = ddi_get_driver_private(pdip); 4104 ASSERT(hba != NULL); 4105 4106 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED); 4107 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4108 if (svp == NULL) { 4109 /* path already freed. Nothing to do. */ 4110 return (MDI_SUCCESS); 4111 } 4112 4113 psd = svp->svp_psd; 4114 ASSERT(psd != NULL); 4115 4116 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) { 4117 /* Verify plumbing */ 4118 ASSERT(psd->sd_address.a_hba_tran == hba); 4119 ASSERT(psd->sd_address.a.a_sd == psd); 4120 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4121 /* Switch to cloned scsi_hba_tran(9S) structure */ 4122 hba = psd->sd_address.a_hba_tran; 4123 ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE); 4124 ASSERT(hba->tran_sd == psd); 4125 } 4126 4127 if (hba->tran_tgt_free != NULL) { 4128 (*hba->tran_tgt_free) (pdip, cdip, hba, psd); 4129 } 4130 mutex_destroy(&psd->sd_mutex); 4131 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4132 kmem_free(hba, sizeof (*hba)); 4133 } 4134 4135 mdi_pi_set_vhci_private(pip, NULL); 4136 4137 /* 4138 * Free the pathinfo related scsi_device inquiry data. Note that this 4139 * matches what happens for scsi_hba.c devinfo case at uninitchild time. 4140 */ 4141 if (psd->sd_inq) 4142 kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry)); 4143 kmem_free((caddr_t)psd, sizeof (*psd)); 4144 4145 mutex_destroy(&svp->svp_mutex); 4146 cv_destroy(&svp->svp_cv); 4147 kmem_free((caddr_t)svp, sizeof (*svp)); 4148 4149 /* 4150 * If this is the last path to the client, 4151 * then free up the vlun as well. 
4152 */ 4153 if (mdi_client_get_path_count(cdip) == 1) { 4154 vhci_lun_free(cdip); 4155 } 4156 4157 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n", 4158 (void *)pip)); 4159 return (MDI_SUCCESS); 4160 } 4161 4162 /* ARGSUSED */ 4163 static int 4164 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip, 4165 mdi_pathinfo_state_t state, uint32_t ext_state, int flags) 4166 { 4167 int rval = MDI_SUCCESS; 4168 scsi_vhci_priv_t *svp; 4169 scsi_vhci_lun_t *vlun; 4170 int held; 4171 int op = (flags & 0xf00) >> 8; 4172 struct scsi_vhci *vhci; 4173 4174 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4175 4176 if (flags & MDI_EXT_STATE_CHANGE) { 4177 /* 4178 * We do not want to issue any commands down the path in case 4179 * sync flag is set. Lower layers might not be ready to accept 4180 * any I/O commands. 4181 */ 4182 if (op == DRIVER_DISABLE) 4183 return (MDI_SUCCESS); 4184 4185 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4186 if (svp == NULL) { 4187 return (MDI_FAILURE); 4188 } 4189 vlun = svp->svp_svl; 4190 4191 if (flags & MDI_BEFORE_STATE_CHANGE) { 4192 /* 4193 * Hold the LUN. 4194 */ 4195 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 4196 if (flags & MDI_DISABLE_OP) { 4197 /* 4198 * Issue scsi reset if it happens to be 4199 * reserved path. 4200 */ 4201 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 4202 /* 4203 * if reservation pending on 4204 * this path, dont' mark the 4205 * path busy 4206 */ 4207 if (op == DRIVER_DISABLE_TRANSIENT) { 4208 VHCI_DEBUG(1, (CE_NOTE, NULL, 4209 "!vhci_pathinfo" 4210 "_state_change (pip:%p): " 4211 " reservation: fail busy\n", 4212 (void *)pip)); 4213 return (MDI_FAILURE); 4214 } 4215 if (pip == vlun->svl_resrv_pip) { 4216 if (vhci_recovery_reset( 4217 svp->svp_svl, 4218 &svp->svp_psd->sd_address, 4219 TRUE, 4220 VHCI_DEPTH_TARGET) == 0) { 4221 VHCI_DEBUG(1, 4222 (CE_NOTE, NULL, 4223 "!vhci_pathinfo" 4224 "_state_change " 4225 " (pip:%p): " 4226 "reset failed, " 4227 "give up!\n", 4228 (void *)pip)); 4229 } 4230 vlun->svl_flags &= 4231 ~VLUN_RESERVE_ACTIVE_FLG; 4232 } 4233 } 4234 } else if (flags & MDI_ENABLE_OP) { 4235 if (((vhci->vhci_conf_flags & 4236 VHCI_CONF_FLAGS_AUTO_FAILBACK) == 4237 VHCI_CONF_FLAGS_AUTO_FAILBACK) && 4238 MDI_PI_IS_USER_DISABLE(pip) && 4239 MDI_PI_IS_STANDBY(pip)) { 4240 struct scsi_failover_ops *fo; 4241 char *best_pclass, *pclass = NULL; 4242 int best_class, rv; 4243 /* 4244 * Failback if enabling a standby path 4245 * and it is the primary class or 4246 * preferred class 4247 */ 4248 best_class = mdi_pi_get_preferred(pip); 4249 if (best_class == 0) { 4250 /* 4251 * if not preferred - compare 4252 * path-class with class 4253 */ 4254 fo = vlun->svl_fops; 4255 (void) fo->sfo_pathclass_next( 4256 NULL, &best_pclass, 4257 vlun->svl_fops_ctpriv); 4258 pclass = NULL; 4259 rv = mdi_prop_lookup_string(pip, 4260 "path-class", &pclass); 4261 if (rv != MDI_SUCCESS || 4262 pclass == NULL) { 4263 vhci_log(CE_NOTE, vdip, 4264 "!path-class " 4265 " lookup " 4266 "failed. 
rv: %d" 4267 "class: %p", rv, 4268 (void *)pclass); 4269 } else if (strncmp(pclass, 4270 best_pclass, 4271 strlen(best_pclass)) == 0) { 4272 best_class = 1; 4273 } 4274 if (rv == MDI_SUCCESS && 4275 pclass != NULL) { 4276 rv = mdi_prop_free( 4277 pclass); 4278 if (rv != 4279 DDI_PROP_SUCCESS) { 4280 vhci_log( 4281 CE_NOTE, 4282 vdip, 4283 "!path-" 4284 "class" 4285 " free" 4286 " failed" 4287 " rv: %d" 4288 " class: " 4289 "%p", 4290 rv, 4291 (void *) 4292 pclass); 4293 } 4294 } 4295 } 4296 if (best_class == 1) { 4297 VHCI_DEBUG(1, (CE_NOTE, NULL, 4298 "preferred path: %p " 4299 "USER_DISABLE->USER_ENABLE " 4300 "transition for lun %s\n", 4301 (void *)pip, 4302 vlun->svl_lun_wwn)); 4303 (void) taskq_dispatch( 4304 vhci->vhci_taskq, 4305 vhci_initiate_auto_failback, 4306 (void *) vlun, KM_SLEEP); 4307 } 4308 } 4309 /* 4310 * if PGR is active, revalidate key and 4311 * register on this path also, if key is 4312 * still valid 4313 */ 4314 sema_p(&vlun->svl_pgr_sema); 4315 if (vlun->svl_pgr_active) 4316 (void) 4317 vhci_pgr_validate_and_register(svp); 4318 sema_v(&vlun->svl_pgr_sema); 4319 /* 4320 * Inform target driver about any 4321 * reservations to be reinstated if target 4322 * has dropped reservation during the busy 4323 * period. 4324 */ 4325 mutex_enter(&vhci->vhci_mutex); 4326 scsi_hba_reset_notify_callback( 4327 &vhci->vhci_mutex, 4328 &vhci->vhci_reset_notify_listf); 4329 mutex_exit(&vhci->vhci_mutex); 4330 } 4331 } 4332 if (flags & MDI_AFTER_STATE_CHANGE) { 4333 if (flags & MDI_ENABLE_OP) { 4334 mutex_enter(&vhci_global_mutex); 4335 cv_broadcast(&vhci_cv); 4336 mutex_exit(&vhci_global_mutex); 4337 } 4338 if (vlun->svl_setcap_done) { 4339 (void) vhci_pHCI_cap(&svp->svp_psd->sd_address, 4340 "sector-size", vlun->svl_sector_size, 4341 1, pip); 4342 } 4343 4344 /* 4345 * Release the LUN 4346 */ 4347 VHCI_RELEASE_LUN(vlun); 4348 4349 /* 4350 * Path transition is complete. 4351 * Run callback to indicate target driver to 4352 * retry to prevent IO starvation. 4353 */ 4354 if (scsi_callback_id != 0) { 4355 ddi_run_callback(&scsi_callback_id); 4356 } 4357 } 4358 } else { 4359 switch (state) { 4360 case MDI_PATHINFO_STATE_ONLINE: 4361 rval = vhci_pathinfo_online(vdip, pip, flags); 4362 break; 4363 4364 case MDI_PATHINFO_STATE_OFFLINE: 4365 rval = vhci_pathinfo_offline(vdip, pip, flags); 4366 break; 4367 4368 default: 4369 break; 4370 } 4371 /* 4372 * Path transition is complete. 4373 * Run callback to indicate target driver to 4374 * retry to prevent IO starvation. 4375 */ 4376 if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) { 4377 ddi_run_callback(&scsi_callback_id); 4378 } 4379 return (rval); 4380 } 4381 4382 return (MDI_SUCCESS); 4383 } 4384 4385 /* 4386 * Parse the mpxio load balancing options. The datanameptr 4387 * will point to a string containing the load-balance-options value. 4388 * The load-balance-options value will be a property that 4389 * defines the load-balance algorithm and any arguments to that 4390 * algorithm. 4391 * For example: 4392 * device-type-mpxio-options-list= 4393 * "device-type=SUN SENA", "load-balance-options=logical-block-options" 4394 * "device-type=SUN SE6920", "round-robin-options"; 4395 * logical-block-options="load-balance=logical-block", "region-size=15"; 4396 * round-robin-options="load-balance=round-robin"; 4397 * 4398 * If the load-balance is not defined the load balance algorithm will 4399 * default to the global setting. 
There will be default values assigned 4400 * to the arguments (region-size=18) and if an argument is one 4401 * that is not known, it will be ignored. 4402 */ 4403 static void 4404 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip, 4405 caddr_t datanameptr) 4406 { 4407 char *dataptr, *next_entry; 4408 caddr_t config_list = NULL; 4409 int config_list_len = 0, list_len = 0; 4410 int region_size = -1; 4411 client_lb_t load_balance; 4412 4413 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr, 4414 (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) { 4415 return; 4416 } 4417 4418 list_len = config_list_len; 4419 next_entry = config_list; 4420 while (config_list_len > 0) { 4421 dataptr = next_entry; 4422 4423 if (strncmp(mdi_load_balance, dataptr, 4424 strlen(mdi_load_balance)) == 0) { 4425 /* get the load-balance scheme */ 4426 dataptr += strlen(mdi_load_balance) + 1; 4427 if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) { 4428 (void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR); 4429 load_balance = LOAD_BALANCE_RR; 4430 } else if (strcmp(dataptr, 4431 LOAD_BALANCE_PROP_LBA) == 0) { 4432 (void) mdi_set_lb_policy(cdip, 4433 LOAD_BALANCE_LBA); 4434 load_balance = LOAD_BALANCE_LBA; 4435 } else if (strcmp(dataptr, 4436 LOAD_BALANCE_PROP_NONE) == 0) { 4437 (void) mdi_set_lb_policy(cdip, 4438 LOAD_BALANCE_NONE); 4439 load_balance = LOAD_BALANCE_NONE; 4440 } 4441 } else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE, 4442 strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) { 4443 int i = 0; 4444 char *ptr; 4445 char *tmp; 4446 4447 tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1); 4448 /* check for numeric value */ 4449 for (ptr = tmp; i < strlen(tmp); i++, ptr++) { 4450 if (!isdigit(*ptr)) { 4451 cmn_err(CE_WARN, 4452 "Illegal region size: %s." 4453 " Setting to default value: %d", 4454 tmp, 4455 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4456 region_size = 4457 LOAD_BALANCE_DEFAULT_REGION_SIZE; 4458 break; 4459 } 4460 } 4461 if (i >= strlen(tmp)) { 4462 region_size = stoi(&tmp); 4463 } 4464 (void) mdi_set_lb_region_size(cdip, region_size); 4465 } 4466 config_list_len -= (strlen(next_entry) + 1); 4467 next_entry += strlen(next_entry) + 1; 4468 } 4469 #ifdef DEBUG 4470 if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) { 4471 VHCI_DEBUG(1, (CE_NOTE, dip, 4472 "!vhci_parse_mpxio_lb_options: region-size: %d" 4473 "only valid for load-balance=logical-block\n", 4474 region_size)); 4475 } 4476 #endif 4477 if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) { 4478 VHCI_DEBUG(1, (CE_NOTE, dip, 4479 "!vhci_parse_mpxio_lb_options: No region-size" 4480 " defined load-balance=logical-block." 4481 " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE)); 4482 (void) mdi_set_lb_region_size(cdip, 4483 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4484 } 4485 if (list_len > 0) { 4486 kmem_free(config_list, list_len); 4487 } 4488 } 4489 4490 /* 4491 * Parse the device-type-mpxio-options-list looking for the key of 4492 * "load-balance-options". If found, parse the load balancing options. 4493 * Check the comment of the vhci_get_device_type_mpxio_options() 4494 * for the device-type-mpxio-options-list. 
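 *
 * As an illustration of the flow (a sketch based on the example entries
 * above, not an exhaustive description): when
 * vhci_get_device_type_mpxio_options() matches "device-type=SUN SENA", it
 * calls this function with datanameptr pointing at the strings that follow
 * that entry, e.g. "load-balance-options=logical-block-options"; the value
 * part ("logical-block-options") names a further property whose contents
 * ("load-balance=logical-block", "region-size=15") are then fetched and
 * parsed by vhci_parse_mpxio_lb_options().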
 */
static void
vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
    caddr_t datanameptr, int list_len)
{
	char *dataptr;
	int len;

	/*
	 * get the data list
	 */
	dataptr = datanameptr;
	len = 0;
	while (len < list_len &&
	    strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
	    != 0) {
		if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
		    strlen(LOAD_BALANCE_OPTIONS)) == 0) {
			len += strlen(LOAD_BALANCE_OPTIONS) + 1;
			dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
			vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
		}
		len += strlen(dataptr) + 1;
		dataptr += strlen(dataptr) + 1;
	}
}

/*
 * Compare the inquiry string returned from the device with the device-type
 * entries. Check for the existence of the device-type-mpxio-options-list
 * and, if found, parse the list looking for a match between the device-type
 * value and the inquiry string returned from the device. If a match
 * is found, parse the mpxio options list. The format of the
 * device-type-mpxio-options-list is:
 * device-type-mpxio-options-list=
 * "device-type=SUN SENA", "load-balance-options=logical-block-options"
 * "device-type=SUN SE6920", "round-robin-options";
 * logical-block-options="load-balance=logical-block", "region-size=15";
 * round-robin-options="load-balance=round-robin";
 */
void
vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
    struct scsi_device *devp)
{
	caddr_t config_list = NULL;
	caddr_t vidptr, datanameptr;
	int vidlen, dupletlen = 0;
	int config_list_len = 0, len;
	struct scsi_inquiry *inq = devp->sd_inq;

	/*
	 * Look up the device-type-mpxio-options-list and walk through
	 * the list, comparing the vendor ids returned by the earlier
	 * inquiry command with the vids in the list; if there is a
	 * match, look up the mpxio-options value.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    MPXIO_OPTIONS_LIST,
	    (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {

		/*
		 * Compare vids in each duplet - if it matches,
		 * parse the mpxio options list.
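		 * (A note on the comparison below, offered as an aside:
		 * the "device-type=" value may name both a vendor and a
		 * product, so vidlen can exceed the 8-byte inq_vid field.
		 * Since inq_vid and inq_pid are adjacent in
		 * struct scsi_inquiry, the bcmp() effectively matches
		 * against the concatenated vid/pid returned by the
		 * earlier INQUIRY.)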
		 */
		for (len = config_list_len, vidptr = config_list; len > 0;
		    len -= dupletlen) {

			dupletlen = 0;

			if (strlen(vidptr) != 0 &&
			    strncmp(vidptr, DEVICE_TYPE_STR,
			    strlen(DEVICE_TYPE_STR)) == 0) {
				/* point to next duplet */
				datanameptr = vidptr + strlen(vidptr) + 1;
				/* add len of this duplet */
				dupletlen += strlen(vidptr) + 1;
				/* get to device type */
				vidptr += strlen(DEVICE_TYPE_STR) + 1;
				vidlen = strlen(vidptr);
				if ((vidlen != 0) &&
				    bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
					vhci_parse_mpxio_options(dip, cdip,
					    datanameptr, len - dupletlen);
					break;
				}
				/* get to next duplet */
				vidptr += strlen(vidptr) + 1;
			}
			/* get to the next device-type */
			while (len - dupletlen > 0 &&
			    strlen(vidptr) != 0 &&
			    strncmp(vidptr, DEVICE_TYPE_STR,
			    strlen(DEVICE_TYPE_STR)) != 0) {
				dupletlen += strlen(vidptr) + 1;
				vidptr += strlen(vidptr) + 1;
			}
		}
		if (config_list_len > 0) {
			kmem_free(config_list, config_list_len);
		}
	}
}

static int
vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip,
    struct scsi_failover_ops *fo, scsi_vhci_lun_t *vlun,
    struct scsi_vhci *vhci)
{
	struct scsi_path_opinfo opinfo;
	char *pclass, *best_pclass;

	if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
		    "Failed to get operation info for path:%p\n",
		    (void *)pip));
		return (MDI_FAILURE);
	}
	/* set the xlf capable flag in the vlun for future use */
	vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
	(void) mdi_prop_update_string(pip, "path-class",
	    opinfo.opinfo_path_attr);

	pclass = opinfo.opinfo_path_attr;
	if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
		mutex_enter(&vlun->svl_mutex);
		if (vlun->svl_active_pclass != NULL) {
			if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
				mutex_exit(&vlun->svl_mutex);
				/*
				 * Externally initiated failover has happened;
				 * force the path state to be STANDBY/ONLINE,
				 * next IO will trigger failover and thus
				 * sync-up the pathstates. The reason we don't
				 * sync-up immediately by invoking
				 * vhci_update_pathstates() is that it
				 * needs a VHCI_HOLD_LUN() and we don't
				 * want to block here.
				 *
				 * Further, if the device is an ALUA device,
				 * then failure to exactly match 'pclass' and
				 * 'svl_active_pclass' (as is the case here)
				 * indicates that the currently active path
				 * is a 'non-optimized' path - which means
				 * that 'svl_active_pclass' needs to be
				 * replaced with the opinfo.opinfo_path_attr
				 * value.
				 */

				if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
					char *tptr;

					/*
					 * The device is ALUA compliant. The
					 * state needs to be changed to ONLINE
					 * rather than to STANDBY, which is
					 * what is typically done for an
					 * asymmetric device that is not ALUA
					 * compliant.
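					 * As a concrete (hypothetical)
					 * example: if svl_active_pclass is
					 * still "primary" but this path now
					 * reports itself ACTIVE with
					 * opinfo_path_attr "secondary", an
					 * external failover has activated
					 * the secondary ports; for a TPGS
					 * device the path is simply marked
					 * ONLINE and svl_active_pclass is
					 * replaced just below.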
					 */
					mdi_pi_set_state(pip,
					    MDI_PATHINFO_STATE_ONLINE);
					tptr = kmem_alloc(strlen
					    (opinfo.opinfo_path_attr)+1,
					    KM_SLEEP);
					(void) strlcpy(tptr,
					    opinfo.opinfo_path_attr,
					    (strlen(opinfo.opinfo_path_attr)
					    +1));
					mutex_enter(&vlun->svl_mutex);
					kmem_free(vlun->svl_active_pclass,
					    strlen(vlun->svl_active_pclass)+1);
					vlun->svl_active_pclass = tptr;
					mutex_exit(&vlun->svl_mutex);
				} else {
					/*
					 * Non ALUA device case.
					 */
					mdi_pi_set_state(pip,
					    MDI_PATHINFO_STATE_STANDBY);
				}
				vlun->svl_fo_support = opinfo.opinfo_mode;
				mdi_pi_set_preferred(pip,
				    opinfo.opinfo_preferred);
				return (MDI_SUCCESS);
			}
		} else {
			char *tptr;

			/*
			 * Release the mutex before we try to allocate,
			 * since the allocation may sleep.
			 */
			mutex_exit(&vlun->svl_mutex);
			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_active_pclass = tptr;
		}
		mutex_exit(&vlun->svl_mutex);
		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
		vlun->svl_waiting_for_activepath = 0;
	} else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
		mutex_enter(&vlun->svl_mutex);
		if (vlun->svl_active_pclass == NULL) {
			char *tptr;

			mutex_exit(&vlun->svl_mutex);
			tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
			(void) strlcpy(tptr, pclass, (strlen(pclass)+1));
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_active_pclass = tptr;
		}
		mutex_exit(&vlun->svl_mutex);
		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
		vlun->svl_waiting_for_activepath = 0;
	} else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
		mutex_enter(&vlun->svl_mutex);
		if (vlun->svl_active_pclass != NULL) {
			if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
				mutex_exit(&vlun->svl_mutex);
				/*
				 * externally initiated failover has happened;
				 * force state to ONLINE (see comment above)
				 */
				mdi_pi_set_state(pip,
				    MDI_PATHINFO_STATE_ONLINE);
				vlun->svl_fo_support = opinfo.opinfo_mode;
				mdi_pi_set_preferred(pip,
				    opinfo.opinfo_preferred);
				return (MDI_SUCCESS);
			}
		}
		mutex_exit(&vlun->svl_mutex);
		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);

		/*
		 * Initiate auto-failback, if enabled, for path if path-state
		 * is transitioning from OFFLINE->STANDBY and pathclass is the
		 * preferred pathclass for this storage.
		 * NOTE: In the case where opinfo_path_state is
		 * SCSI_PATH_ACTIVE (above), where the pi state is set to
		 * STANDBY, we don't initiate auto-failback as the next IO
		 * shall take care of this. See comment above.
		 */
		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
		    vlun->svl_fops_ctpriv);
		if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
		    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
		    ((strcmp(pclass, best_pclass) == 0) ||
		    mdi_pi_get_preferred(pip) == 1) &&
		    ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE) ||
		    (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
			VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
			    " OFFLINE->STANDBY transition for lun %s\n",
			    best_pclass, (void *)pip, vlun->svl_lun_wwn));
			(void) taskq_dispatch(vhci->vhci_taskq,
			    vhci_initiate_auto_failback, (void *) vlun,
			    KM_SLEEP);
		}
	}
	vlun->svl_fo_support = opinfo.opinfo_mode;
	mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);

	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
	    " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
	    opinfo.opinfo_rev, opinfo.opinfo_path_state,
	    opinfo.opinfo_preferred, opinfo.opinfo_mode));

	return (MDI_SUCCESS);
}

/*
 * Form the kstat name and call mdi_pi_kstat_create()
 */
void
vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
{
	dev_info_t *tgt_dip;
	dev_info_t *pdip;
	char *guid;
	char *target_port, *target_port_dup;
	char ks_name[KSTAT_STRLEN];
	uint_t pid;
	int by_id;
	mod_hash_val_t hv;

	/* return if we have already allocated kstats */
	if (mdi_pi_kstat_exists(pip))
		return;

	/*
	 * We need instance numbers to create a kstat name, return if we don't
	 * have instance numbers assigned yet.
	 */
	tgt_dip = mdi_pi_get_client(pip);
	pdip = mdi_pi_get_phci(pip);
	if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
		return;

	/*
	 * A path oriented kstat has a ks_name of the form:
	 *
	 *	<client-driver><instance>.t<pid>.<pHCI-driver><instance>
	 *
	 * We maintain a bidirectional 'target-port' to <pid> map,
	 * called targetmap. All pathinfo nodes with the same
	 * 'target-port' map to the same <pid>. The iostat(1M) code,
	 * when parsing a path oriented kstat name, uses the <pid> as
	 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
	 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
	 * this ioctl needs to translate a <pid> to a 'target-port'
	 * even after all pathinfo nodes associated with the
	 * 'target-port' have been destroyed. This is needed to support
	 * consistent first-iteration activity-since-boot iostat(1M)
	 * output. Because of this requirement, the mapping can't be
	 * based on pathinfo information in a devinfo snapshot.
	 */

	/* determine 'target-port' */
	if (mdi_prop_lookup_string(pip,
	    SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) {
		target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
		(void) mdi_prop_free(target_port);
		by_id = 1;
	} else {
		/*
		 * If the pHCI did not set up 'target-port' on this
		 * pathinfo node, assume that our client is the only
		 * one with paths to the device by using the guid
		 * value as the 'target-port'. Since no other client
		 * will have the same guid, no other client will use
		 * the same <pid>. NOTE: a client with an instance
		 * number always has a guid.
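		 *
		 * Either way, the <pid> chosen below is what ends up in the
		 * kstat name. As a purely hypothetical illustration: client
		 * "ssd" instance 3 reached through pHCI "fp" instance 0,
		 * with this 'target-port' (or guid) mapped to <pid> 7,
		 * yields the ks_name "ssd3.t7.fp0".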
4830 */ 4831 (void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, 4832 PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid); 4833 target_port_dup = i_ddi_strdup(guid, KM_SLEEP); 4834 ddi_prop_free(guid); 4835 4836 /* 4837 * For this type of mapping we don't want the 4838 * <id> -> 'target-port' mapping to be made. This 4839 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl 4840 * to fail, and the iostat(1M) long '-n' output will 4841 * still use the <pid>. We do this because we just 4842 * made up the 'target-port' using the guid, and we 4843 * don't want to expose that fact in iostat output. 4844 */ 4845 by_id = 0; 4846 } 4847 4848 /* find/establish <pid> given 'target-port' */ 4849 mutex_enter(&vhci_targetmap_mutex); 4850 if (mod_hash_find(vhci_targetmap_byport, 4851 (mod_hash_key_t)target_port_dup, &hv) == 0) { 4852 pid = (int)(intptr_t)hv; /* mapping exists */ 4853 } else { 4854 pid = vhci_targetmap_pid++; /* new mapping */ 4855 4856 (void) mod_hash_insert(vhci_targetmap_byport, 4857 (mod_hash_key_t)target_port_dup, 4858 (mod_hash_val_t)(intptr_t)pid); 4859 if (by_id) { 4860 (void) mod_hash_insert(vhci_targetmap_bypid, 4861 (mod_hash_key_t)(uintptr_t)pid, 4862 (mod_hash_val_t)(uintptr_t)target_port_dup); 4863 } 4864 target_port_dup = NULL; /* owned by hash */ 4865 } 4866 mutex_exit(&vhci_targetmap_mutex); 4867 4868 /* form kstat name */ 4869 (void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d", 4870 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip), 4871 pid, ddi_driver_name(pdip), ddi_get_instance(pdip)); 4872 4873 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p " 4874 "kstat %s: pid %x <-> port %s\n", (void *)pip, 4875 ks_name, pid, target_port_dup)); 4876 if (target_port_dup) 4877 kmem_free(target_port_dup, strlen(target_port_dup) + 1); 4878 4879 /* call mdi to create kstats with the name we built */ 4880 (void) mdi_pi_kstat_create(pip, ks_name); 4881 } 4882 4883 /* ARGSUSED */ 4884 static int 4885 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 4886 { 4887 scsi_hba_tran_t *hba = NULL; 4888 struct scsi_device *psd = NULL; 4889 scsi_vhci_lun_t *vlun = NULL; 4890 dev_info_t *pdip = NULL; 4891 dev_info_t *cdip; 4892 dev_info_t *tgt_dip; 4893 struct scsi_vhci *vhci; 4894 char *guid; 4895 struct scsi_failover_ops *sfo; 4896 scsi_vhci_priv_t *svp = NULL; 4897 struct scsi_address *ap; 4898 struct scsi_pkt *pkt; 4899 int rval = MDI_FAILURE; 4900 mpapi_item_list_t *list_ptr; 4901 mpapi_lu_data_t *ld; 4902 int ce_type; 4903 4904 ASSERT(vdip != NULL); 4905 ASSERT(pip != NULL); 4906 4907 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4908 ASSERT(vhci != NULL); 4909 4910 pdip = mdi_pi_get_phci(pip); 4911 hba = ddi_get_driver_private(pdip); 4912 ASSERT(hba != NULL); 4913 4914 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4915 ASSERT(svp != NULL); 4916 4917 cdip = mdi_pi_get_client(pip); 4918 ASSERT(cdip != NULL); 4919 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS, 4920 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 4921 VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid " 4922 "property failed")); 4923 goto failure; 4924 } 4925 4926 vlun = vhci_lun_lookup(cdip); 4927 ASSERT(vlun != NULL); 4928 4929 ddi_prop_free(guid); 4930 4931 vlun->svl_dip = mdi_pi_get_client(pip); 4932 ASSERT(vlun->svl_dip != NULL); 4933 4934 psd = svp->svp_psd; 4935 ASSERT(psd != NULL); 4936 4937 /* 4938 * Get inquiry data into pathinfo related scsi_device structure. 
	 * Free sd_inq when the pathinfo related scsi_device structure is
	 * destroyed by vhci_pathinfo_uninit(). In other words, vhci maintains
	 * its own copy of scsi_device and scsi_inquiry data on a per-path
	 * basis.
	 */
	if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
		    "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval));
		rval = MDI_FAILURE;
		goto failure;
	}

	/*
	 * See if we have a failover module to support the device.
	 *
	 * We re-probe to determine the failover ops for each path. This
	 * is done in case there are any path-specific side-effects associated
	 * with the sfo_device_probe implementation.
	 *
	 * Give the first successful sfo_device_probe the opportunity to
	 * establish 'ctpriv', vlun/client private data. The ctpriv will
	 * then be passed into the failover module on all other sfo_device_*()
	 * operations (and must be freed by the sfo_device_unprobe
	 * implementation).
	 *
	 * NOTE: While sfo_device_probe is done once per path,
	 * sfo_device_unprobe only occurs once - when the vlun is destroyed.
	 *
	 * NOTE: We don't currently support a per-path fops private data
	 * mechanism.
	 */
	sfo = vhci_dev_fo(vdip, psd,
	    &vlun->svl_fops_ctpriv, &vlun->svl_fops_name);

	/* check path configuration result with current vlun state */
	if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) ||
	    (sfo && vlun->svl_not_supported) ||
	    ((sfo == NULL) && vlun->svl_fops)) {
		/* Getting different results for different paths. */
		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
		    "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n",
		    (void *)pip));
#ifdef DEBUG
		ce_type = CE_PANIC;
#else	/* DEBUG */
		ce_type = CE_WARN;
#endif	/* DEBUG */
		cmn_err(ce_type, "scsi_vhci: failover contradiction: "
		    "'%s'.vs.'%s': path %s\n",
		    vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL",
		    sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip));
		vlun->svl_not_supported = 1;
		rval = MDI_NOT_SUPPORTED;
		goto done;
	} else if (sfo == NULL) {
		/* No failover module - device not supported under vHCI. */
		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
		    "!vhci_pathinfo_online: dev (path 0x%p) not "
		    "supported\n", (void *)pip));

		/* XXX does this contradict vhci_is_dev_supported ? */
		vlun->svl_not_supported = 1;
		rval = MDI_NOT_SUPPORTED;
		goto done;
	}

	/* failover supported for device - save failover_ops in vlun */
	vlun->svl_fops = sfo;
	ASSERT(vlun->svl_fops_name != NULL);

	/*
	 * Obtain the device-type based mpxio options as specified in
	 * scsi_vhci.conf file.
	 *
	 * NOTE: currently, the end result is a call to
	 * mdi_set_lb_region_size().
	 */
	tgt_dip = psd->sd_dev;
	ASSERT(tgt_dip != NULL);
	vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);

	/*
	 * The device probe or options in conf file may have set/changed the
	 * lb policy, save the current value.
5021 */ 5022 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip); 5023 5024 /* 5025 * if PGR is active, revalidate key and register on this path also, 5026 * if key is still valid 5027 */ 5028 sema_p(&vlun->svl_pgr_sema); 5029 if (vlun->svl_pgr_active) { 5030 rval = vhci_pgr_validate_and_register(svp); 5031 if (rval != 1) { 5032 rval = MDI_FAILURE; 5033 sema_v(&vlun->svl_pgr_sema); 5034 goto failure; 5035 } 5036 } 5037 sema_v(&vlun->svl_pgr_sema); 5038 5039 if (svp->svp_new_path) { 5040 /* 5041 * Last chance to perform any cleanup operations on this 5042 * new path before making this path completely online. 5043 */ 5044 svp->svp_new_path = 0; 5045 5046 /* 5047 * If scsi_vhci knows the lun is alread RESERVE'd, 5048 * then skip the issue of RELEASE on new path. 5049 */ 5050 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) { 5051 /* 5052 * Issue SCSI-2 RELEASE only for the first time on 5053 * a new path just in case the host rebooted and 5054 * a reservation is still pending on this path. 5055 * IBM Shark storage does not clear RESERVE upon 5056 * host reboot. 5057 */ 5058 ap = &psd->sd_address; 5059 pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0, 5060 sizeof (struct scsi_arq_status), 0, 0, 5061 SLEEP_FUNC, NULL); 5062 if (pkt == NULL) { 5063 VHCI_DEBUG(1, (CE_NOTE, NULL, 5064 "!vhci_pathinfo_online: " 5065 "Release init_pkt failed :%p\n", 5066 (void *)pip)); 5067 rval = MDI_FAILURE; 5068 goto failure; 5069 } 5070 pkt->pkt_cdbp[0] = SCMD_RELEASE; 5071 pkt->pkt_time = 60; 5072 5073 VHCI_DEBUG(1, (CE_NOTE, NULL, 5074 "!vhci_path_online: path:%p " 5075 "Issued SCSI-2 RELEASE\n", (void *)pip)); 5076 5077 /* Ignore the return value */ 5078 (void) vhci_do_scsi_cmd(pkt); 5079 scsi_destroy_pkt(pkt); 5080 } 5081 } 5082 5083 rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci); 5084 if (rval == MDI_FAILURE) { 5085 goto failure; 5086 } 5087 5088 /* Initialize MP-API data */ 5089 vhci_update_mpapi_data(vhci, vlun, pip); 5090 5091 /* 5092 * MP-API also needs the Inquiry data to be maintained in the 5093 * mp_vendor_prop_t structure, so find the lun and update its 5094 * structure with this data. 5095 */ 5096 list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL, 5097 MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun); 5098 ld = (mpapi_lu_data_t *)list_ptr->item->idata; 5099 if (ld != NULL) { 5100 bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8); 5101 bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16); 5102 bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4); 5103 } else { 5104 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: " 5105 "mpapi_lu_data_t is NULL")); 5106 } 5107 5108 /* create kstats for path */ 5109 vhci_kstat_create_pathinfo(pip); 5110 5111 done: 5112 mutex_enter(&vhci_global_mutex); 5113 cv_broadcast(&vhci_cv); 5114 mutex_exit(&vhci_global_mutex); 5115 5116 if (vlun->svl_setcap_done) { 5117 (void) vhci_pHCI_cap(ap, "sector-size", 5118 vlun->svl_sector_size, 1, pip); 5119 } 5120 5121 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n", 5122 (void *)pip)); 5123 5124 failure: 5125 return (rval); 5126 } 5127 5128 /* 5129 * path offline handler. Release all bindings that will not be 5130 * released by the normal packet transport/completion code path. 5131 * Since we don't (presently) keep any bindings alive outside of 5132 * the in-transport packets (which will be released on completion) 5133 * there is not much to do here. 
5134 */ 5135 /* ARGSUSED */ 5136 static int 5137 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 5138 { 5139 scsi_hba_tran_t *hba = NULL; 5140 struct scsi_device *psd = NULL; 5141 dev_info_t *pdip = NULL; 5142 dev_info_t *cdip = NULL; 5143 scsi_vhci_priv_t *svp = NULL; 5144 5145 ASSERT(vdip != NULL); 5146 ASSERT(pip != NULL); 5147 5148 pdip = mdi_pi_get_phci(pip); 5149 ASSERT(pdip != NULL); 5150 if (pdip == NULL) { 5151 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5152 "phci dip", (void *)pip)); 5153 return (MDI_FAILURE); 5154 } 5155 5156 cdip = mdi_pi_get_client(pip); 5157 ASSERT(cdip != NULL); 5158 if (cdip == NULL) { 5159 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5160 "client dip", (void *)pip)); 5161 return (MDI_FAILURE); 5162 } 5163 5164 hba = ddi_get_driver_private(pdip); 5165 ASSERT(hba != NULL); 5166 5167 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 5168 if (svp == NULL) { 5169 /* 5170 * mdi_pathinfo node in INIT state can have vHCI private 5171 * information set to null 5172 */ 5173 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5174 "svp is NULL for pip 0x%p\n", (void *)pip)); 5175 return (MDI_SUCCESS); 5176 } 5177 5178 psd = svp->svp_psd; 5179 ASSERT(psd != NULL); 5180 5181 mutex_enter(&svp->svp_mutex); 5182 5183 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5184 "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip)); 5185 while (svp->svp_cmds != 0) { 5186 if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex, 5187 ddi_get_lbolt() + 5188 drv_usectohz(vhci_path_quiesce_timeout * 1000000)) == -1) { 5189 /* 5190 * The timeout time reached without the condition 5191 * being signaled. 5192 */ 5193 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5194 "Timeout reached on path 0x%p without the cond\n", 5195 (void *)pip)); 5196 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5197 "%d cmds still pending on path: 0x%p\n", 5198 svp->svp_cmds, (void *)pip)); 5199 break; 5200 } 5201 } 5202 mutex_exit(&svp->svp_mutex); 5203 5204 /* 5205 * Check to see if this vlun has an active SCSI-II RESERVE. And this 5206 * is the pip for the path that has been reserved. 5207 * If so clear the reservation by sending a reset, so the host will not 5208 * get a reservation conflict. Reset the flag VLUN_RESERVE_ACTIVE_FLG 5209 * for this lun. Also a reset notify is sent to the target driver 5210 * just in case the POR check condition is cleared by some other layer 5211 * in the stack. 
5212 */ 5213 if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 5214 if (pip == svp->svp_svl->svl_resrv_pip) { 5215 if (vhci_recovery_reset(svp->svp_svl, 5216 &svp->svp_psd->sd_address, TRUE, 5217 VHCI_DEPTH_TARGET) == 0) { 5218 VHCI_DEBUG(1, (CE_NOTE, NULL, 5219 "!vhci_pathinfo_offline (pip:%p):" 5220 "reset failed, retrying\n", (void *)pip)); 5221 delay(1*drv_usectohz(1000000)); 5222 if (vhci_recovery_reset(svp->svp_svl, 5223 &svp->svp_psd->sd_address, TRUE, 5224 VHCI_DEPTH_TARGET) == 0) { 5225 VHCI_DEBUG(1, (CE_NOTE, NULL, 5226 "!vhci_pathinfo_offline " 5227 "(pip:%p): reset failed, " 5228 "giving up!\n", (void *)pip)); 5229 } 5230 } 5231 svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 5232 } 5233 } 5234 5235 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE); 5236 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED); 5237 5238 VHCI_DEBUG(1, (CE_NOTE, NULL, 5239 "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip)); 5240 return (MDI_SUCCESS); 5241 } 5242 5243 5244 /* 5245 * routine for SCSI VHCI IOCTL implementation. 5246 */ 5247 /* ARGSUSED */ 5248 static int 5249 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 5250 { 5251 struct scsi_vhci *vhci; 5252 dev_info_t *vdip; 5253 mdi_pathinfo_t *pip; 5254 int instance, held; 5255 int retval = 0; 5256 caddr_t phci_path = NULL, client_path = NULL; 5257 caddr_t paddr = NULL; 5258 sv_iocdata_t ioc; 5259 sv_iocdata_t *pioc = &ioc; 5260 sv_switch_to_cntlr_iocdata_t iocsc; 5261 sv_switch_to_cntlr_iocdata_t *piocsc = &iocsc; 5262 caddr_t s; 5263 scsi_vhci_lun_t *vlun; 5264 struct scsi_failover_ops *fo; 5265 char *pclass; 5266 5267 /* Check for validity of vhci structure */ 5268 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev))); 5269 if (vhci == NULL) { 5270 return (ENXIO); 5271 } 5272 5273 mutex_enter(&vhci->vhci_mutex); 5274 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5275 mutex_exit(&vhci->vhci_mutex); 5276 return (ENXIO); 5277 } 5278 mutex_exit(&vhci->vhci_mutex); 5279 5280 /* Get the vhci dip */ 5281 vdip = vhci->vhci_dip; 5282 ASSERT(vdip != NULL); 5283 instance = ddi_get_instance(vdip); 5284 5285 /* Allocate memory for getting parameters from userland */ 5286 phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5287 client_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5288 paddr = kmem_zalloc(MAXNAMELEN, KM_SLEEP); 5289 5290 /* 5291 * Set a local variable indicating the ioctl name. Used for 5292 * printing debug strings. 
5293 */ 5294 switch (cmd) { 5295 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5296 s = "GET_CLIENT_MULTIPATH_INFO"; 5297 break; 5298 5299 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5300 s = "GET_PHCI_MULTIPATH_INFO"; 5301 break; 5302 5303 case SCSI_VHCI_GET_CLIENT_NAME: 5304 s = "GET_CLIENT_NAME"; 5305 break; 5306 5307 case SCSI_VHCI_PATH_ONLINE: 5308 s = "PATH_ONLINE"; 5309 break; 5310 5311 case SCSI_VHCI_PATH_OFFLINE: 5312 s = "PATH_OFFLINE"; 5313 break; 5314 5315 case SCSI_VHCI_PATH_STANDBY: 5316 s = "PATH_STANDBY"; 5317 break; 5318 5319 case SCSI_VHCI_PATH_TEST: 5320 s = "PATH_TEST"; 5321 break; 5322 5323 case SCSI_VHCI_SWITCH_TO_CNTLR: 5324 s = "SWITCH_TO_CNTLR"; 5325 break; 5326 case SCSI_VHCI_PATH_DISABLE: 5327 s = "PATH_DISABLE"; 5328 break; 5329 case SCSI_VHCI_PATH_ENABLE: 5330 s = "PATH_ENABLE"; 5331 break; 5332 5333 case SCSI_VHCI_GET_TARGET_LONGNAME: 5334 s = "GET_TARGET_LONGNAME"; 5335 break; 5336 5337 #ifdef DEBUG 5338 case SCSI_VHCI_CONFIGURE_PHCI: 5339 s = "CONFIGURE_PHCI"; 5340 break; 5341 5342 case SCSI_VHCI_UNCONFIGURE_PHCI: 5343 s = "UNCONFIGURE_PHCI"; 5344 break; 5345 #endif 5346 5347 default: 5348 s = "Unknown"; 5349 vhci_log(CE_NOTE, vdip, 5350 "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd); 5351 retval = ENOTSUP; 5352 break; 5353 } 5354 if (retval != 0) { 5355 goto end; 5356 } 5357 5358 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s)); 5359 5360 /* 5361 * Get IOCTL parameters from userland 5362 */ 5363 switch (cmd) { 5364 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5365 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5366 case SCSI_VHCI_GET_CLIENT_NAME: 5367 case SCSI_VHCI_PATH_ONLINE: 5368 case SCSI_VHCI_PATH_OFFLINE: 5369 case SCSI_VHCI_PATH_STANDBY: 5370 case SCSI_VHCI_PATH_TEST: 5371 case SCSI_VHCI_PATH_DISABLE: 5372 case SCSI_VHCI_PATH_ENABLE: 5373 case SCSI_VHCI_GET_TARGET_LONGNAME: 5374 #ifdef DEBUG 5375 case SCSI_VHCI_CONFIGURE_PHCI: 5376 case SCSI_VHCI_UNCONFIGURE_PHCI: 5377 #endif 5378 retval = vhci_get_iocdata((const void *)data, pioc, mode, s); 5379 break; 5380 5381 case SCSI_VHCI_SWITCH_TO_CNTLR: 5382 retval = vhci_get_iocswitchdata((const void *)data, piocsc, 5383 mode, s); 5384 break; 5385 } 5386 if (retval != 0) { 5387 goto end; 5388 } 5389 5390 5391 /* 5392 * Process the IOCTL 5393 */ 5394 switch (cmd) { 5395 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5396 { 5397 uint_t num_paths; /* Num paths to client dev */ 5398 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5399 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5400 dev_info_t *cdip; /* Client device dip */ 5401 5402 if (pioc->ret_elem == NULL) { 5403 retval = EINVAL; 5404 break; 5405 } 5406 5407 /* Get client device path from user land */ 5408 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5409 retval = EFAULT; 5410 break; 5411 } 5412 5413 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5414 "client <%s>", s, client_path)); 5415 5416 /* Get number of paths to this client device */ 5417 if ((cdip = mdi_client_path2devinfo(vdip, client_path)) 5418 == NULL) { 5419 retval = ENXIO; 5420 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5421 "client dip doesn't exist. 
invalid path <%s>", 5422 s, client_path)); 5423 break; 5424 } 5425 num_paths = mdi_client_get_path_count(cdip); 5426 5427 if (ddi_copyout(&num_paths, pioc->ret_elem, 5428 sizeof (num_paths), mode)) { 5429 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5430 "num_paths copyout failed", s)); 5431 retval = EFAULT; 5432 break; 5433 } 5434 5435 /* If user just wanted num_paths, then return */ 5436 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5437 num_paths == 0) { 5438 break; 5439 } 5440 5441 /* Set num_paths to value as much as can be sent to userland */ 5442 if (num_paths > pioc->buf_elem) { 5443 num_paths = pioc->buf_elem; 5444 } 5445 5446 /* Allocate memory and get userland pointers */ 5447 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5448 pioc, mode, s) != 0) { 5449 retval = EFAULT; 5450 break; 5451 } 5452 ASSERT(upibuf != NULL); 5453 ASSERT(kpibuf != NULL); 5454 5455 /* 5456 * Get the path information and send it to userland. 5457 */ 5458 if (vhci_get_client_path_list(cdip, kpibuf, num_paths) 5459 != MDI_SUCCESS) { 5460 retval = ENXIO; 5461 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5462 break; 5463 } 5464 5465 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5466 pioc, mode, s)) { 5467 retval = EFAULT; 5468 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5469 break; 5470 } 5471 5472 /* Free the memory allocated for path information */ 5473 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5474 break; 5475 } 5476 5477 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5478 { 5479 uint_t num_paths; /* Num paths to client dev */ 5480 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5481 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5482 dev_info_t *pdip; /* PHCI device dip */ 5483 5484 if (pioc->ret_elem == NULL) { 5485 retval = EINVAL; 5486 break; 5487 } 5488 5489 /* Get PHCI device path from user land */ 5490 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5491 retval = EFAULT; 5492 break; 5493 } 5494 5495 VHCI_DEBUG(6, (CE_WARN, vdip, 5496 "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path)); 5497 5498 /* Get number of devices associated with this PHCI device */ 5499 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5500 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5501 "phci dip doesn't exist. invalid path <%s>", 5502 s, phci_path)); 5503 retval = ENXIO; 5504 break; 5505 } 5506 5507 num_paths = mdi_phci_get_path_count(pdip); 5508 5509 if (ddi_copyout(&num_paths, pioc->ret_elem, 5510 sizeof (num_paths), mode)) { 5511 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5512 "num_paths copyout failed", s)); 5513 retval = EFAULT; 5514 break; 5515 } 5516 5517 /* If user just wanted num_paths, then return */ 5518 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5519 num_paths == 0) { 5520 break; 5521 } 5522 5523 /* Set num_paths to value as much as can be sent to userland */ 5524 if (num_paths > pioc->buf_elem) { 5525 num_paths = pioc->buf_elem; 5526 } 5527 5528 /* Allocate memory and get userland pointers */ 5529 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5530 pioc, mode, s) != 0) { 5531 retval = EFAULT; 5532 break; 5533 } 5534 ASSERT(upibuf != NULL); 5535 ASSERT(kpibuf != NULL); 5536 5537 /* 5538 * Get the path information and send it to userland. 
5539 */ 5540 if (vhci_get_phci_path_list(pdip, kpibuf, num_paths) 5541 != MDI_SUCCESS) { 5542 retval = ENXIO; 5543 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5544 break; 5545 } 5546 5547 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5548 pioc, mode, s)) { 5549 retval = EFAULT; 5550 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5551 break; 5552 } 5553 5554 /* Free the memory allocated for path information */ 5555 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5556 break; 5557 } 5558 5559 case SCSI_VHCI_GET_CLIENT_NAME: 5560 { 5561 dev_info_t *cdip, *pdip; 5562 5563 /* Get PHCI path and device address from user land */ 5564 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5565 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5566 retval = EFAULT; 5567 break; 5568 } 5569 5570 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5571 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5572 5573 /* Get the PHCI dip */ 5574 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5575 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5576 "phci dip doesn't exist. invalid path <%s>", 5577 s, phci_path)); 5578 retval = ENXIO; 5579 break; 5580 } 5581 5582 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5583 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5584 "pathinfo doesn't exist. invalid device addr", s)); 5585 retval = ENXIO; 5586 break; 5587 } 5588 5589 /* Get the client device pathname and send to userland */ 5590 cdip = mdi_pi_get_client(pip); 5591 vhci_ioc_devi_to_path(cdip, client_path); 5592 5593 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5594 "client <%s>", s, client_path)); 5595 5596 if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) { 5597 retval = EFAULT; 5598 break; 5599 } 5600 break; 5601 } 5602 5603 case SCSI_VHCI_PATH_ONLINE: 5604 case SCSI_VHCI_PATH_OFFLINE: 5605 case SCSI_VHCI_PATH_STANDBY: 5606 case SCSI_VHCI_PATH_TEST: 5607 { 5608 dev_info_t *pdip; /* PHCI dip */ 5609 5610 /* Get PHCI path and device address from user land */ 5611 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5612 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5613 retval = EFAULT; 5614 break; 5615 } 5616 5617 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5618 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5619 5620 /* Get the PHCI dip */ 5621 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5622 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5623 "phci dip doesn't exist. invalid path <%s>", 5624 s, phci_path)); 5625 retval = ENXIO; 5626 break; 5627 } 5628 5629 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5630 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5631 "pathinfo doesn't exist. 
invalid device addr", s)); 5632 retval = ENXIO; 5633 break; 5634 } 5635 5636 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5637 "Calling MDI function to change device state", s)); 5638 5639 switch (cmd) { 5640 case SCSI_VHCI_PATH_ONLINE: 5641 retval = mdi_pi_online(pip, 0); 5642 break; 5643 5644 case SCSI_VHCI_PATH_OFFLINE: 5645 retval = mdi_pi_offline(pip, 0); 5646 break; 5647 5648 case SCSI_VHCI_PATH_STANDBY: 5649 retval = mdi_pi_standby(pip, 0); 5650 break; 5651 5652 case SCSI_VHCI_PATH_TEST: 5653 break; 5654 } 5655 break; 5656 } 5657 5658 case SCSI_VHCI_SWITCH_TO_CNTLR: 5659 { 5660 dev_info_t *cdip; 5661 struct scsi_device *devp; 5662 5663 /* Get the client device pathname */ 5664 if (ddi_copyin(piocsc->client, client_path, 5665 MAXPATHLEN, mode)) { 5666 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5667 "client_path copyin failed", s)); 5668 retval = EFAULT; 5669 break; 5670 } 5671 5672 /* Get the path class to which user wants to switch */ 5673 if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) { 5674 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5675 "controller_class copyin failed", s)); 5676 retval = EFAULT; 5677 break; 5678 } 5679 5680 /* Perform validity checks */ 5681 if ((cdip = mdi_client_path2devinfo(vdip, 5682 client_path)) == NULL) { 5683 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5684 "client dip doesn't exist. invalid path <%s>", 5685 s, client_path)); 5686 retval = ENXIO; 5687 break; 5688 } 5689 5690 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func " 5691 "to switch controller")); 5692 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> " 5693 "class <%s>", client_path, paddr)); 5694 5695 if (strcmp(paddr, PCLASS_PRIMARY) && 5696 strcmp(paddr, PCLASS_SECONDARY)) { 5697 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5698 "invalid path class <%s>", s, paddr)); 5699 retval = ENXIO; 5700 break; 5701 } 5702 5703 devp = ddi_get_driver_private(cdip); 5704 if (devp == NULL) { 5705 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5706 "invalid scsi device <%s>", s, client_path)); 5707 retval = ENXIO; 5708 break; 5709 } 5710 vlun = ADDR2VLUN(&devp->sd_address); 5711 ASSERT(vlun); 5712 5713 /* 5714 * Checking to see if device has only one pclass, PRIMARY. 5715 * If so this device doesn't support failovers. Assumed 5716 * that the devices with one pclass is PRIMARY, as thats the 5717 * case today. If this is not true and in future other 5718 * symmetric devices are supported with other pclass, this 5719 * IOCTL shall have to be overhauled anyways as now the only 5720 * arguments it accepts are PRIMARY and SECONDARY. 
5721 */ 5722 fo = vlun->svl_fops; 5723 if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass, 5724 vlun->svl_fops_ctpriv)) { 5725 retval = ENOTSUP; 5726 break; 5727 } 5728 5729 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 5730 mutex_enter(&vlun->svl_mutex); 5731 if (vlun->svl_active_pclass != NULL) { 5732 if (strcmp(vlun->svl_active_pclass, paddr) == 0) { 5733 mutex_exit(&vlun->svl_mutex); 5734 retval = EALREADY; 5735 VHCI_RELEASE_LUN(vlun); 5736 break; 5737 } 5738 } 5739 mutex_exit(&vlun->svl_mutex); 5740 /* Call mdi function to cause a switch over */ 5741 retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC); 5742 if (retval == MDI_SUCCESS) { 5743 retval = 0; 5744 } else if (retval == MDI_BUSY) { 5745 retval = EBUSY; 5746 } else { 5747 retval = EIO; 5748 } 5749 VHCI_RELEASE_LUN(vlun); 5750 break; 5751 } 5752 5753 case SCSI_VHCI_PATH_ENABLE: 5754 case SCSI_VHCI_PATH_DISABLE: 5755 { 5756 dev_info_t *cdip, *pdip; 5757 5758 /* 5759 * Get client device path from user land 5760 */ 5761 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5762 retval = EFAULT; 5763 break; 5764 } 5765 5766 /* 5767 * Get Phci device path from user land 5768 */ 5769 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5770 retval = EFAULT; 5771 break; 5772 } 5773 5774 /* 5775 * Get the devinfo for the Phci. 5776 */ 5777 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5778 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5779 "phci dip doesn't exist. invalid path <%s>", 5780 s, phci_path)); 5781 retval = ENXIO; 5782 break; 5783 } 5784 5785 /* 5786 * If the client path is set to /scsi_vhci then we need 5787 * to do the operation on all the clients so set cdip to NULL. 5788 * Else, try to get the client dip. 5789 */ 5790 if (strcmp(client_path, "/scsi_vhci") == 0) { 5791 cdip = NULL; 5792 } else { 5793 if ((cdip = mdi_client_path2devinfo(vdip, 5794 client_path)) == NULL) { 5795 retval = ENXIO; 5796 VHCI_DEBUG(1, (CE_WARN, NULL, 5797 "!vhci_ioctl: ioctl <%s> client dip " 5798 "doesn't exist. invalid path <%s>", 5799 s, client_path)); 5800 break; 5801 } 5802 } 5803 5804 if (cmd == SCSI_VHCI_PATH_ENABLE) 5805 retval = mdi_pi_enable(cdip, pdip, USER_DISABLE); 5806 else 5807 retval = mdi_pi_disable(cdip, pdip, USER_DISABLE); 5808 5809 break; 5810 } 5811 5812 case SCSI_VHCI_GET_TARGET_LONGNAME: 5813 { 5814 uint_t pid = pioc->buf_elem; 5815 char *target_port; 5816 mod_hash_val_t hv; 5817 5818 /* targetmap lookup of 'target-port' by <pid> */ 5819 if (mod_hash_find(vhci_targetmap_bypid, 5820 (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) { 5821 /* 5822 * NOTE: failure to find the mapping is OK for guid 5823 * based 'target-port' values. 
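 *
 * For illustration, a hypothetical userland consumer of this ioctl
 * passes the numeric <pid> in buf_elem and a MAXNAMELEN buffer in addr
 * (sketch only; how the caller obtained the pid and the already-open
 * control-node fd are assumptions):
 *
 *	sv_iocdata_t ioc;
 *	char tport[MAXNAMELEN];
 *
 *	bzero(&ioc, sizeof (ioc));
 *	ioc.buf_elem = pid;
 *	ioc.addr = tport;
 *	if (ioctl(fd, SCSI_VHCI_GET_TARGET_LONGNAME, &ioc) == 0)
 *		(void) printf("pid %u -> %s\n", pid, tport);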
5824 */ 5825 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5826 "targetport mapping doesn't exist: pid %d", 5827 s, pid)); 5828 retval = ENXIO; 5829 break; 5830 } 5831 5832 /* copyout 'target-port' result */ 5833 target_port = (char *)hv; 5834 if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) { 5835 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5836 "targetport copyout failed: len: %d", 5837 s, (int)strlen(target_port))); 5838 retval = EFAULT; 5839 } 5840 break; 5841 } 5842 5843 #ifdef DEBUG 5844 case SCSI_VHCI_CONFIGURE_PHCI: 5845 { 5846 dev_info_t *pdip; 5847 5848 /* Get PHCI path and device address from user land */ 5849 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5850 retval = EFAULT; 5851 break; 5852 } 5853 5854 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5855 "phci <%s>", s, phci_path)); 5856 5857 /* Get the PHCI dip */ 5858 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5859 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5860 "phci dip doesn't exist. invalid path <%s>", 5861 s, phci_path)); 5862 retval = ENXIO; 5863 break; 5864 } 5865 5866 if (ndi_devi_config(pdip, 5867 NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) { 5868 retval = EIO; 5869 } 5870 5871 ddi_release_devi(pdip); 5872 break; 5873 } 5874 5875 case SCSI_VHCI_UNCONFIGURE_PHCI: 5876 { 5877 dev_info_t *pdip; 5878 5879 /* Get PHCI path and device address from user land */ 5880 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5881 retval = EFAULT; 5882 break; 5883 } 5884 5885 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5886 "phci <%s>", s, phci_path)); 5887 5888 /* Get the PHCI dip */ 5889 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5890 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5891 "phci dip doesn't exist. 
invalid path <%s>", 5892 s, phci_path)); 5893 retval = ENXIO; 5894 break; 5895 } 5896 5897 if (ndi_devi_unconfig(pdip, 5898 NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) { 5899 retval = EBUSY; 5900 } 5901 5902 ddi_release_devi(pdip); 5903 break; 5904 } 5905 #endif 5906 } 5907 5908 end: 5909 /* Free the memory allocated above */ 5910 if (phci_path != NULL) { 5911 kmem_free(phci_path, MAXPATHLEN); 5912 } 5913 if (client_path != NULL) { 5914 kmem_free(client_path, MAXPATHLEN); 5915 } 5916 if (paddr != NULL) { 5917 kmem_free(paddr, MAXNAMELEN); 5918 } 5919 return (retval); 5920 } 5921 5922 /* 5923 * devctl IOCTL support for client device DR 5924 */ 5925 /* ARGSUSED */ 5926 int 5927 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 5928 int *rvalp) 5929 { 5930 dev_info_t *self; 5931 dev_info_t *child; 5932 scsi_hba_tran_t *hba; 5933 struct devctl_iocdata *dcp; 5934 struct scsi_vhci *vhci; 5935 int rv = 0; 5936 int retval = 0; 5937 scsi_vhci_priv_t *svp; 5938 mdi_pathinfo_t *pip; 5939 5940 if ((vhci = ddi_get_soft_state(vhci_softstate, 5941 MINOR2INST(getminor(dev)))) == NULL) 5942 return (ENXIO); 5943 5944 /* 5945 * check if :devctl minor device has been opened 5946 */ 5947 mutex_enter(&vhci->vhci_mutex); 5948 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5949 mutex_exit(&vhci->vhci_mutex); 5950 return (ENXIO); 5951 } 5952 mutex_exit(&vhci->vhci_mutex); 5953 5954 self = vhci->vhci_dip; 5955 hba = ddi_get_driver_private(self); 5956 if (hba == NULL) 5957 return (ENXIO); 5958 5959 /* 5960 * We can use the generic implementation for these ioctls 5961 */ 5962 switch (cmd) { 5963 case DEVCTL_DEVICE_GETSTATE: 5964 case DEVCTL_DEVICE_ONLINE: 5965 case DEVCTL_DEVICE_OFFLINE: 5966 case DEVCTL_DEVICE_REMOVE: 5967 case DEVCTL_BUS_GETSTATE: 5968 return (ndi_devctl_ioctl(self, cmd, arg, mode, 0)); 5969 } 5970 5971 /* 5972 * read devctl ioctl data 5973 */ 5974 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) 5975 return (EFAULT); 5976 5977 switch (cmd) { 5978 5979 case DEVCTL_DEVICE_RESET: 5980 /* 5981 * lookup and hold child device 5982 */ 5983 if ((child = ndi_devi_find(self, ndi_dc_getname(dcp), 5984 ndi_dc_getaddr(dcp))) == NULL) { 5985 rv = ENXIO; 5986 break; 5987 } 5988 retval = mdi_select_path(child, NULL, 5989 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), 5990 NULL, &pip); 5991 if ((retval != MDI_SUCCESS) || (pip == NULL)) { 5992 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:" 5993 "Unable to get a path, dip 0x%p", (void *)child)); 5994 rv = ENXIO; 5995 break; 5996 } 5997 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 5998 if (vhci_recovery_reset(svp->svp_svl, 5999 &svp->svp_psd->sd_address, TRUE, 6000 VHCI_DEPTH_TARGET) == 0) { 6001 VHCI_DEBUG(1, (CE_NOTE, NULL, 6002 "!vhci_ioctl(pip:%p): " 6003 "reset failed\n", (void *)pip)); 6004 rv = ENXIO; 6005 } 6006 mdi_rele_path(pip); 6007 break; 6008 6009 case DEVCTL_BUS_QUIESCE: 6010 case DEVCTL_BUS_UNQUIESCE: 6011 case DEVCTL_BUS_RESET: 6012 case DEVCTL_BUS_RESETALL: 6013 #ifdef DEBUG 6014 case DEVCTL_BUS_CONFIGURE: 6015 case DEVCTL_BUS_UNCONFIGURE: 6016 #endif 6017 rv = ENOTSUP; 6018 break; 6019 6020 default: 6021 rv = ENOTTY; 6022 } /* end of outer switch */ 6023 6024 ndi_dc_freehdl(dcp); 6025 return (rv); 6026 } 6027 6028 /* 6029 * Routine to get the PHCI pathname from ioctl structures in userland 6030 */ 6031 /* ARGSUSED */ 6032 static int 6033 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path, 6034 int mode, caddr_t s) 6035 { 6036 int retval = 0; 6037 6038 if (ddi_copyin(pioc->phci, phci_path, 
MAXPATHLEN, mode)) { 6039 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> " 6040 "phci_path copyin failed", s)); 6041 retval = EFAULT; 6042 } 6043 return (retval); 6044 6045 } 6046 6047 6048 /* 6049 * Routine to get the Client device pathname from ioctl structures in userland 6050 */ 6051 /* ARGSUSED */ 6052 static int 6053 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path, 6054 int mode, caddr_t s) 6055 { 6056 int retval = 0; 6057 6058 if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) { 6059 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: " 6060 "ioctl <%s> client_path copyin failed", s)); 6061 retval = EFAULT; 6062 } 6063 return (retval); 6064 } 6065 6066 6067 /* 6068 * Routine to get physical device address from ioctl structure in userland 6069 */ 6070 /* ARGSUSED */ 6071 static int 6072 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s) 6073 { 6074 int retval = 0; 6075 6076 if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) { 6077 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: " 6078 "ioctl <%s> device addr copyin failed", s)); 6079 retval = EFAULT; 6080 } 6081 return (retval); 6082 } 6083 6084 6085 /* 6086 * Routine to send client device pathname to userland. 6087 */ 6088 /* ARGSUSED */ 6089 static int 6090 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc, 6091 int mode, caddr_t s) 6092 { 6093 int retval = 0; 6094 6095 if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) { 6096 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: " 6097 "ioctl <%s> client_path copyout failed", s)); 6098 retval = EFAULT; 6099 } 6100 return (retval); 6101 } 6102 6103 6104 /* 6105 * Routine to translated dev_info pointer (dip) to device pathname. 6106 */ 6107 static void 6108 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path) 6109 { 6110 (void) ddi_pathname(dip, path); 6111 } 6112 6113 6114 /* 6115 * vhci_get_phci_path_list: 6116 * get information about devices associated with a 6117 * given PHCI device. 6118 * 6119 * Return Values: 6120 * path information elements 6121 */ 6122 int 6123 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf, 6124 uint_t num_elems) 6125 { 6126 uint_t count, done; 6127 mdi_pathinfo_t *pip; 6128 sv_path_info_t *ret_pip; 6129 int status; 6130 size_t prop_size; 6131 int circular; 6132 6133 /* 6134 * Get the PHCI structure and retrieve the path information 6135 * from the GUID hash table. 
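 *
 * For each path found, the loop below fills in one sv_path_info_t
 * element roughly as follows (a summary of the code, not new behavior):
 *
 *	pibuf[i].device.ret_phci		pHCI /devices pathname
 *	pibuf[i].ret_addr			path unit-address string
 *	pibuf[i].ret_state/ret_ext_state	mdi path state
 *	*pibuf[i].ret_prop.ret_buf_size		size of the packed properties
 *						(when the pointer is non-NULL)
 *	pibuf[i].ret_prop.buf			packed properties, filled only
 *						when a buffer of sufficient
 *						buf_size was supplied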
6136 */ 6137 6138 ret_pip = pibuf; 6139 count = 0; 6140 6141 ndi_devi_enter(pdip, &circular); 6142 6143 done = (count >= num_elems); 6144 pip = mdi_get_next_client_path(pdip, NULL); 6145 while (pip && !done) { 6146 mdi_pi_lock(pip); 6147 (void) ddi_pathname(mdi_pi_get_phci(pip), 6148 ret_pip->device.ret_phci); 6149 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6150 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6151 &ret_pip->ret_ext_state); 6152 6153 status = mdi_prop_size(pip, &prop_size); 6154 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6155 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6156 } 6157 6158 #ifdef DEBUG 6159 if (status != MDI_SUCCESS) { 6160 VHCI_DEBUG(2, (CE_WARN, NULL, 6161 "!vhci_get_phci_path_list: " 6162 "phci <%s>, prop size failure 0x%x", 6163 ret_pip->device.ret_phci, status)); 6164 } 6165 #endif /* DEBUG */ 6166 6167 6168 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6169 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6170 status = mdi_prop_pack(pip, 6171 &ret_pip->ret_prop.buf, 6172 ret_pip->ret_prop.buf_size); 6173 6174 #ifdef DEBUG 6175 if (status != MDI_SUCCESS) { 6176 VHCI_DEBUG(2, (CE_WARN, NULL, 6177 "!vhci_get_phci_path_list: " 6178 "phci <%s>, prop pack failure 0x%x", 6179 ret_pip->device.ret_phci, status)); 6180 } 6181 #endif /* DEBUG */ 6182 } 6183 6184 mdi_pi_unlock(pip); 6185 pip = mdi_get_next_client_path(pdip, pip); 6186 ret_pip++; 6187 count++; 6188 done = (count >= num_elems); 6189 } 6190 6191 ndi_devi_exit(pdip, circular); 6192 6193 return (MDI_SUCCESS); 6194 } 6195 6196 6197 /* 6198 * vhci_get_client_path_list: 6199 * get information about various paths associated with a 6200 * given client device. 6201 * 6202 * Return Values: 6203 * path information elements 6204 */ 6205 int 6206 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf, 6207 uint_t num_elems) 6208 { 6209 uint_t count, done; 6210 mdi_pathinfo_t *pip; 6211 sv_path_info_t *ret_pip; 6212 int status; 6213 size_t prop_size; 6214 int circular; 6215 6216 ret_pip = pibuf; 6217 count = 0; 6218 6219 ndi_devi_enter(cdip, &circular); 6220 6221 done = (count >= num_elems); 6222 pip = mdi_get_next_phci_path(cdip, NULL); 6223 while (pip && !done) { 6224 mdi_pi_lock(pip); 6225 (void) ddi_pathname(mdi_pi_get_phci(pip), 6226 ret_pip->device.ret_phci); 6227 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6228 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6229 &ret_pip->ret_ext_state); 6230 6231 status = mdi_prop_size(pip, &prop_size); 6232 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6233 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6234 } 6235 6236 #ifdef DEBUG 6237 if (status != MDI_SUCCESS) { 6238 VHCI_DEBUG(2, (CE_WARN, NULL, 6239 "!vhci_get_client_path_list: " 6240 "phci <%s>, prop size failure 0x%x", 6241 ret_pip->device.ret_phci, status)); 6242 } 6243 #endif /* DEBUG */ 6244 6245 6246 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6247 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6248 status = mdi_prop_pack(pip, 6249 &ret_pip->ret_prop.buf, 6250 ret_pip->ret_prop.buf_size); 6251 6252 #ifdef DEBUG 6253 if (status != MDI_SUCCESS) { 6254 VHCI_DEBUG(2, (CE_WARN, NULL, 6255 "!vhci_get_client_path_list: " 6256 "phci <%s>, prop pack failure 0x%x", 6257 ret_pip->device.ret_phci, status)); 6258 } 6259 #endif /* DEBUG */ 6260 } 6261 6262 mdi_pi_unlock(pip); 6263 pip = mdi_get_next_phci_path(cdip, pip); 6264 ret_pip++; 6265 count++; 6266 done = (count >= num_elems); 6267 } 6268 
6269 ndi_devi_exit(cdip, circular); 6270 6271 return (MDI_SUCCESS); 6272 } 6273 6274 6275 /* 6276 * Routine to get ioctl argument structure from userland. 6277 */ 6278 /* ARGSUSED */ 6279 static int 6280 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s) 6281 { 6282 int retval = 0; 6283 6284 #ifdef _MULTI_DATAMODEL 6285 switch (ddi_model_convert_from(mode & FMODELS)) { 6286 case DDI_MODEL_ILP32: 6287 { 6288 sv_iocdata32_t ioc32; 6289 6290 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6291 retval = EFAULT; 6292 break; 6293 } 6294 pioc->client = (caddr_t)(uintptr_t)ioc32.client; 6295 pioc->phci = (caddr_t)(uintptr_t)ioc32.phci; 6296 pioc->addr = (caddr_t)(uintptr_t)ioc32.addr; 6297 pioc->buf_elem = (uint_t)ioc32.buf_elem; 6298 pioc->ret_buf = (sv_path_info_t *)(uintptr_t)ioc32.ret_buf; 6299 pioc->ret_elem = (uint_t *)(uintptr_t)ioc32.ret_elem; 6300 break; 6301 } 6302 6303 case DDI_MODEL_NONE: 6304 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6305 retval = EFAULT; 6306 break; 6307 } 6308 break; 6309 } 6310 #else /* _MULTI_DATAMODEL */ 6311 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6312 retval = EFAULT; 6313 } 6314 #endif /* _MULTI_DATAMODEL */ 6315 6316 #ifdef DEBUG 6317 if (retval) { 6318 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6319 "iocdata copyin failed", s)); 6320 } 6321 #endif 6322 6323 return (retval); 6324 } 6325 6326 6327 /* 6328 * Routine to get the ioctl argument for ioctl causing controller switchover. 6329 */ 6330 /* ARGSUSED */ 6331 static int 6332 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc, 6333 int mode, caddr_t s) 6334 { 6335 int retval = 0; 6336 6337 #ifdef _MULTI_DATAMODEL 6338 switch (ddi_model_convert_from(mode & FMODELS)) { 6339 case DDI_MODEL_ILP32: 6340 { 6341 sv_switch_to_cntlr_iocdata32_t ioc32; 6342 6343 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6344 retval = EFAULT; 6345 break; 6346 } 6347 piocsc->client = (caddr_t)(uintptr_t)ioc32.client; 6348 piocsc->class = (caddr_t)(uintptr_t)ioc32.class; 6349 break; 6350 } 6351 6352 case DDI_MODEL_NONE: 6353 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6354 retval = EFAULT; 6355 } 6356 break; 6357 } 6358 #else /* _MULTI_DATAMODEL */ 6359 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6360 retval = EFAULT; 6361 } 6362 #endif /* _MULTI_DATAMODEL */ 6363 6364 #ifdef DEBUG 6365 if (retval) { 6366 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6367 "switch_to_cntlr_iocdata copyin failed", s)); 6368 } 6369 #endif 6370 6371 return (retval); 6372 } 6373 6374 6375 /* 6376 * Routine to allocate memory for the path information structures. 6377 * It allocates two chunks of memory - one for keeping userland 6378 * pointers/values for path information and path properties, second for 6379 * keeping allocating kernel memory for path properties. These path 6380 * properties are finally copied to userland. 
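 *
 * Roughly, after this routine succeeds (a summary of the code below):
 *
 *	upibuf[i].ret_prop.buf		user address saved for the later
 *					copyout in vhci_ioc_send_pathinfo()
 *	upibuf[i].ret_prop.buf_size	size of that user buffer
 *	upibuf[i].ret_prop.ret_buf_size	user address of the size word
 *	kpibuf[i].ret_prop.buf		kernel buffer of the same size,
 *					later filled by mdi_prop_pack()
 *	kpibuf[i].ret_prop.ret_buf_size	kernel size word, copied out to
 *					the user address saved above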
6381 */ 6382 /* ARGSUSED */ 6383 static int 6384 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf, 6385 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6386 { 6387 sv_path_info_t *pi; 6388 uint_t bufsize; 6389 int retval = 0; 6390 int index; 6391 6392 /* Allocate memory */ 6393 *upibuf = (sv_path_info_t *) 6394 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6395 ASSERT(*upibuf != NULL); 6396 *kpibuf = (sv_path_info_t *) 6397 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6398 ASSERT(*kpibuf != NULL); 6399 6400 /* 6401 * Get the path info structure from the user space. 6402 * We are interested in the following fields: 6403 * - user size of buffer for per path properties. 6404 * - user address of buffer for path info properties. 6405 * - user pointer for returning actual buffer size 6406 * Keep these fields in the 'upibuf' structures. 6407 * Allocate buffer for per path info properties in kernel 6408 * structure ('kpibuf'). 6409 * Size of these buffers will be equal to the size of buffers 6410 * in the user space. 6411 */ 6412 #ifdef _MULTI_DATAMODEL 6413 switch (ddi_model_convert_from(mode & FMODELS)) { 6414 case DDI_MODEL_ILP32: 6415 { 6416 sv_path_info32_t *src; 6417 sv_path_info32_t pi32; 6418 6419 src = (sv_path_info32_t *)pioc->ret_buf; 6420 pi = (sv_path_info_t *)*upibuf; 6421 for (index = 0; index < num_paths; index++, src++, pi++) { 6422 if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) { 6423 retval = EFAULT; 6424 break; 6425 } 6426 6427 pi->ret_prop.buf_size = 6428 (uint_t)pi32.ret_prop.buf_size; 6429 pi->ret_prop.ret_buf_size = 6430 (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size; 6431 pi->ret_prop.buf = 6432 (caddr_t)(uintptr_t)pi32.ret_prop.buf; 6433 } 6434 break; 6435 } 6436 6437 case DDI_MODEL_NONE: 6438 if (ddi_copyin(pioc->ret_buf, *upibuf, 6439 sizeof (sv_path_info_t) * num_paths, mode)) { 6440 retval = EFAULT; 6441 } 6442 break; 6443 } 6444 #else /* _MULTI_DATAMODEL */ 6445 if (ddi_copyin(pioc->ret_buf, *upibuf, 6446 sizeof (sv_path_info_t) * num_paths, mode)) { 6447 retval = EFAULT; 6448 } 6449 #endif /* _MULTI_DATAMODEL */ 6450 6451 if (retval != 0) { 6452 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: " 6453 "ioctl <%s> normal: path_info copyin failed", s)); 6454 kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths); 6455 kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths); 6456 *upibuf = NULL; 6457 *kpibuf = NULL; 6458 return (retval); 6459 } 6460 6461 /* 6462 * Allocate memory for per path properties. 6463 */ 6464 for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) { 6465 bufsize = (*upibuf)[index].ret_prop.buf_size; 6466 6467 if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) { 6468 pi->ret_prop.buf_size = bufsize; 6469 pi->ret_prop.buf = (caddr_t) 6470 kmem_zalloc(bufsize, KM_SLEEP); 6471 ASSERT(pi->ret_prop.buf != NULL); 6472 } else { 6473 pi->ret_prop.buf_size = 0; 6474 pi->ret_prop.buf = NULL; 6475 } 6476 6477 if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) { 6478 pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc( 6479 sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP); 6480 ASSERT(pi->ret_prop.ret_buf_size != NULL); 6481 } else { 6482 pi->ret_prop.ret_buf_size = NULL; 6483 } 6484 } 6485 6486 return (0); 6487 } 6488 6489 6490 /* 6491 * Routine to free memory for the path information structures. 6492 * This is the memory which was allocated earlier. 
6493 */ 6494 /* ARGSUSED */ 6495 static void 6496 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6497 uint_t num_paths) 6498 { 6499 sv_path_info_t *pi; 6500 int index; 6501 6502 /* Free memory for per path properties */ 6503 for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) { 6504 if (pi->ret_prop.ret_buf_size != NULL) { 6505 kmem_free(pi->ret_prop.ret_buf_size, 6506 sizeof (*pi->ret_prop.ret_buf_size)); 6507 } 6508 6509 if (pi->ret_prop.buf != NULL) { 6510 kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size); 6511 } 6512 } 6513 6514 /* Free memory for path info structures */ 6515 kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths); 6516 kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths); 6517 } 6518 6519 6520 /* 6521 * Routine to copy path information and path properties to userland. 6522 */ 6523 /* ARGSUSED */ 6524 static int 6525 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6526 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6527 { 6528 int retval = 0, index; 6529 sv_path_info_t *upi_ptr; 6530 sv_path_info32_t *upi32_ptr; 6531 6532 #ifdef _MULTI_DATAMODEL 6533 switch (ddi_model_convert_from(mode & FMODELS)) { 6534 case DDI_MODEL_ILP32: 6535 goto copy_32bit; 6536 6537 case DDI_MODEL_NONE: 6538 goto copy_normal; 6539 } 6540 #else /* _MULTI_DATAMODEL */ 6541 6542 goto copy_normal; 6543 6544 #endif /* _MULTI_DATAMODEL */ 6545 6546 copy_normal: 6547 6548 /* 6549 * Copy path information and path properties to user land. 6550 * Pointer fields inside the path property structure were 6551 * saved in the 'upibuf' structure earlier. 6552 */ 6553 upi_ptr = pioc->ret_buf; 6554 for (index = 0; index < num_paths; index++) { 6555 if (ddi_copyout(kpibuf[index].device.ret_ct, 6556 upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6557 retval = EFAULT; 6558 break; 6559 } 6560 6561 if (ddi_copyout(kpibuf[index].ret_addr, 6562 upi_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6563 retval = EFAULT; 6564 break; 6565 } 6566 6567 if (ddi_copyout(&kpibuf[index].ret_state, 6568 &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state), 6569 mode)) { 6570 retval = EFAULT; 6571 break; 6572 } 6573 6574 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6575 &upi_ptr[index].ret_ext_state, 6576 sizeof (kpibuf[index].ret_ext_state), mode)) { 6577 retval = EFAULT; 6578 break; 6579 } 6580 6581 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6582 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6583 upibuf[index].ret_prop.ret_buf_size, 6584 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6585 retval = EFAULT; 6586 break; 6587 } 6588 6589 if ((kpibuf[index].ret_prop.buf != NULL) && 6590 ddi_copyout(kpibuf[index].ret_prop.buf, 6591 upibuf[index].ret_prop.buf, 6592 upibuf[index].ret_prop.buf_size, mode)) { 6593 retval = EFAULT; 6594 break; 6595 } 6596 } 6597 6598 #ifdef DEBUG 6599 if (retval) { 6600 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6601 "normal: path_info copyout failed", s)); 6602 } 6603 #endif 6604 6605 return (retval); 6606 6607 copy_32bit: 6608 /* 6609 * Copy path information and path properties to user land. 6610 * Pointer fields inside the path property structure were 6611 * saved in the 'upibuf' structure earlier. 
6612 */ 6613 upi32_ptr = (sv_path_info32_t *)pioc->ret_buf; 6614 for (index = 0; index < num_paths; index++) { 6615 if (ddi_copyout(kpibuf[index].device.ret_ct, 6616 upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6617 retval = EFAULT; 6618 break; 6619 } 6620 6621 if (ddi_copyout(kpibuf[index].ret_addr, 6622 upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6623 retval = EFAULT; 6624 break; 6625 } 6626 6627 if (ddi_copyout(&kpibuf[index].ret_state, 6628 &upi32_ptr[index].ret_state, 6629 sizeof (kpibuf[index].ret_state), mode)) { 6630 retval = EFAULT; 6631 break; 6632 } 6633 6634 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6635 &upi32_ptr[index].ret_ext_state, 6636 sizeof (kpibuf[index].ret_ext_state), mode)) { 6637 retval = EFAULT; 6638 break; 6639 } 6640 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6641 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6642 upibuf[index].ret_prop.ret_buf_size, 6643 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6644 retval = EFAULT; 6645 break; 6646 } 6647 6648 if ((kpibuf[index].ret_prop.buf != NULL) && 6649 ddi_copyout(kpibuf[index].ret_prop.buf, 6650 upibuf[index].ret_prop.buf, 6651 upibuf[index].ret_prop.buf_size, mode)) { 6652 retval = EFAULT; 6653 break; 6654 } 6655 } 6656 6657 #ifdef DEBUG 6658 if (retval) { 6659 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6660 "normal: path_info copyout failed", s)); 6661 } 6662 #endif 6663 6664 return (retval); 6665 } 6666 6667 6668 /* 6669 * vhci_failover() 6670 * This routine expects VHCI_HOLD_LUN before being invoked. It can be invoked 6671 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC. For asynchronous failovers 6672 * this routine will VHCI_RELEASE_LUN on exiting. For synchronous failovers 6673 * it is the caller's responsibility to release the lun. 6674 */ 6675 6676 /* ARGSUSED */ 6677 static int 6678 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags) 6679 { 6680 char *guid; 6681 scsi_vhci_lun_t *vlun = NULL; 6682 struct scsi_vhci *vhci; 6683 mdi_pathinfo_t *pip, *npip; 6684 char *s_pclass, *pclass1, *pclass2, *pclass; 6685 char active_pclass_copy[255], *active_pclass_ptr; 6686 char *ptr1, *ptr2; 6687 mdi_pathinfo_state_t pi_state; 6688 uint32_t pi_ext_state; 6689 scsi_vhci_priv_t *svp; 6690 struct scsi_device *sd; 6691 struct scsi_failover_ops *sfo; 6692 int sps; /* mdi_select_path() status */ 6693 int activation_done = 0; 6694 int rval, retval = MDI_FAILURE; 6695 int reserve_pending, check_condition, UA_condition; 6696 struct scsi_pkt *pkt; 6697 struct buf *bp; 6698 6699 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 6700 sd = ddi_get_driver_private(cdip); 6701 vlun = ADDR2VLUN(&sd->sd_address); 6702 ASSERT(vlun != 0); 6703 ASSERT(VHCI_LUN_IS_HELD(vlun)); 6704 guid = vlun->svl_lun_wwn; 6705 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid)); 6706 vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s " 6707 "(GUID %s)", ddi_node_name(cdip), guid); 6708 6709 /* 6710 * Let's maintain a local copy of vlun->svl_active_pclass 6711 * for the rest of the processing. Accessing the field 6712 * directly in the loop below breaks the loop logic, 6713 * especially when the field gets updated by other threads 6714 * (updating path status, etc.) and causes the 'paths are not 6715 * currently available' condition to be declared prematurely. 
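 *
 * The snapshot also serves as the wrap-around sentinel for the
 * next_pathclass loop below: s_pclass points at the stack copy, so the
 * "have we cycled back to the class we started from" test stays stable
 * even if svl_active_pclass changes concurrently. In outline:
 *
 *	mutex_enter(&vlun->svl_mutex);
 *	(void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
 *	    sizeof (active_pclass_copy));
 *	mutex_exit(&vlun->svl_mutex);
 *	... the rest of the failover logic uses active_pclass_copy only ...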
6716 */ 6717 mutex_enter(&vlun->svl_mutex); 6718 if (vlun->svl_active_pclass != NULL) { 6719 (void) strlcpy(active_pclass_copy, vlun->svl_active_pclass, 6720 sizeof (active_pclass_copy)); 6721 active_pclass_ptr = &active_pclass_copy[0]; 6722 mutex_exit(&vlun->svl_mutex); 6723 if (vhci_quiesce_paths(vdip, cdip, vlun, guid, 6724 active_pclass_ptr) != 0) { 6725 retval = MDI_FAILURE; 6726 } 6727 } else { 6728 /* 6729 * This can happen only when the available path discovered 6730 * to the device is a STANDBY path. 6731 */ 6732 mutex_exit(&vlun->svl_mutex); 6733 active_pclass_copy[0] = '\0'; 6734 active_pclass_ptr = NULL; 6735 } 6736 6737 sfo = vlun->svl_fops; 6738 ASSERT(sfo != NULL); 6739 pclass1 = s_pclass = active_pclass_ptr; 6740 VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid, 6741 (s_pclass == NULL ? "<none>" : s_pclass))); 6742 6743 next_pathclass: 6744 6745 rval = sfo->sfo_pathclass_next(pclass1, &pclass2, 6746 vlun->svl_fops_ctpriv); 6747 if (rval == ENOENT) { 6748 if (s_pclass == NULL) { 6749 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): " 6750 "failed, no more pathclasses\n", guid)); 6751 goto done; 6752 } else { 6753 (void) sfo->sfo_pathclass_next(NULL, &pclass2, 6754 vlun->svl_fops_ctpriv); 6755 } 6756 } else if (rval == EINVAL) { 6757 vhci_log(CE_NOTE, vdip, "!Failover operation failed for " 6758 "device %s (GUID %s): Invalid path-class %s", 6759 ddi_node_name(cdip), guid, 6760 ((pclass1 == NULL) ? "<none>" : pclass1)); 6761 goto done; 6762 } 6763 if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) { 6764 /* 6765 * paths are not currently available 6766 */ 6767 vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable" 6768 " for device %s (GUID %s)", 6769 ddi_node_name(cdip), guid); 6770 goto done; 6771 } 6772 pip = npip = NULL; 6773 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering " 6774 "%s as failover destination\n", guid, pclass2)); 6775 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip); 6776 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 6777 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no " 6778 "STANDBY paths found (status:%x)!\n", guid, sps)); 6779 pclass1 = pclass2; 6780 goto next_pathclass; 6781 } 6782 do { 6783 pclass = NULL; 6784 if ((mdi_prop_lookup_string(npip, "path-class", 6785 &pclass) != MDI_SUCCESS) || (strcmp(pclass2, 6786 pclass) != 0)) { 6787 VHCI_DEBUG(1, (CE_NOTE, NULL, 6788 "!vhci_failover(5.5)(%s): skipping path " 6789 "%p(%s)...\n", guid, (void *)npip, pclass)); 6790 pip = npip; 6791 sps = mdi_select_path(cdip, NULL, 6792 MDI_SELECT_STANDBY_PATH, pip, &npip); 6793 mdi_rele_path(pip); 6794 (void) mdi_prop_free(pclass); 6795 continue; 6796 } 6797 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 6798 6799 /* 6800 * Issue a READ at a non-zero block on this STANDBY path. 6801 * Purple returns 6802 * 1. RESERVATION_CONFLICT if a reservation is pending 6803 * 2. a POR check condition if a reset happened 6804 * 3. a failover check condition if one is already in progress. 
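 *
 * The probe below is a one-block SCMD_READ at LBA 1, built with
 * scsi_setup_cdb(..., SCMD_READ, 1, 1, 0) and issued FLAG_NOINTR so
 * scsi_transport() runs it polled. The outcomes above map onto the
 * flags used by this loop: STATUS_GOOD means the LUN has already
 * failed over (activation_done), STATUS_RESERVATION_CONFLICT sets
 * reserve_pending, and a check condition with sense key UNIT ATTENTION
 * and ASC 0x29 (power-on/reset) is retried once via UA_condition.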
6805 */ 6806 reserve_pending = 0; 6807 check_condition = 0; 6808 UA_condition = 0; 6809 6810 bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address, 6811 (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL); 6812 if (!bp) { 6813 VHCI_DEBUG(1, (CE_NOTE, NULL, 6814 "vhci_failover !No resources (buf)\n")); 6815 mdi_rele_path(npip); 6816 goto done; 6817 } 6818 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, 6819 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 6820 PKT_CONSISTENT, NULL, NULL); 6821 if (pkt) { 6822 (void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t) 6823 pkt->pkt_cdbp, SCMD_READ, 1, 1, 0); 6824 pkt->pkt_flags = FLAG_NOINTR; 6825 check_path_again: 6826 pkt->pkt_path_instance = mdi_pi_get_path_instance(npip); 6827 pkt->pkt_time = 3*30; 6828 6829 if (scsi_transport(pkt) == TRAN_ACCEPT) { 6830 switch (pkt->pkt_reason) { 6831 case CMD_CMPLT: 6832 switch (SCBP_C(pkt)) { 6833 case STATUS_GOOD: 6834 /* Already failed over */ 6835 activation_done = 1; 6836 break; 6837 case STATUS_RESERVATION_CONFLICT: 6838 reserve_pending = 1; 6839 break; 6840 case STATUS_CHECK: 6841 check_condition = 1; 6842 break; 6843 } 6844 } 6845 } 6846 if (check_condition && 6847 (pkt->pkt_state & STATE_ARQ_DONE)) { 6848 uint8_t *sns, skey, asc, ascq; 6849 sns = (uint8_t *) 6850 &(((struct scsi_arq_status *)(uintptr_t) 6851 (pkt->pkt_scbp))->sts_sensedata); 6852 skey = scsi_sense_key(sns); 6853 asc = scsi_sense_asc(sns); 6854 ascq = scsi_sense_ascq(sns); 6855 if (skey == KEY_UNIT_ATTENTION && 6856 asc == 0x29) { 6857 /* Already failed over */ 6858 VHCI_DEBUG(1, (CE_NOTE, NULL, 6859 "!vhci_failover(7)(%s): " 6860 "path 0x%p POR UA condition\n", 6861 guid, (void *)npip)); 6862 if (UA_condition == 0) { 6863 UA_condition = 1; 6864 goto check_path_again; 6865 } 6866 } else { 6867 activation_done = 0; 6868 VHCI_DEBUG(1, (CE_NOTE, NULL, 6869 "!vhci_failover(%s): path 0x%p " 6870 "unhandled chkcond %x %x %x\n", 6871 guid, (void *)npip, skey, 6872 asc, ascq)); 6873 } 6874 } 6875 scsi_destroy_pkt(pkt); 6876 } 6877 scsi_free_consistent_buf(bp); 6878 6879 if (activation_done) { 6880 mdi_rele_path(npip); 6881 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6882 "path 0x%p already failedover\n", guid, 6883 (void *)npip)); 6884 break; 6885 } 6886 if (reserve_pending && (vlun->svl_xlf_capable == 0)) { 6887 (void) vhci_recovery_reset(vlun, 6888 &svp->svp_psd->sd_address, 6889 FALSE, VHCI_DEPTH_ALL); 6890 } 6891 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): " 6892 "activating path 0x%p(psd:%p)\n", guid, (void *)npip, 6893 (void *)svp->svp_psd)); 6894 if (sfo->sfo_path_activate(svp->svp_psd, pclass2, 6895 vlun->svl_fops_ctpriv) == 0) { 6896 activation_done = 1; 6897 mdi_rele_path(npip); 6898 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6899 "path 0x%p successfully activated\n", guid, 6900 (void *)npip)); 6901 break; 6902 } 6903 pip = npip; 6904 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, 6905 pip, &npip); 6906 mdi_rele_path(pip); 6907 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 6908 if (activation_done == 0) { 6909 pclass1 = pclass2; 6910 goto next_pathclass; 6911 } 6912 6913 /* 6914 * if we are here, we have succeeded in activating path npip of 6915 * pathclass pclass2; let us validate all paths of pclass2 by 6916 * "ping"-ing each one and mark the good ones ONLINE 6917 * Also, set the state of the paths belonging to the previously 6918 * active pathclass to STANDBY 6919 */ 6920 pip = npip = NULL; 6921 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 6922 
MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH), 6923 NULL, &npip); 6924 if (npip == NULL || sps != MDI_SUCCESS) { 6925 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for " 6926 "device %s (GUID %s): paths may be busy\n", 6927 ddi_node_name(cdip), guid)); 6928 goto done; 6929 } 6930 do { 6931 (void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state); 6932 if (mdi_prop_lookup_string(npip, "path-class", &pclass) 6933 != MDI_SUCCESS) { 6934 pip = npip; 6935 sps = mdi_select_path(cdip, NULL, 6936 (MDI_SELECT_ONLINE_PATH | 6937 MDI_SELECT_STANDBY_PATH | 6938 MDI_SELECT_USER_DISABLE_PATH), 6939 pip, &npip); 6940 mdi_rele_path(pip); 6941 continue; 6942 } 6943 if (strcmp(pclass, pclass2) == 0) { 6944 if (pi_state == MDI_PATHINFO_STATE_STANDBY) { 6945 svp = (scsi_vhci_priv_t *) 6946 mdi_pi_get_vhci_private(npip); 6947 VHCI_DEBUG(1, (CE_NOTE, NULL, 6948 "!vhci_failover(8)(%s): " 6949 "pinging path 0x%p\n", 6950 guid, (void *)npip)); 6951 if (sfo->sfo_path_ping(svp->svp_psd, 6952 vlun->svl_fops_ctpriv) == 1) { 6953 mdi_pi_set_state(npip, 6954 MDI_PATHINFO_STATE_ONLINE); 6955 VHCI_DEBUG(1, (CE_NOTE, NULL, 6956 "!vhci_failover(9)(%s): " 6957 "path 0x%p ping successful, " 6958 "marked online\n", guid, 6959 (void *)npip)); 6960 MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO); 6961 } 6962 } 6963 } else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass) 6964 == 0)) { 6965 if (pi_state == MDI_PATHINFO_STATE_ONLINE) { 6966 mdi_pi_set_state(npip, 6967 MDI_PATHINFO_STATE_STANDBY); 6968 VHCI_DEBUG(1, (CE_NOTE, NULL, 6969 "!vhci_failover(10)(%s): path 0x%p marked " 6970 "STANDBY\n", guid, (void *)npip)); 6971 MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM); 6972 } 6973 } 6974 (void) mdi_prop_free(pclass); 6975 pip = npip; 6976 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 6977 MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH), 6978 pip, &npip); 6979 mdi_rele_path(pip); 6980 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 6981 6982 /* 6983 * Update the AccessState of related MP-API TPGs 6984 */ 6985 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 6986 6987 vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully " 6988 "for device %s (GUID %s): failed over from %s to %s", 6989 ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" : 6990 s_pclass), pclass2); 6991 ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP); 6992 (void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1)); 6993 mutex_enter(&vlun->svl_mutex); 6994 ptr2 = vlun->svl_active_pclass; 6995 vlun->svl_active_pclass = ptr1; 6996 mutex_exit(&vlun->svl_mutex); 6997 if (ptr2) { 6998 kmem_free(ptr2, strlen(ptr2)+1); 6999 } 7000 mutex_enter(&vhci->vhci_mutex); 7001 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 7002 &vhci->vhci_reset_notify_listf); 7003 /* All reservations are cleared upon these resets. */ 7004 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 7005 mutex_exit(&vhci->vhci_mutex); 7006 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active " 7007 "pathclass for %s is now %s\n", guid, pclass2)); 7008 retval = MDI_SUCCESS; 7009 7010 done: 7011 if (flags == MDI_FAILOVER_ASYNC) { 7012 VHCI_RELEASE_LUN(vlun); 7013 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! " 7014 "releasing lun, as failover was ASYNC\n")); 7015 } else { 7016 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! " 7017 "NOT releasing lun, as failover was SYNC\n")); 7018 } 7019 return (retval); 7020 } 7021 7022 /* 7023 * vhci_client_attached is called after the successful attach of a 7024 * client devinfo node. 
7025 */ 7026 static void 7027 vhci_client_attached(dev_info_t *cdip) 7028 { 7029 mdi_pathinfo_t *pip; 7030 int circular; 7031 7032 /* 7033 * At this point the client has attached and it's instance number is 7034 * valid, so we can set up kstats. We need to do this here because it 7035 * is possible for paths to go online prior to client attach, in which 7036 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online 7037 * was a noop. 7038 */ 7039 ndi_devi_enter(cdip, &circular); 7040 for (pip = mdi_get_next_phci_path(cdip, NULL); pip; 7041 pip = mdi_get_next_phci_path(cdip, pip)) 7042 vhci_kstat_create_pathinfo(pip); 7043 ndi_devi_exit(cdip, circular); 7044 } 7045 7046 /* 7047 * quiesce all of the online paths 7048 */ 7049 static int 7050 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun, 7051 char *guid, char *active_pclass_ptr) 7052 { 7053 scsi_vhci_priv_t *svp; 7054 char *s_pclass = NULL; 7055 mdi_pathinfo_t *npip, *pip; 7056 int sps; 7057 7058 /* quiesce currently active paths */ 7059 s_pclass = NULL; 7060 pip = npip = NULL; 7061 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip); 7062 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 7063 return (1); 7064 } 7065 do { 7066 if (mdi_prop_lookup_string(npip, "path-class", 7067 &s_pclass) != MDI_SUCCESS) { 7068 mdi_rele_path(npip); 7069 vhci_log(CE_NOTE, vdip, "!Failover operation failed " 7070 "for device %s (GUID %s) due to an internal " 7071 "error", ddi_node_name(cdip), guid); 7072 return (1); 7073 } 7074 if (strcmp(s_pclass, active_pclass_ptr) == 0) { 7075 /* 7076 * quiesce path. Free s_pclass since 7077 * we don't need it anymore 7078 */ 7079 VHCI_DEBUG(1, (CE_NOTE, NULL, 7080 "!vhci_failover(2)(%s): failing over " 7081 "from %s; quiescing path %p\n", 7082 guid, s_pclass, (void *)npip)); 7083 (void) mdi_prop_free(s_pclass); 7084 svp = (scsi_vhci_priv_t *) 7085 mdi_pi_get_vhci_private(npip); 7086 if (svp == NULL) { 7087 VHCI_DEBUG(1, (CE_NOTE, NULL, 7088 "!vhci_failover(2.5)(%s): no " 7089 "client priv! 
%p offlined?\n", 7090 guid, (void *)npip)); 7091 pip = npip; 7092 sps = mdi_select_path(cdip, NULL, 7093 MDI_SELECT_ONLINE_PATH, pip, &npip); 7094 mdi_rele_path(pip); 7095 continue; 7096 } 7097 if (scsi_abort(&svp->svp_psd->sd_address, NULL) 7098 == 0) { 7099 (void) vhci_recovery_reset(vlun, 7100 &svp->svp_psd->sd_address, FALSE, 7101 VHCI_DEPTH_TARGET); 7102 } 7103 mutex_enter(&svp->svp_mutex); 7104 if (svp->svp_cmds == 0) { 7105 VHCI_DEBUG(1, (CE_NOTE, NULL, 7106 "!vhci_failover(3)(%s):" 7107 "quiesced path %p\n", guid, (void *)npip)); 7108 } else { 7109 while (svp->svp_cmds != 0) { 7110 cv_wait(&svp->svp_cv, &svp->svp_mutex); 7111 VHCI_DEBUG(1, (CE_NOTE, NULL, 7112 "!vhci_failover(3.cv)(%s):" 7113 "quiesced path %p\n", guid, 7114 (void *)npip)); 7115 } 7116 } 7117 mutex_exit(&svp->svp_mutex); 7118 } else { 7119 /* 7120 * make sure we freeup the memory 7121 */ 7122 (void) mdi_prop_free(s_pclass); 7123 } 7124 pip = npip; 7125 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, 7126 pip, &npip); 7127 mdi_rele_path(pip); 7128 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 7129 return (0); 7130 } 7131 7132 static struct scsi_vhci_lun * 7133 vhci_lun_lookup(dev_info_t *tgt_dip) 7134 { 7135 return ((struct scsi_vhci_lun *) 7136 mdi_client_get_vhci_private(tgt_dip)); 7137 } 7138 7139 static struct scsi_vhci_lun * 7140 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc) 7141 { 7142 struct scsi_vhci_lun *svl; 7143 7144 if (svl = vhci_lun_lookup(tgt_dip)) { 7145 return (svl); 7146 } 7147 7148 svl = kmem_zalloc(sizeof (*svl), KM_SLEEP); 7149 svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP); 7150 (void) strcpy(svl->svl_lun_wwn, guid); 7151 mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL); 7152 cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL); 7153 sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL); 7154 svl->svl_waiting_for_activepath = 1; 7155 svl->svl_sector_size = 1; 7156 mdi_client_set_vhci_private(tgt_dip, svl); 7157 *didalloc = 1; 7158 VHCI_DEBUG(1, (CE_NOTE, NULL, 7159 "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n", 7160 guid, (void *)svl)); 7161 return (svl); 7162 } 7163 7164 static void 7165 vhci_lun_free(dev_info_t *tgt_dip) 7166 { 7167 struct scsi_vhci_lun *dvlp; 7168 char *guid; 7169 struct scsi_device *sd; 7170 7171 /* 7172 * The scsi_device was set to driver private during child node 7173 * initialization in the scsi_hba_bus_ctl(). 
7174 */ 7175 sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip); 7176 7177 dvlp = (struct scsi_vhci_lun *) 7178 mdi_client_get_vhci_private(tgt_dip); 7179 ASSERT(dvlp != NULL); 7180 7181 mdi_client_set_vhci_private(tgt_dip, NULL); 7182 7183 guid = dvlp->svl_lun_wwn; 7184 ASSERT(guid != NULL); 7185 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid)); 7186 7187 mutex_enter(&dvlp->svl_mutex); 7188 if (dvlp->svl_active_pclass != NULL) { 7189 kmem_free(dvlp->svl_active_pclass, 7190 strlen(dvlp->svl_active_pclass)+1); 7191 } 7192 dvlp->svl_active_pclass = NULL; 7193 mutex_exit(&dvlp->svl_mutex); 7194 7195 if (dvlp->svl_lun_wwn != NULL) { 7196 kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1); 7197 } 7198 dvlp->svl_lun_wwn = NULL; 7199 7200 if (dvlp->svl_fops_name) { 7201 kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1); 7202 } 7203 dvlp->svl_fops_name = NULL; 7204 7205 if (dvlp->svl_fops_ctpriv != NULL) { 7206 dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv); 7207 } 7208 7209 if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG) 7210 taskq_destroy(dvlp->svl_taskq); 7211 7212 mutex_destroy(&dvlp->svl_mutex); 7213 cv_destroy(&dvlp->svl_cv); 7214 sema_destroy(&dvlp->svl_pgr_sema); 7215 kmem_free(dvlp, sizeof (*dvlp)); 7216 /* 7217 * vhci_lun_free may be called before the tgt_dip 7218 * initialization so check if the sd is NULL. 7219 */ 7220 if (sd != NULL) 7221 scsi_device_hba_private_set(sd, NULL); 7222 } 7223 7224 int 7225 vhci_do_scsi_cmd(struct scsi_pkt *pkt) 7226 { 7227 int err = 0; 7228 int retry_cnt = 0; 7229 uint8_t *sns, skey; 7230 7231 #ifdef DEBUG 7232 if (vhci_debug > 5) { 7233 vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip, 7234 CE_WARN, "Vhci command", pkt->pkt_cdbp); 7235 } 7236 #endif 7237 7238 retry: 7239 err = scsi_poll(pkt); 7240 if (err) { 7241 if (pkt->pkt_cdbp[0] == SCMD_RELEASE) { 7242 if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) { 7243 VHCI_DEBUG(1, (CE_NOTE, NULL, 7244 "!v_s_do_s_c: RELEASE conflict\n")); 7245 return (0); 7246 } 7247 } 7248 if (retry_cnt++ < 3) { 7249 VHCI_DEBUG(1, (CE_WARN, NULL, 7250 "!v_s_do_s_c:retry packet 0x%p " 7251 "status 0x%x reason %s", 7252 (void *)pkt, SCBP_C(pkt), 7253 scsi_rname(pkt->pkt_reason))); 7254 if ((pkt->pkt_reason == CMD_CMPLT) && 7255 (SCBP_C(pkt) == STATUS_CHECK) && 7256 (pkt->pkt_state & STATE_ARQ_DONE)) { 7257 sns = (uint8_t *) 7258 &(((struct scsi_arq_status *)(uintptr_t) 7259 (pkt->pkt_scbp))->sts_sensedata); 7260 skey = scsi_sense_key(sns); 7261 VHCI_DEBUG(1, (CE_WARN, NULL, 7262 "!v_s_do_s_c:retry " 7263 "packet 0x%p sense data %s", (void *)pkt, 7264 scsi_sname(skey))); 7265 } 7266 goto retry; 7267 } 7268 VHCI_DEBUG(1, (CE_WARN, NULL, 7269 "!v_s_do_s_c: failed transport 0x%p 0x%x", 7270 (void *)pkt, SCBP_C(pkt))); 7271 return (0); 7272 } 7273 7274 switch (pkt->pkt_reason) { 7275 case CMD_TIMEOUT: 7276 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed " 7277 "out (pkt 0x%p)", (void *)pkt)); 7278 return (0); 7279 case CMD_CMPLT: 7280 switch (SCBP_C(pkt)) { 7281 case STATUS_GOOD: 7282 break; 7283 case STATUS_CHECK: 7284 if (pkt->pkt_state & STATE_ARQ_DONE) { 7285 sns = (uint8_t *)&((( 7286 struct scsi_arq_status *) 7287 (uintptr_t) 7288 (pkt->pkt_scbp))-> 7289 sts_sensedata); 7290 skey = scsi_sense_key(sns); 7291 if ((skey == 7292 KEY_UNIT_ATTENTION) || 7293 (skey == 7294 KEY_NOT_READY)) { 7295 /* 7296 * clear unit attn. 
7297 */ 7298 7299 VHCI_DEBUG(1, 7300 (CE_WARN, NULL, 7301 "!v_s_do_s_c: " 7302 "retry " 7303 "packet 0x%p sense " 7304 "data %s", 7305 (void *)pkt, 7306 scsi_sname 7307 (skey))); 7308 goto retry; 7309 } 7310 VHCI_DEBUG(4, (CE_WARN, NULL, 7311 "!ARQ while " 7312 "transporting " 7313 "(pkt 0x%p)", 7314 (void *)pkt)); 7315 return (0); 7316 } 7317 return (0); 7318 default: 7319 VHCI_DEBUG(1, (CE_WARN, NULL, 7320 "!Bad status returned " 7321 "(pkt 0x%p, status %x)", 7322 (void *)pkt, SCBP_C(pkt))); 7323 return (0); 7324 } 7325 break; 7326 case CMD_INCOMPLETE: 7327 case CMD_RESET: 7328 case CMD_ABORTED: 7329 case CMD_TRAN_ERR: 7330 if (retry_cnt++ < 1) { 7331 VHCI_DEBUG(1, (CE_WARN, NULL, 7332 "!v_s_do_s_c: retry packet 0x%p %s", 7333 (void *)pkt, scsi_rname(pkt->pkt_reason))); 7334 goto retry; 7335 } 7336 /* FALLTHROUGH */ 7337 default: 7338 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not " 7339 "complete successfully (pkt 0x%p," 7340 "reason %x)", (void *)pkt, pkt->pkt_reason)); 7341 return (0); 7342 } 7343 return (1); 7344 } 7345 7346 static int 7347 vhci_quiesce_lun(struct scsi_vhci_lun *vlun) 7348 { 7349 mdi_pathinfo_t *pip, *spip; 7350 dev_info_t *cdip; 7351 struct scsi_vhci_priv *svp; 7352 mdi_pathinfo_state_t pstate; 7353 uint32_t p_ext_state; 7354 int circular; 7355 7356 cdip = vlun->svl_dip; 7357 pip = spip = NULL; 7358 ndi_devi_enter(cdip, &circular); 7359 pip = mdi_get_next_phci_path(cdip, NULL); 7360 while (pip != NULL) { 7361 (void) mdi_pi_get_state2(pip, &pstate, &p_ext_state); 7362 if (pstate != MDI_PATHINFO_STATE_ONLINE) { 7363 spip = pip; 7364 pip = mdi_get_next_phci_path(cdip, spip); 7365 continue; 7366 } 7367 mdi_hold_path(pip); 7368 ndi_devi_exit(cdip, circular); 7369 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 7370 mutex_enter(&svp->svp_mutex); 7371 while (svp->svp_cmds != 0) { 7372 if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex, 7373 ddi_get_lbolt() + drv_usectohz 7374 (vhci_path_quiesce_timeout * 1000000)) == -1) { 7375 mutex_exit(&svp->svp_mutex); 7376 mdi_rele_path(pip); 7377 VHCI_DEBUG(1, (CE_WARN, NULL, 7378 "Quiesce of lun is not successful " 7379 "vlun: 0x%p.", (void *)vlun)); 7380 return (0); 7381 } 7382 } 7383 mutex_exit(&svp->svp_mutex); 7384 ndi_devi_enter(cdip, &circular); 7385 spip = pip; 7386 pip = mdi_get_next_phci_path(cdip, spip); 7387 mdi_rele_path(spip); 7388 } 7389 ndi_devi_exit(cdip, circular); 7390 return (1); 7391 } 7392 7393 static int 7394 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp) 7395 { 7396 scsi_vhci_lun_t *vlun; 7397 vhci_prout_t *prout; 7398 int rval, success; 7399 mdi_pathinfo_t *pip, *npip; 7400 scsi_vhci_priv_t *osvp; 7401 dev_info_t *cdip; 7402 uchar_t cdb_1; 7403 uchar_t temp_res_key[MHIOC_RESV_KEY_SIZE]; 7404 7405 7406 /* 7407 * see if there are any other paths available; if none, 7408 * then there is nothing to do. 7409 */ 7410 cdip = svp->svp_svl->svl_dip; 7411 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7412 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7413 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7414 VHCI_DEBUG(4, (CE_NOTE, NULL, 7415 "%s%d: vhci_pgr_validate_and_register: first path\n", 7416 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7417 return (1); 7418 } 7419 7420 vlun = svp->svp_svl; 7421 prout = &vlun->svl_prout; 7422 ASSERT(vlun->svl_pgr_active != 0); 7423 7424 /* 7425 * When the path was busy/offlined, some other host might have 7426 * cleared this key. Validate key on some other path first. 7427 * If it fails, return failure. 
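 *
 * For reference (per SPC): in a PERSISTENT RESERVE OUT CDB the service
 * action occupies the low five bits of byte 1, which is why the
 * conversion below simply masks the saved byte, e.g.:
 *
 *	cdb_1 = vlun->svl_cdb[1];	save the caller's byte
 *	vlun->svl_cdb[1] &= 0xe0;	strip Register-and-Ignore down to
 *					a plain Register for validation
 *	...				validate the key on another path
 *	vlun->svl_cdb[1] = cdb_1;	restore the original CDB byte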
7428 */ 7429 7430 npip = pip; 7431 pip = NULL; 7432 success = 0; 7433 7434 /* Save the res key */ 7435 bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE); 7436 7437 /* 7438 * Sometimes CDB from application can be a Register_And_Ignore. 7439 * Instead of validation, this cdb would result in force registration. 7440 * Convert it to normal cdb for validation. 7441 * After that be sure to restore the cdb. 7442 */ 7443 cdb_1 = vlun->svl_cdb[1]; 7444 vlun->svl_cdb[1] &= 0xe0; 7445 7446 do { 7447 osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 7448 if (osvp == NULL) { 7449 VHCI_DEBUG(4, (CE_NOTE, NULL, 7450 "vhci_pgr_validate_and_register: no " 7451 "client priv! 0x%p offlined?\n", 7452 (void *)npip)); 7453 goto next_path_1; 7454 } 7455 7456 if (osvp == svp) { 7457 VHCI_DEBUG(4, (CE_NOTE, NULL, 7458 "vhci_pgr_validate_and_register: same svp 0x%p" 7459 " npip 0x%p vlun 0x%p\n", 7460 (void *)svp, (void *)npip, (void *)vlun)); 7461 goto next_path_1; 7462 } 7463 7464 VHCI_DEBUG(4, (CE_NOTE, NULL, 7465 "vhci_pgr_validate_and_register: First validate on" 7466 " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy" 7467 " cdb1 %x\n", (void *)osvp, (void *)vlun, 7468 (void *)curthread, vlun->svl_cdb[1])); 7469 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:"); 7470 7471 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7472 7473 VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy", 7474 (void *)vlun)); 7475 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7476 7477 rval = vhci_do_prout(osvp); 7478 if (rval == 1) { 7479 VHCI_DEBUG(4, (CE_NOTE, NULL, 7480 "%s%d: vhci_pgr_validate_and_register: key" 7481 " validated thread 0x%p\n", ddi_driver_name(cdip), 7482 ddi_get_instance(cdip), (void *)curthread)); 7483 pip = npip; 7484 success = 1; 7485 break; 7486 } else { 7487 VHCI_DEBUG(4, (CE_NOTE, NULL, 7488 "vhci_pgr_validate_and_register: First validation" 7489 " on osvp 0x%p failed %x\n", (void *)osvp, rval)); 7490 vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:"); 7491 } 7492 7493 /* 7494 * Try other paths 7495 */ 7496 next_path_1: 7497 pip = npip; 7498 rval = mdi_select_path(cdip, NULL, 7499 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7500 pip, &npip); 7501 mdi_rele_path(pip); 7502 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7503 7504 7505 /* Be sure to restore original cdb */ 7506 vlun->svl_cdb[1] = cdb_1; 7507 7508 /* Restore the res_key */ 7509 bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7510 7511 /* 7512 * If key could not be registered on any path for the first time, 7513 * return success as online should still continue. 
7514 */ 7515 if (success == 0) { 7516 return (1); 7517 } 7518 7519 ASSERT(pip != NULL); 7520 7521 /* 7522 * Force register on new path 7523 */ 7524 cdb_1 = vlun->svl_cdb[1]; /* store the cdb */ 7525 7526 vlun->svl_cdb[1] &= 0xe0; 7527 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 7528 7529 vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: "); 7530 7531 bcopy(prout->active_service_key, prout->service_key, 7532 MHIOC_RESV_KEY_SIZE); 7533 bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7534 7535 vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: "); 7536 7537 rval = vhci_do_prout(svp); 7538 vlun->svl_cdb[1] = cdb_1; /* restore the cdb */ 7539 if (rval != 1) { 7540 VHCI_DEBUG(4, (CE_NOTE, NULL, 7541 "vhci_pgr_validate_and_register: register on new" 7542 " path 0x%p svp 0x%p failed %x\n", 7543 (void *)pip, (void *)svp, rval)); 7544 vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: "); 7545 mdi_rele_path(pip); 7546 return (0); 7547 } 7548 7549 if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) { 7550 VHCI_DEBUG(4, (CE_NOTE, NULL, 7551 "vhci_pgr_validate_and_register: zero service key\n")); 7552 mdi_rele_path(pip); 7553 return (rval); 7554 } 7555 7556 /* 7557 * While the key was force registered, some other host might have 7558 * cleared the key. Re-validate key on another pre-existing path 7559 * before declaring success. 7560 */ 7561 npip = pip; 7562 pip = NULL; 7563 7564 /* 7565 * Sometimes CDB from application can be Register and Ignore. 7566 * Instead of validation, it would result in force registration. 7567 * Convert it to normal cdb for validation. 7568 * After that be sure to restore the cdb. 7569 */ 7570 cdb_1 = vlun->svl_cdb[1]; 7571 vlun->svl_cdb[1] &= 0xe0; 7572 success = 0; 7573 7574 do { 7575 osvp = (scsi_vhci_priv_t *) 7576 mdi_pi_get_vhci_private(npip); 7577 if (osvp == NULL) { 7578 VHCI_DEBUG(4, (CE_NOTE, NULL, 7579 "vhci_pgr_validate_and_register: no " 7580 "client priv! 0x%p offlined?\n", 7581 (void *)npip)); 7582 goto next_path_2; 7583 } 7584 7585 if (osvp == svp) { 7586 VHCI_DEBUG(4, (CE_NOTE, NULL, 7587 "vhci_pgr_validate_and_register: same osvp 0x%p" 7588 " npip 0x%p vlun 0x%p\n", 7589 (void *)svp, (void *)npip, (void *)vlun)); 7590 goto next_path_2; 7591 } 7592 7593 VHCI_DEBUG(4, (CE_NOTE, NULL, 7594 "vhci_pgr_validate_and_register: Re-validation on" 7595 " osvp 0x%p being done. 
vlun 0x%p Before bcopy cdb1 %x\n", 7596 (void *)osvp, (void *)vlun, vlun->svl_cdb[1])); 7597 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7598 7599 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7600 7601 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7602 7603 rval = vhci_do_prout(osvp); 7604 if (rval == 1) { 7605 VHCI_DEBUG(4, (CE_NOTE, NULL, 7606 "%s%d: vhci_pgr_validate_and_register: key" 7607 " validated thread 0x%p\n", ddi_driver_name(cdip), 7608 ddi_get_instance(cdip), (void *)curthread)); 7609 pip = npip; 7610 success = 1; 7611 break; 7612 } else { 7613 VHCI_DEBUG(4, (CE_NOTE, NULL, 7614 "vhci_pgr_validate_and_register: Re-validation on" 7615 " osvp 0x%p failed %x\n", (void *)osvp, rval)); 7616 vhci_print_prout_keys(vlun, 7617 "v_pgr_val_reg: reval failed: "); 7618 } 7619 7620 /* 7621 * Try other paths 7622 */ 7623 next_path_2: 7624 pip = npip; 7625 rval = mdi_select_path(cdip, NULL, 7626 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7627 pip, &npip); 7628 mdi_rele_path(pip); 7629 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7630 7631 /* Be sure to restore original cdb */ 7632 vlun->svl_cdb[1] = cdb_1; 7633 7634 if (success == 1) { 7635 /* Successfully validated registration */ 7636 mdi_rele_path(pip); 7637 return (1); 7638 } 7639 7640 VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed")); 7641 7642 /* 7643 * key invalid, back out by registering key value of 0 7644 */ 7645 VHCI_DEBUG(4, (CE_NOTE, NULL, 7646 "vhci_pgr_validate_and_register: backout on" 7647 " svp 0x%p being done\n", (void *)svp)); 7648 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7649 7650 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7651 bzero(prout->service_key, MHIOC_RESV_KEY_SIZE); 7652 7653 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7654 7655 /* 7656 * Get a new path 7657 */ 7658 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7659 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7660 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7661 VHCI_DEBUG(4, (CE_NOTE, NULL, 7662 "%s%d: vhci_pgr_validate_and_register: no valid pip\n", 7663 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7664 return (0); 7665 } 7666 7667 if ((rval = vhci_do_prout(svp)) != 1) { 7668 VHCI_DEBUG(4, (CE_NOTE, NULL, 7669 "vhci_pgr_validate_and_register: backout on" 7670 " svp 0x%p failed\n", (void *)svp)); 7671 vhci_print_prout_keys(vlun, "backout failed"); 7672 7673 VHCI_DEBUG(4, (CE_WARN, NULL, 7674 "%s%d: vhci_pgr_validate_and_register: key" 7675 " validation and backout failed", ddi_driver_name(cdip), 7676 ddi_get_instance(cdip))); 7677 if (rval == VHCI_PGR_ILLEGALOP) { 7678 VHCI_DEBUG(4, (CE_WARN, NULL, 7679 "%s%d: vhci_pgr_validate_and_register: key" 7680 " already cleared", ddi_driver_name(cdip), 7681 ddi_get_instance(cdip))); 7682 rval = 1; 7683 } else 7684 rval = 0; 7685 } else { 7686 VHCI_DEBUG(4, (CE_NOTE, NULL, 7687 "%s%d: vhci_pgr_validate_and_register: key" 7688 " validation failed, key backed out\n", 7689 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7690 vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: "); 7691 } 7692 mdi_rele_path(pip); 7693 7694 return (rval); 7695 } 7696 7697 /* 7698 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures 7699 * that vhci_scsi_start is not called in interrupt context. 7700 * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we 7701 * need to complete the command if something goes wrong. 
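 *
 * A dispatch site would look roughly like the sketch below; this is
 * illustrative only, and the vhci_taskq field name is an assumption
 * rather than something defined in this section:
 *
 *	(void) taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
 *	    (void *)vpkt, TQ_SLEEP);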
/*
 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures
 * that vhci_scsi_start is not called in interrupt context.
 * Since the upper layer has already been given TRAN_ACCEPT when the command
 * was dispatched, we need to complete the command ourselves if something
 * goes wrong.
 */
static void
vhci_dispatch_scsi_start(void *arg)
{
	struct vhci_pkt	*vpkt = (struct vhci_pkt *)arg;
	struct scsi_pkt	*tpkt = vpkt->vpkt_tgt_pkt;
	int		rval = TRAN_BUSY;

	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
	    " scsi-2 reserve for 0x%p\n",
	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));

	/*
	 * To prevent the taskq from being called recursively we set the
	 * VHCI_PKT_THRU_TASKQ bit in the vhci_pkt_states.
	 */
	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;

	/*
	 * Wait for the transport to become ready to send packets; once it
	 * stops returning TRAN_BUSY we have either TRAN_ACCEPT or a real
	 * error. The vhci_reserve_delay may want to be tuned for other
	 * transports and is therefore a global. Using delay(9F) is safe
	 * here since this routine is called from taskq dispatch and never
	 * from interrupt context.
	 */
	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
		delay(drv_usectohz(vhci_reserve_delay));
	}

	switch (rval) {
	case TRAN_ACCEPT:
		return;

	default:
		/*
		 * This pkt shall be retried, and to ensure another taskq
		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
		 * flag.
		 */
		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;

		/* Ensure that the pkt is retried without a reset */
		tpkt->pkt_reason = CMD_ABORTED;
		tpkt->pkt_statistics |= STAT_ABORTED;
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
		    "TRAN_rval %d returned for dip 0x%p", rval,
		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
		break;
	}

	/*
	 * vpkt_org_vpkt should always be NULL here if the retry command
	 * has been successfully dispatched. If vpkt_org_vpkt != NULL at
	 * this point, it is an error so restore the original vpkt and
	 * return an error to the target driver so it can retry the
	 * command as appropriate.
	 */
	if (vpkt->vpkt_org_vpkt != NULL) {
		struct vhci_pkt		*new_vpkt = vpkt;
		scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
		    mdi_pi_get_vhci_private(vpkt->vpkt_path);

		vpkt = vpkt->vpkt_org_vpkt;

		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;

		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
		    new_vpkt->vpkt_tgt_pkt);

		tpkt = vpkt->vpkt_tgt_pkt;
	}

	scsi_hba_pkt_comp(tpkt);
}
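/*
 * Taskq callback that attempts to fail a LUN back to its preferred path
 * class. It re-checks that the preferred class is not already active
 * before calling mdi_failover(), and logs the outcome.
 */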
static void
vhci_initiate_auto_failback(void *arg)
{
	struct scsi_vhci_lun	*vlun = (struct scsi_vhci_lun *)arg;
	dev_info_t		*vdip, *cdip;
	int			held;

	cdip = vlun->svl_dip;
	vdip = ddi_get_parent(cdip);

	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);

	/*
	 * Perform a final check that the active path class is indeed not
	 * the preferred path class. In the time since the auto failback
	 * was dispatched, an external failover could have been detected
	 * (some other host could have detected this condition and already
	 * triggered the failback). Going ahead with a failover in that
	 * case would negate the whole purpose of auto failback.
	 */
	mutex_enter(&vlun->svl_mutex);
	if (vlun->svl_active_pclass != NULL) {
		char				*best_pclass;
		struct scsi_failover_ops	*fo;

		fo = vlun->svl_fops;

		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
		    vlun->svl_fops_ctpriv);
		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
			mutex_exit(&vlun->svl_mutex);
			VHCI_RELEASE_LUN(vlun);
			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
			    "auto failback for %s as %s pathclass already "
			    "active.\n", vlun->svl_lun_wwn, best_pclass));
			return;
		}
	}
	mutex_exit(&vlun->svl_mutex);
	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
	    == MDI_SUCCESS) {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "succeeded for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	} else {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "failed for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	}
	VHCI_RELEASE_LUN(vlun);
}

#ifdef DEBUG
static void
vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
{
	vhci_clean_print(NULL, 5, "Current PGR Keys",
	    (uchar_t *)prin, numkeys * 8);
}
#endif
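/*
 * Debug aid: dump the four PROUT key buffers (reservation key, service
 * action key, and their "active" copies) kept in the per-LUN vhci_prout_t,
 * one bracketed hex byte per field.
 */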
static void
vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
{
	int		i;
	vhci_prout_t	*prout;
	char		buf1[4*MHIOC_RESV_KEY_SIZE + 1];
	char		buf2[4*MHIOC_RESV_KEY_SIZE + 1];
	char		buf3[4*MHIOC_RESV_KEY_SIZE + 1];
	char		buf4[4*MHIOC_RESV_KEY_SIZE + 1];

	prout = &vlun->svl_prout;

	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf4[4*i], "[%02x]",
		    prout->active_service_key[i]);

	/* Print everything in one call; otherwise the output gets jumbled. */
	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
	    "res_key:           : %s\n"
	    "service_key        : %s\n"
	    "active_res_key     : %s\n"
	    "active_service_key : %s\n",
	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
}

/*
 * Called from vhci_scsi_start to update the pHCI pkt with the target pkt.
 */
static void
vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
{
	ASSERT(vpkt->vpkt_hba_pkt);

	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;

	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
		/*
		 * Polled Command is requested or HBA is in
		 * suspended state
		 */
		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
	} else {
		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
	}
	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
	    vpkt->vpkt_tgt_init_cdblen);
	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;

	/* Re-initialize the following pHCI packet state information */
	vpkt->vpkt_hba_pkt->pkt_state = 0;
	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
	vpkt->vpkt_hba_pkt->pkt_reason = 0;
}

static int
vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
    void *arg, void *result)
{
	int ret = DDI_SUCCESS;

	/*
	 * Generic processing in MPxIO framework
	 */
	ret = mdi_bus_power(parent, impl_arg, op, arg, result);

	switch (ret) {
	case MDI_SUCCESS:
		ret = DDI_SUCCESS;
		break;
	case MDI_FAILURE:
		ret = DDI_FAILURE;
		break;
	default:
		break;
	}

	return (ret);
}
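/*
 * Set a SCSI capability on behalf of the vHCI client. When a pathinfo node
 * is supplied (a path that just became ready for IO) the capability is set
 * on that path only; otherwise it is applied to every online and standby
 * pHCI path of the client device.
 */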
static int
vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip)
{
	dev_info_t		*cdip;
	mdi_pathinfo_t		*npip = NULL;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	int			mps_flag;
	int			rval = 0;

	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
	if (pip) {
		/*
		 * If the call is from vhci_pathinfo_state_change,
		 * then this path was busy and is becoming ready to accept IO.
		 */
		ASSERT(ap != NULL);
		hba = ap->a_hba_tran;
		ASSERT(hba != NULL);
		rval = scsi_ifsetcap(ap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		return (rval);
	}

	/*
	 * Set capability on all the pHCIs.
	 * If any path is busy, then the capability would be set by
	 * vhci_pathinfo_state_change.
	 */

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL,
		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
		    (void *)cdip));
		return (0);
	}

again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (rval);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (rval);
	}

	pap = &svp->svp_psd->sd_address;
	ASSERT(pap != NULL);
	hba = pap->a_hba_tran;
	ASSERT(hba != NULL);

	if (hba->tran_setcap != NULL) {
		rval = scsi_ifsetcap(pap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		/*
		 * Select next path and issue the setcap, repeat
		 * until all paths are exhausted
		 */
		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
			mdi_rele_path(pip);
			return (1);
		}
		mdi_rele_path(pip);
		pip = npip;
		goto again;
	}
	mdi_rele_path(pip);
	return (rval);
}
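/*
 * Bus config entry point. For the _ONE variants the per-device /devices
 * name is first mapped to the client GUID (via vhci_devnm_to_guid) and the
 * request is then handed to the MDI framework; bus unconfig requests are
 * passed straight to the generic NDI implementation below.
 */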
static int
vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **child)
{
	char *guid;

	if (vhci_bus_config_debug)
		flags |= NDI_DEVI_DEBUG;

	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
		guid = vhci_devnm_to_guid((char *)arg);
	else
		guid = NULL;

	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
	    == MDI_SUCCESS)
		return (NDI_SUCCESS);
	else
		return (NDI_FAILURE);
}

static int
vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
	if (vhci_bus_config_debug)
		flags |= NDI_DEVI_DEBUG;

	return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
}

/*
 * Take the original vhci_pkt, create a duplicate of the pkt for resending
 * as though it originated in ssd.
 */
static struct scsi_pkt *
vhci_create_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt		*new_vpkt = NULL;
	struct scsi_pkt		*pkt = NULL;

	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	/*
	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
	 */
	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
	    vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
	if (pkt != NULL) {
		new_vpkt = TGTPKT2VHCIPKT(pkt);

		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;

		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;

		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);

		/*
		 * Save a pointer to the original vhci_pkt
		 */
		new_vpkt->vpkt_org_vpkt = vpkt;
	}

	return (pkt);
}

/*
 * Copy the successful completion information from the hba packet into
 * the original target pkt from the upper layer. Returns the original
 * vpkt and destroys the new vpkt from the internal retry.
 */
static struct vhci_pkt *
vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt		*ret_vpkt = NULL;
	struct scsi_pkt		*tpkt = NULL;
	struct scsi_pkt		*hba_pkt = NULL;
	scsi_vhci_priv_t	*svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	ASSERT(vpkt->vpkt_org_vpkt != NULL);
	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
	    "completed successfully!\n"));

	ret_vpkt = vpkt->vpkt_org_vpkt;
	tpkt = ret_vpkt->vpkt_tgt_pkt;
	hba_pkt = vpkt->vpkt_hba_pkt;

	/*
	 * Copy the good status into the target driver's packet
	 */
	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
	tpkt->pkt_resid = hba_pkt->pkt_resid;
	tpkt->pkt_state = hba_pkt->pkt_state;
	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
	tpkt->pkt_reason = hba_pkt->pkt_reason;

	/*
	 * Destroy the internally created vpkt for the retry
	 */
	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
	    vpkt->vpkt_tgt_pkt);

	return (ret_vpkt);
}

/* restart the request sense request */
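/*
 * The restart is armed from vhci_uscsi_send_sense() via timeout(9F) when
 * the initial scsi_transport() of the REQUEST SENSE packet returns
 * TRAN_BUSY; if the retry below also fails, the original command is
 * completed with EIO.
 */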
static void
vhci_uscsi_restart_sense(void *arg)
{
	struct buf	*rqbp;
	struct buf	*bp;
	struct scsi_pkt	*rqpkt = (struct scsi_pkt *)arg;
	mp_uscsi_cmd_t	*mp_uscmdp;

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));

	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
		/* if it fails - need to wakeup the original command */
		mp_uscmdp = rqpkt->pkt_private;
		bp = mp_uscmdp->cmdbp;
		rqbp = mp_uscmdp->rqbp;
		ASSERT(mp_uscmdp && bp && rqbp);
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		bp->b_resid = bp->b_bcount;
		bioerror(bp, EIO);
		biodone(bp);
	}
}

/*
 * auto-rqsense is not enabled so we have to retrieve the request sense
 * manually.
 */
static int
vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
{
	struct buf	*rqbp, *cmdbp;
	struct scsi_pkt	*rqpkt;
	int		rval = 0;

	cmdbp = mp_uscmdp->cmdbp;
	ASSERT(cmdbp != NULL);

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
	/* set up the packet information and cdb */
	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
		return (-1);
	}

	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
		scsi_free_consistent_buf(rqbp);
		return (-1);
	}

	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);

	mp_uscmdp->rqbp = rqbp;
	rqbp->b_private = mp_uscmdp;
	rqpkt->pkt_flags |= FLAG_SENSING;
	rqpkt->pkt_time = 60;
	rqpkt->pkt_comp = vhci_uscsi_iodone;
	rqpkt->pkt_private = mp_uscmdp;

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
	 */
	if (scsi_pkt_allocated_correctly(rqpkt))
		rqpkt->pkt_path_instance = 0;

	/* get her done */
	switch (scsi_transport(rqpkt)) {
	case TRAN_ACCEPT:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport accepted."));
		break;
	case TRAN_BUSY:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport busy, setting timeout."));
		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
		    (drv_usectohz(5 * 1000000)));
		break;
	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport failed"));
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		rval = -1;
	}

	return (rval);
}

/*
 * done routine for the mpapi uscsi command - this is behaving as though
 * FLAG_DIAGNOSE is set, meaning there are no retries except for a manual
 * request sense.
 */
void
vhci_uscsi_iodone(struct scsi_pkt *pkt)
{
	struct buf		*bp;
	mp_uscsi_cmd_t		*mp_uscmdp;
	struct uscsi_cmd	*uscmdp;
	struct scsi_arq_status	*arqstat;
	int			err;

	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
	uscmdp = mp_uscmdp->uscmdp;
	bp = mp_uscmdp->cmdbp;
	ASSERT(bp != NULL);
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
	/* Save the status and the residual into the uscsi_cmd struct */
	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
	uscmdp->uscsi_resid = bp->b_resid;

	/* return on a very successful command */
	if (pkt->pkt_reason == CMD_CMPLT &&
	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
	    pkt->pkt_resid == 0) {
		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		return;
	}
	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
	    pkt->pkt_reason, pkt->pkt_resid,
	    pkt->pkt_state, bp->b_bcount, bp->b_resid));

	err = EIO;

	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
	if (pkt->pkt_reason != CMD_CMPLT) {
		/*
		 * The command did not complete.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command did not complete."
		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
		if (pkt->pkt_flags & FLAG_SENSING) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
			err = ETIMEDOUT;
		}
	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
		/*
		 * The auto-rqsense happened, and the packet has a filled-in
		 * scsi_arq_status structure, pointed to by pkt_scbp.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: received auto-requested sense"));
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			/* get the amount of data to copy into rqbuf */
			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus =
			    *((char *)&arqstat->sts_rqpkt_status);
			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
			    rqlen != 0) {
				bcopy(&(arqstat->sts_sensedata),
				    uscmdp->uscsi_rqbuf, rqlen);
			}
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_uscsi_iodone: ARQ "
			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
			    "xfer: %d rqpkt_resid: %d\n",
			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
			    uscmdp->uscsi_rqlen, rqlen,
			    arqstat->sts_rqpkt_resid));
		}
	} else if (pkt->pkt_flags & FLAG_SENSING) {
		struct buf *rqbp;
		struct scsi_status *rqstatus;

		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
		/* a manual request sense was done - get the information */
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			int rqlen = SENSE_LENGTH - pkt->pkt_resid;

			rqbp = mp_uscmdp->rqbp;
			/* get the amount of data to copy into rqbuf */
			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
				    rqlen);
			}
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
			scsi_free_consistent_buf(rqbp);
		}
		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: "
		    "FLAG_SENSING uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
	} else {
		struct scsi_status *status =
		    (struct scsi_status *)pkt->pkt_scbp;
		/*
		 * Command completed and we're not getting sense. Check for
		 * errors and decide what to do next.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command appears complete: reason: %x",
		    pkt->pkt_reason));
		if (status->sts_chk) {
			/* need to manually get the request sense */
			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
				scsi_destroy_pkt(pkt);
				return;
			}
		} else {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_chk_err: appears complete"));
			err = 0;
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			if (pkt->pkt_resid) {
				bp->b_resid += pkt->pkt_resid;
			}
		}
	}

	if (err) {
		if (bp->b_resid == 0)
			bp->b_resid = bp->b_bcount;
		bioerror(bp, err);
		bp->b_flags |= B_ERROR;
	}

	scsi_destroy_pkt(pkt);
	biodone(bp);

	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
}

/*
 * start routine for the mpapi uscsi command
 */
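/*
 * The buf's b_private points at the mp_uscsi_cmd_t built by the MPAPI
 * layer. A scsi_pkt is constructed from the embedded uscsi_cmd, the
 * USCSI_* flags are translated to the corresponding FLAG_* pkt flags,
 * and a TRAN_BUSY response is retried up to vhci_uscsi_retry_count times
 * with a vhci_uscsi_delay pause between attempts.
 */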
int
vhci_uscsi_iostart(struct buf *bp)
{
	struct scsi_pkt		*pkt;
	struct uscsi_cmd	*uscmdp;
	mp_uscsi_cmd_t		*mp_uscmdp;
	int			stat_size, rval;
	int			retry = 0;

	ASSERT(bp->b_private != NULL);

	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
	uscmdp = mp_uscmdp->uscmdp;
	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
		stat_size = SENSE_LENGTH;
	} else {
		stat_size = 1;
	}

	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
	    stat_size, 0, 0, SLEEP_FUNC, NULL);
	if (pkt == NULL) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: EINVAL"));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EINVAL);
		biodone(bp);
		return (EINVAL);
	}

	pkt->pkt_time = uscmdp->uscsi_timeout;
	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
	pkt->pkt_comp = vhci_uscsi_iodone;
	pkt->pkt_private = mp_uscmdp;
	if (uscmdp->uscsi_flags & USCSI_SILENT)
		pkt->pkt_flags |= FLAG_SILENT;
	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
		pkt->pkt_flags |= FLAG_ISOLATE;
	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
		pkt->pkt_flags |= FLAG_DIAGNOSE;
	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
	}
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
	    " stat_size: %d",
	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
	 */
	if (scsi_pkt_allocated_correctly(pkt))
		pkt->pkt_path_instance = 0;

	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
	    retry < vhci_uscsi_retry_count) {
		delay(drv_usectohz(vhci_uscsi_delay));
		retry++;
	}
	if (retry >= vhci_uscsi_retry_count) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
	}
	switch (rval) {
	case TRAN_ACCEPT:
		rval = 0;
		break;

	default:
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
		    rval, bp->b_bcount, bp->b_resid));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EIO);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		rval = EIO;
		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		break;
	}
	VHCI_DEBUG(4, (CE_NOTE, NULL,
	    "vhci_uscsi_iostart: exit: rval: %d", rval));
	return (rval);
}
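/*
 * For illustration, a scsi-vhci-failover-override entry in scsi_vhci.conf
 * is expressed as "VID/PID", "value" string pairs; the VID/PID strings
 * below are hypothetical examples, not a list of supported devices:
 *
 *	scsi-vhci-failover-override =
 *		"ACME    SuperDisk",	"f_tpgs",
 *		"ACME    LegacyDisk",	"NONE";
 */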
/* ARGSUSED */
static struct scsi_failover_ops *
vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd,
    void **ctprivp, char **fo_namep)
{
	struct scsi_failover_ops	*sfo;
	char				*sfo_name;
	char				*override;
	struct scsi_failover		*sf;

	ASSERT(psd && psd->sd_inq);
	if ((psd == NULL) || (psd->sd_inq == NULL)) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_dev_fo:return NULL no scsi_device or inquiry"));
		return (NULL);
	}

	/*
	 * Determine if device is supported under scsi_vhci, and select
	 * failover module.
	 *
	 * See if there is a scsi_vhci.conf file override for this device's
	 * VID/PID. The following values can be returned:
	 *
	 * NULL		If NULL is returned then there is no scsi_vhci.conf
	 *		override. For NULL, we determine the failover_ops for
	 *		this device by checking the sfo_device_probe entry
	 *		point for each 'fops' module, in order.
	 *
	 *		NOTE: Correct operation may depend on module ordering
	 *		of 'specific' (failover modules that are completely
	 *		VID/PID table based) to 'generic' (failover modules
	 *		that are based on T10 standards like TPGS). Currently,
	 *		the value of 'ddi-forceload' in scsi_vhci.conf is used
	 *		to establish the module list and probe order.
	 *
	 * "NONE"	If the value "NONE" is returned then there is a
	 *		scsi_vhci.conf VID/PID override to indicate the device
	 *		should not be supported under scsi_vhci (even if there
	 *		is an 'fops' module supporting the device).
	 *
	 * "<other>"	If another value is returned then that value is the
	 *		name of the 'fops' module that should be used.
	 */
	sfo = NULL;	/* "NONE" */
	override = scsi_get_device_type_string(
	    "scsi-vhci-failover-override", vdip, psd);
	if (override == NULL) {
		/* NULL: default: select based on sfo_device_probe results */
		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
			    ctprivp) == SFO_DEVICE_PROBE_PHCI)
				continue;

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			if (fo_namep && (*fo_namep == NULL)) {
				sfo_name = i_ddi_strdup(sfo->sfo_name,
				    KM_SLEEP);
				*fo_namep = sfo_name;
			}
			break;
		}
	} else if (strcasecmp(override, "NONE")) {
		/* !"NONE": select based on driver.conf specified name */
		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    (sf->sf_sfo->sfo_name == NULL) ||
			    strcmp(override, sf->sf_sfo->sfo_name))
				continue;

			/*
			 * NOTE: If sfo_device_probe() has side-effects,
			 * including setting *ctprivp, these are not going
			 * to occur with override config.
			 */

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			if (fo_namep && (*fo_namep == NULL)) {
				sfo_name = kmem_alloc(strlen("conf ") +
				    strlen(sfo->sfo_name) + 1, KM_SLEEP);
				(void) sprintf(sfo_name, "conf %s",
				    sfo->sfo_name);
				*fo_namep = sfo_name;
			}
			break;
		}
	}
	if (override)
		kmem_free(override, strlen(override) + 1);
	return (sfo);
}

/*
 * Determine whether the device described by cinfo should be enumerated under
 * the vHCI or the pHCI - if there is a failover ops then the device is
 * supported under the vHCI. By agreement with SCSA, cinfo is a pointer
 * to a scsi_device structure associated with a decorated pHCI probe node.
 */
/* ARGSUSED */
int
vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo)
{
	struct scsi_device	*psd = (struct scsi_device *)cinfo;

	return (vhci_dev_fo(vdip, psd, NULL, NULL) ?
	    MDI_SUCCESS : MDI_FAILURE);
}

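/*
 * DEBUG-only helpers that decode a CDB opcode into its SCSI command name
 * and hex-dump CDBs and PGR key data for the tracing output above.
 */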
#ifdef DEBUG
extern struct scsi_key_strings scsi_cmds[];

static char *
vhci_print_scsi_cmd(char cmd)
{
	char tmp[64];
	char *cpnt;

	cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
	/* tmp goes out of scope on return and caller sees garbage */
	if (cpnt == tmp) {
		cpnt = "Unknown Command";
	}
	return (cpnt);
}

extern uchar_t	scsi_cdb_size[];

static void
vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
{
	int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
	char buf[256];

	if (level == CE_NOTE) {
		vhci_log(level, dip, "path cmd %s\n",
		    vhci_print_scsi_cmd(*cdb));
		return;
	}

	(void) sprintf(buf, "%s for cmd(%s)", title,
	    vhci_print_scsi_cmd(*cdb));
	vhci_clean_print(dip, level, buf, cdb, len);
}

static void
vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
    int len)
{
	int	i;
	int	c;
	char	*format;
	char	buf[256];
	uchar_t	byte;

	(void) sprintf(buf, "%s:\n", title);
	vhci_log(level, dev, "%s", buf);
	level = CE_CONT;
	for (i = 0; i < len; ) {
		buf[0] = 0;
		for (c = 0; c < 8 && i < len; c++, i++) {
			byte = (uchar_t)data[i];
			if (byte < 0x10)
				format = "0x0%x ";
			else
				format = "0x%x ";
			(void) sprintf(&buf[(int)strlen(buf)], format, byte);
		}
		(void) sprintf(&buf[(int)strlen(buf)], "\n");

		vhci_log(level, dev, "%s\n", buf);
	}
}
#endif