/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Multiplexed I/O SCSI vHCI implementation
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/scsi/impl/services.h>
#include <sys/sunmdi.h>
#include <sys/mdi_impldefs.h>
#include <sys/scsi/adapters/scsi_vhci.h>
#include <sys/disp.h>
#include <sys/byteorder.h>

extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;

#ifdef	DEBUG
int	vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
#endif

/* retry for the vhci_do_prout command when a not ready is returned */
int vhci_prout_not_ready_retry = 180;

/*
 * These values are defined to support the internal retry of
 * SCSI packets for better sense code handling.
 */
#define	VHCI_CMD_CMPLT	0
#define	VHCI_CMD_RETRY	1
#define	VHCI_CMD_ERROR	-1

#define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
#define	VHCI_SCSI_PERR		0x47
#define	VHCI_PGR_ILLEGALOP	-2
#define	VHCI_NUM_UPDATE_TASKQ	8
/* changed to 132 to accommodate HDS */
#define	VHCI_STD_INQ_SIZE	132

/*
 * Version Macros
 */
#define	VHCI_NAME_VERSION	"SCSI VHCI Driver"
char		vhci_version_name[] = VHCI_NAME_VERSION;

int		vhci_first_time = 0;
clock_t		vhci_to_ticks = 0;
int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
kcondvar_t	vhci_cv;
kmutex_t	vhci_global_mutex;
void		*vhci_softstate = NULL; /* for soft state */

/*
 * Flag to delay the retry of the reserve command
 */
int		vhci_reserve_delay = 100000;
static int	vhci_path_quiesce_timeout = 60;
static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];

/* uscsi delay for a TRAN_BUSY */
static int vhci_uscsi_delay = 100000;
static int vhci_uscsi_retry_count = 180;
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static timeout_id_t vhci_restart_timeid = 0;

/*
 * Bidirectional map of 'target-port' to port id <pid> for support of
 * iostat(1M) '-Xx' and '-Yx' output.
 */
static kmutex_t		vhci_targetmap_mutex;
static uint_t		vhci_targetmap_pid = 1;
static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */

/*
 * functions exported by scsi_vhci struct cb_ops
 */
static int vhci_open(dev_t *, int, int, cred_t *);
static int vhci_close(dev_t, int, int, cred_t *);
static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * functions exported by scsi_vhci struct dev_ops
 */
static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * functions exported by scsi_vhci scsi_hba_tran_t transport table
 */
static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
    struct scsi_device *);
static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_target(struct scsi_address *, int level,
    uint8_t select_path);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip);
static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
    void *, void *);
static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);

/*
 * functions registered with the mpxio framework via mdi_vhci_ops_t
 */
static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
    mdi_pathinfo_state_t, uint32_t, int);
static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_failover(dev_info_t *, dev_info_t *, int);
static void vhci_client_attached(dev_info_t *);

static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
    int, caddr_t);
static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
    uint_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
    sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
    struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
    scsi_vhci_lun_t *, char *, char *);

static char *vhci_devnm_to_guid(char *);
static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
    int, int (*func)(caddr_t));
static void vhci_intr(struct scsi_pkt *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt **);
static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
static void vhci_lun_free(dev_info_t *);
static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
    uint8_t, uint8_t);
void vhci_update_pathstates(void *);

#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
static void vhci_print_cdb(dev_info_t *dip, uint_t level,
    char *title, uchar_t *cdb);
static void vhci_clean_print(dev_info_t *dev, uint_t level,
    char *title, uchar_t *data, int len);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);

/*
 * MP-API related functions
 */
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
    scsi_vhci_lun_t *, mdi_pathinfo_t *);
extern void *vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
    uint8_t, void *);
extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
    scsi_vhci_lun_t *);

#define	VHCI_DMA_MAX_XFER_CAP	INT_MAX

#define	VHCI_MAX_PGR_RETRIES	3

/*
 * Macros for the device-type mpxio options
 */
#define	LOAD_BALANCE_OPTIONS		"load-balance-options"
#define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
#define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
#define	DEVICE_TYPE_STR			"device-type"
#define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')
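
/*
 * Illustrative scsi_vhci.conf fragment showing how the option names above
 * are consumed (a sketch only; the device-type string and region size are
 * made-up values, not a shipped configuration):
 *
 *	device-type-mpxio-options-list =
 *		"device-type=ACME    MirrorDisk",
 *		"load-balance-options=logical-block-options";
 *	logical-block-options = "load-balance=logical-block",
 *		"region-size=18";
 */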

static struct cb_ops vhci_cb_ops = {
	vhci_open,			/* open */
	vhci_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	vhci_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops vhci_ops = {
	DEVO_REV,
	0,
	vhci_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	vhci_attach,		/* attach and detach are mandatory */
	vhci_detach,
	nodev,			/* reset */
	&vhci_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	vhci_version_name,	/* module name */
	&vhci_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static mdi_vhci_ops_t vhci_opinfo = {
	MDI_VHCI_OPS_REV,
	vhci_pathinfo_init,		/* Pathinfo node init callback */
	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
	vhci_pathinfo_state_change,	/* Pathinfo node state change */
	vhci_failover,			/* failover callback */
	vhci_client_attached		/* client attached callback */
};

/*
 * The scsi_failover table defines an ordered set of 'fops' modules supported
 * by scsi_vhci.  Currently, this table is initialized from the 'ddi-forceload'
 * property specified in scsi_vhci.conf.
 */
struct scsi_failover {
	ddi_modhandle_t			sf_mod;
	struct scsi_failover_ops	*sf_sfo;
} *scsi_failover_table;
uint_t	scsi_nfailover;
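
/*
 * For example, scsi_vhci.conf commonly populates 'ddi-forceload' along
 * these lines (module paths are illustrative; the delivered scsi_vhci.conf
 * is the authoritative list):
 *
 *	ddi-forceload =
 *		"misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *		"misc/scsi_vhci/scsi_vhci_f_sym",
 *		"misc/scsi_vhci/scsi_vhci_f_tpgs";
 */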

int
_init(void)
{
	int	rval;

	/*
	 * Initialize the soft state before registering with the transport,
	 * so that ddi_soft_state_zalloc() can be used from attach.
	 */
	if ((rval = ddi_soft_state_init(&vhci_softstate,
	    sizeof (struct scsi_vhci), 1)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init:soft state init failed\n"));
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: scsi hba init failed\n"));
		ddi_soft_state_fini(&vhci_softstate);
		return (rval);
	}

	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
	vhci_targetmap_byport = mod_hash_create_strhash(
	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
	vhci_targetmap_bypid = mod_hash_create_idhash(
	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);

	if ((rval = mod_install(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
		if (vhci_targetmap_bypid)
			mod_hash_destroy_idhash(vhci_targetmap_bypid);
		if (vhci_targetmap_byport)
			mod_hash_destroy_strhash(vhci_targetmap_byport);
		mutex_destroy(&vhci_targetmap_mutex);
		cv_destroy(&vhci_cv);
		mutex_destroy(&vhci_global_mutex);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&vhci_softstate);
	}
	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
		return (rval);
	}

	if (vhci_targetmap_bypid)
		mod_hash_destroy_idhash(vhci_targetmap_bypid);
	if (vhci_targetmap_byport)
		mod_hash_destroy_strhash(vhci_targetmap_byport);
	mutex_destroy(&vhci_targetmap_mutex);
	cv_destroy(&vhci_cv);
	mutex_destroy(&vhci_global_mutex);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&vhci_softstate);

	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Lookup scsi_failover by "short name" of failover module.
 */
struct scsi_failover_ops *
vhci_failover_ops_by_name(char *name)
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
			return (sf->sf_sfo);
	}
	return (NULL);
}

/*
 * Load all scsi_failover_ops 'fops' modules.
 */
static void
vhci_failover_modopen(struct scsi_vhci *vhci)
{
	char			**module;
	int			i;
	struct scsi_failover	*sf;
	char			**dt;
	int			e;

	if (scsi_failover_table)
		return;

	/* Get the list of modules from scsi_vhci.conf */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf is missing 'ddi-forceload'");
		return;
	}
	if (scsi_nfailover == 0) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf has empty 'ddi-forceload'");
		ddi_prop_free(module);
		return;
	}

	/* allocate failover table based on number of modules */
	scsi_failover_table = (struct scsi_failover *)
	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
	    KM_SLEEP);

	/* loop over modules specified in scsi_vhci.conf and open each module */
	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
		if (module[i] == NULL)
			continue;

		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
		if (sf->sf_mod == NULL) {
			/*
			 * A module returns EEXIST if other software is
			 * supporting the intended function: for example
			 * the scsi_vhci_f_sum_emc module returns EEXIST
			 * from _init if EMC powerpath software is installed.
			 */
			if (e != EEXIST)
				cmn_err(CE_WARN, "scsi_vhci: unable to open "
				    "module '%s', error %d", module[i], e);
			continue;
		}
		sf->sf_sfo = ddi_modsym(sf->sf_mod,
		    "scsi_vhci_failover_ops", &e);
		if (sf->sf_sfo == NULL) {
			cmn_err(CE_WARN, "scsi_vhci: "
			    "unable to import 'scsi_failover_ops' from '%s', "
			    "error %d", module[i], e);
			(void) ddi_modclose(sf->sf_mod);
			sf->sf_mod = NULL;
			continue;
		}

		/* register vid/pid of devices supported with mpapi */
		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
			vhci_mpapi_add_dev_prod(vhci, *dt);
		sf++;
	}

	/* verify that at least the "well-known" modules were there */
	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");
	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");

	/* call sfo_init for modules that need it */
	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
			sf->sf_sfo->sfo_init();
	}

	ddi_prop_free(module);
}
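
/*
 * For reference, vhci_failover_modopen() above requires each fops module to
 * export a 'scsi_vhci_failover_ops' symbol.  A minimal sketch of what such
 * a module provides (names and the device string are hypothetical, and only
 * the sfo_name/sfo_devices/sfo_init members used in this file are shown):
 *
 *	static char *example_dev_table[] = { "ACME    MirrorDisk", NULL };
 *
 *	static void example_init(void) { }
 *
 *	struct scsi_failover_ops scsi_vhci_failover_ops = {
 *		...
 *		"f_example",		(sfo_name)
 *		example_dev_table,	(sfo_devices)
 *		example_init,		(sfo_init)
 *		...
 *	};
 */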

/*
 * unload all loaded scsi_failover_ops modules
 */
static void
vhci_failover_modclose()
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
			continue;
		(void) ddi_modclose(sf->sf_mod);
		sf->sf_mod = NULL;
		sf->sf_sfo = NULL;
	}

	if (scsi_failover_table && scsi_nfailover)
		kmem_free(scsi_failover_table,
		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
	scsi_failover_table = NULL;
	scsi_nfailover = 0;
}

/* ARGSUSED */
static int
vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
		mutex_exit(&vhci->vhci_mutex);
		vhci_log(CE_NOTE, vhci->vhci_dip,
		    "!vhci%d: Already open\n", getminor(*devp));
		return (EBUSY);
	}

	vhci->vhci_state |= VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);
	return (0);
}


/* ARGSUSED */
static int
vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	vhci->vhci_state &= ~VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);

	return (0);
}

/* ARGSUSED */
static int
vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	if (IS_DEVCTL(cmd)) {
		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
	} else if (cmd == MP_CMD) {
		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
	} else {
		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
	}
}

/*
 * attach the module
 */
static int
vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			rval = DDI_FAILURE;
	int			scsi_hba_attached = 0;
	int			vhci_attached = 0;
	int			mutex_initted = 0;
	int			instance;
	struct scsi_vhci	*vhci;
	scsi_hba_tran_t		*tran;
	char			cache_name_buf[64];
	char			*data;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
		    "implemented\n"));
		return (rval);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_attach: unknown ddi command\n"));
		return (rval);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "soft state alloc failed\n"));
		return (DDI_FAILURE);
	}

	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "bad soft state\n"));
		ddi_soft_state_free(vhci_softstate, instance);
		return (DDI_FAILURE);
	}

	/* Allocate packet cache */
	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
	    "vhci%d_cache", instance);

	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	ASSERT(tran != NULL);

	vhci->vhci_tran		= tran;
	vhci->vhci_dip		= dip;
	vhci->vhci_instance	= instance;

	tran->tran_hba_private	= vhci;
	tran->tran_tgt_private	= NULL;
	tran->tran_tgt_init	= vhci_scsi_tgt_init;
	tran->tran_tgt_probe	= NULL;
	tran->tran_tgt_free	= vhci_scsi_tgt_free;

	tran->tran_start	= vhci_scsi_start;
	tran->tran_abort	= vhci_scsi_abort;
	tran->tran_reset	= vhci_scsi_reset;
	tran->tran_getcap	= vhci_scsi_getcap;
	tran->tran_setcap	= vhci_scsi_setcap;
	tran->tran_init_pkt	= vhci_scsi_init_pkt;
	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
	tran->tran_dmafree	= vhci_scsi_dmafree;
	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
	tran->tran_reset_notify	= vhci_scsi_reset_notify;

	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
	tran->tran_get_name	= vhci_scsi_get_name;
	tran->tran_bus_reset	= NULL;
	tran->tran_quiesce	= NULL;
	tran->tran_unquiesce	= NULL;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_post_event = NULL;

	tran->tran_bus_power = vhci_scsi_bus_power;

	tran->tran_bus_config = vhci_scsi_bus_config;

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
	    != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "mdi_vhci_register failed\n"));
		goto attach_fail;
	}
	vhci_attached++;

	/*
	 * Attach this instance of the hba.
	 *
	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
	 * driver, it has nothing to do with DMA.  However, when calling
	 * scsi_hba_attach_setup() we need to pass something valid in the
	 * dma attributes parameter, so we just use scsi_alloc_attr.
	 * SCSA itself seems to care only for the dma_attr_minxfer and
	 * dma_attr_burstsizes fields of the dma attributes structure;
	 * it expects those fields to be non-zero.
	 */
	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "hba attach failed\n"));
		goto attach_fail;
	}
	scsi_hba_attached++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    " ddi_create_minor_node failed\n"));
		goto attach_fail;
	}

	/*
	 * Set pm-want-child-notification property for
	 * power management of the phci and client
	 */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d fail to create pm-want-child-notification? prop",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto attach_fail;
	}

	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
	vhci->vhci_update_pathstates_taskq =
	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
	    MINCLSYSPRI, 1, 4, 0);
	ASSERT(vhci->vhci_taskq);
	ASSERT(vhci->vhci_update_pathstates_taskq);

	/*
	 * Set appropriate configuration flags based on options set in
	 * conf file.
	 */
	vhci->vhci_conf_flags = 0;
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
	    "auto-failback", &data) == DDI_SUCCESS) {
		if (strcmp(data, "enable") == 0)
			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
		ddi_prop_free(data);
	}

	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
		    "disabled through scsi_vhci.conf file.");

	/*
	 * Allocate an mpapi private structure
	 */
	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
	if (vhci_mpapi_init(vhci) != 0) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
		    "vhci_mpapi_init() failed"));
	}

	vhci_failover_modopen(vhci);		/* load failover modules */

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_attached)
		(void) mdi_vhci_unregister(dip, 0);

	if (scsi_hba_attached)
		(void) scsi_hba_detach(dip);

	if (vhci->vhci_tran)
		scsi_hba_tran_free(vhci->vhci_tran);

	if (mutex_initted) {
		mutex_destroy(&vhci->vhci_mutex);
	}

	ddi_soft_state_free(vhci_softstate, instance);
	return (DDI_FAILURE);
}
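
/*
 * Example scsi_vhci.conf setting consumed by vhci_attach() above (shown
 * for illustration; "enable" is the only value the code treats specially):
 *
 *	auto-failback = "enable";
 */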

/*ARGSUSED*/
static int
vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance = ddi_get_instance(dip);
	scsi_hba_tran_t		*tran;
	struct scsi_vhci	*vhci;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	vhci = TRAN2HBAPRIVATE(tran);
	if (!vhci) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not "
		    "yet implemented\n"));
		return (DDI_FAILURE);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_detach: unknown ddi command\n"));
		return (DDI_FAILURE);
	}

	(void) mdi_vhci_unregister(dip, 0);
	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d unable to remove prop pm-want-child-notification?",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	if (vhci_restart_timeid != 0) {
		(void) untimeout(vhci_restart_timeid);
	}
	vhci_restart_timeid = 0;

	mutex_destroy(&vhci->vhci_mutex);
	vhci->vhci_dip = NULL;
	vhci->vhci_tran = NULL;
	taskq_destroy(vhci->vhci_taskq);
	taskq_destroy(vhci->vhci_update_pathstates_taskq);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(vhci_softstate, instance);

	vhci_failover_modclose();	/* unload failover modules */
	return (DDI_SUCCESS);
}

/*
 * vhci_getinfo()
 * Given the device number, return the devinfo pointer or the
 * instance number.
 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
 */

/*ARGSUSED*/
static int
vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct scsi_vhci	*vhcip;
	int			instance = MINOR2INST(getminor((dev_t)arg));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		vhcip = ddi_get_soft_state(vhci_softstate, instance);
		if (vhcip != NULL)
			*result = vhcip->vhci_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	char			*guid;
	scsi_vhci_lun_t		*vlun;
	struct scsi_vhci	*vhci;
	clock_t			from_ticks;
	mdi_pathinfo_t		*pip;
	int			rval;

	ASSERT(hba_dip != NULL);
	ASSERT(tgt_dip != NULL);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		/*
		 * This must be the .conf node without GUID property.
		 * The node under fp already inserts a delay, so we
		 * just return from here.  We rely on this delay to have
		 * all dips be posted to the ndi hotplug thread's newdev
		 * list.  This is necessary for the deferred attach
		 * mechanism to work and opens() done soon after boot to
		 * succeed.
		 */
		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
		    "property failed"));
		return (DDI_NOT_WELL_FORMED);
	}

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		/*
		 * This must be a .conf node with the GUID property.  We don't
		 * merge property by ndi_merge_node() here because the
		 * devi_addr_buf of a .conf node is always "" according to the
		 * implementation of vhci_scsi_get_name_bus_addr().
		 */
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
	ASSERT(vhci != NULL);

	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
	    "!tgt_init: called for %s (instance %d)\n",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));

	vlun = vhci_lun_lookup(tgt_dip);

	mutex_enter(&vhci_global_mutex);

	from_ticks = ddi_get_lbolt();
	if (vhci_to_ticks == 0) {
		vhci_to_ticks = from_ticks +
		    drv_usectohz(vhci_init_wait_timeout);
	}

#ifdef DEBUG
	if (vlun) {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
		    "from_ticks %lx to_ticks %lx",
		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
	} else {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : vlun not found "
		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
		    vhci_to_ticks));
	}
#endif

	rval = mdi_select_path(tgt_dip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_rele_path(pip);
	}

	/*
	 * Wait while all of the following conditions hold:
	 *	1. no vlun available yet
	 *	2. no path established
	 *	3. timer did not expire
	 */
	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
	    (rval != MDI_SUCCESS)) {
		if (vlun && vlun->svl_not_supported) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
			    "vlun 0x%p lun guid %s not supported!",
			    (void *)vlun, guid));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
			vhci_first_time = 1;
		}
		if (vhci_first_time == 1) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
			    "no wait for %s. from_tick %lx, to_tick %lx",
			    guid, from_ticks, vhci_to_ticks));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}

		if (cv_timedwait(&vhci_cv,
		    &vhci_global_mutex, vhci_to_ticks) == -1) {
			/* Timed out */
#ifdef DEBUG
			if (vlun == NULL) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: no vlun for %s!", guid));
			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path count is "
				    "zero for %s!", guid));
			} else {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path not "
				    "available yet for %s!", guid));
			}
#endif /* DEBUG */
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		vlun = vhci_lun_lookup(tgt_dip);
		rval = mdi_select_path(tgt_dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if (rval == MDI_SUCCESS) {
			mdi_rele_path(pip);
		}
		from_ticks = ddi_get_lbolt();
	}
	mutex_exit(&vhci_global_mutex);

	ASSERT(vlun != NULL);
	ddi_prop_free(guid);
	hba_tran->tran_tgt_private = vlun;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
}

/*
 * a PGR register command has started; copy the info we need
 */
int
vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
{
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	void			*addr;

	if (!vpkt->vpkt_tgt_init_bp)
		return (TRAN_BADPKT);

	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
	if (addr == NULL)
		return (TRAN_BUSY);

	mutex_enter(&vlun->svl_mutex);

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");

	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");

	vlun->svl_time = pkt->pkt_time;
	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
	vlun->svl_first_path = vpkt->vpkt_path;
	mutex_exit(&vlun->svl_mutex);
	return (0);
}

/*
 * Function name : vhci_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
 *					  or other fatal failure
 *					  preventing packet transportation
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to phci
 *					  (or is held in the waitQ)
 * Description	 : Implements SCSA's tran_start() entry point for
 *		   packet transport
 */
static int
vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int			rval = TRAN_ACCEPT;
	int			instance, held;
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	int			flags = 0;
	scsi_vhci_priv_t	*svp;
	dev_info_t		*cdip;
	client_lb_t		lbp;
	int			restore_lbp = 0;
	/* set if pkt is SCSI-II RESERVE cmd */
	int			pkt_reserve_cmd = 0;
	int			reserve_failed = 0;

	ASSERT(vhci != NULL);
	ASSERT(vpkt != NULL);
	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
	cdip = ADDR2DIP(ap);

	/*
	 * Block IOs if LUN is held or QUIESCED for IOs.
	 */
	if ((VHCI_LUN_IS_HELD(vlun)) ||
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		return (TRAN_BUSY);
	}

	/*
	 * vhci_lun needs to be quiesced before a SCSI-II RESERVE command
	 * can be issued.  This may require a cv_timedwait, which is
	 * dangerous to perform in an interrupt context.  So if this
	 * is a RESERVE command a taskq is dispatched to service it.
	 * This taskq shall again call vhci_scsi_start, but we shall be
	 * sure it's not in an interrupt context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
			if (taskq_dispatch(vhci->vhci_taskq,
			    vhci_dispatch_scsi_start, (void *) vpkt,
			    KM_NOSLEEP)) {
				return (TRAN_ACCEPT);
			} else {
				return (TRAN_BUSY);
			}
		}

		/*
		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
		 * get serviced for a lun.
		 */
		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
		if (!held) {
			return (TRAN_BUSY);
		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
		    VLUN_QUIESCED_FLG) {
			VHCI_RELEASE_LUN(vlun);
			return (TRAN_BUSY);
		}

		/*
		 * To ensure that no IOs occur for this LUN for the duration
		 * of this pkt, set the VLUN_QUIESCED_FLG.
		 * In case this routine needs to exit on error make sure that
		 * this flag is cleared.
		 */
		vlun->svl_flags |= VLUN_QUIESCED_FLG;
		pkt_reserve_cmd = 1;

		/*
		 * if this is a SCSI-II RESERVE command, set load balancing
		 * policy to be ALTERNATE PATH to ensure that all subsequent
		 * IOs are routed on the same path.  This is because if
		 * commands are routed across multiple paths then IOs on
		 * paths other than the one on which the RESERVE was executed
		 * will get a RESERVATION CONFLICT.
		 */
		lbp = mdi_get_lb_policy(cdip);
		if (lbp != LOAD_BALANCE_NONE) {
			if (vhci_quiesce_lun(vlun) != 1) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			vlun->svl_lb_policy_save = lbp;
			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
			    MDI_SUCCESS) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			restore_lbp = 1;
		}
		/*
		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
		 * To narrow the window where a reserve command may be sent
		 * down an inactive path, the path states first need to be
		 * updated.  Before calling vhci_update_pathstates reset
		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
		 * for this lun.  This shall prevent an unnecessary reset
		 * from being sent out.
		 */
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		vhci_update_pathstates((void *)vlun);
	}

	instance = ddi_get_instance(vhci->vhci_dip);

	/*
	 * If the command is PRIN with action of zero, then the cmd
	 * is reading PR keys which requires filtering on completion.
	 * Data cache sync must be guaranteed.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
	    (vpkt->vpkt_org_vpkt == NULL)) {
		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
	}

	/*
	 * Do not defer bind for PKT_DMA_PARTIAL
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {

		/* This is a non pkt_dma_partial case */
		if ((rval = vhci_bind_transport(
		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
		    != TRAN_ACCEPT) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci%d %x: failed to bind transport: "
			    "vlun 0x%p pkt_reserved %x restore_lbp %x, "
			    "lbp %x", instance, rval, (void *)vlun,
			    pkt_reserve_cmd, restore_lbp, lbp));
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
			if (pkt_reserve_cmd)
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			return (rval);
		}
		VHCI_DEBUG(8, (CE_NOTE, NULL,
		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
	}
	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	ASSERT(vpkt->vpkt_path != NULL);

	/*
	 * This is the chance to adjust the pHCI's pkt and other information
	 * from target driver's pkt.
	 */
	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
	    (void *)vpkt));
	vhci_update_pHCI_pkt(vpkt, pkt);

	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
			    "pktpath 0x%p resrv path 0x%p differ. "
			    "lb_policy %x", (void *)vlun,
			    (void *)vpkt->vpkt_path,
			    (void *)vlun->svl_resrv_pip,
			    mdi_get_lb_policy(cdip)));
			reserve_failed = 1;
		}
	}

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
	if (svp == NULL || reserve_failed) {
		if (pkt_reserve_cmd) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind returned null svp vlun 0x%p",
			    (void *)vlun));
			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
		}
pkt_cleanup:
		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
			vpkt->vpkt_hba_pkt = NULL;
			if (vpkt->vpkt_path) {
				mdi_rele_path(vpkt->vpkt_path);
				vpkt->vpkt_path = NULL;
			}
		}
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			sema_v(&vlun->svl_pgr_sema);
		}
		return (TRAN_BUSY);
	}

	VHCI_INCR_PATH_CMDCOUNT(svp);

	/*
	 * Ensure that no other IOs raced ahead while a RESERVE cmd was
	 * QUIESCING the same lun.
	 */
	if ((!pkt_reserve_cmd) &&
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		VHCI_DECR_PATH_CMDCOUNT(svp);
		goto pkt_cleanup;
	}

	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
		/*
		 * currently this thread only handles running PGR
		 * commands, so don't bother creating it unless
		 * something interesting is going to happen (like
		 * either a PGR out, or a PGR in with enough space
		 * to hold the keys that are getting returned)
		 */
		mutex_enter(&vlun->svl_mutex);
		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
			    1, MINCLSYSPRI, 1, 4, 0);
			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
		}
		mutex_exit(&vlun->svl_mutex);
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			if ((rval = vhci_pgr_register_start(vlun, pkt)) != 0) {
				/* an error */
				sema_v(&vlun->svl_pgr_sema);
				return (rval);
			}
		}
	}

	/*
	 * SCSI-II RESERVE cmd is not expected in polled mode.
	 * If this changes it needs to be handled for the polled scenario.
	 */
	flags = vpkt->vpkt_hba_pkt->pkt_flags;

	/*
	 * Set the path_instance *before* sending the scsi_pkt down the path
	 * to mpxio's pHCI so that additional path abstractions at a pHCI
	 * level (like maybe iSCSI at some point in the future) can update
	 * the path_instance.
	 */
	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
		vpkt->vpkt_hba_pkt->pkt_path_instance =
		    mdi_pi_get_path_instance(vpkt->vpkt_path);

	rval = scsi_transport(vpkt->vpkt_hba_pkt);
	if (rval == TRAN_ACCEPT) {
		if (flags & FLAG_NOINTR) {
			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;

			ASSERT(tpkt != NULL);
			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
			tpkt->pkt_resid = pkt->pkt_resid;
			tpkt->pkt_state = pkt->pkt_state;
			tpkt->pkt_statistics = pkt->pkt_statistics;
			tpkt->pkt_reason = pkt->pkt_reason;

			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
				    vpkt->vpkt_tgt_init_scblen);
			}

			VHCI_DECR_PATH_CMDCOUNT(svp);
			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
				vpkt->vpkt_hba_pkt = NULL;
				if (vpkt->vpkt_path) {
					mdi_rele_path(vpkt->vpkt_path);
					vpkt->vpkt_path = NULL;
				}
			}
			/*
			 * This path will not automatically retry pkts
			 * internally, therefore, vpkt_org_vpkt should
			 * never be set.
			 */
			ASSERT(vpkt->vpkt_org_vpkt == NULL);
			if (tpkt->pkt_comp) {
				tpkt->pkt_comp(tpkt);
			}
		}
		return (rval);
	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (pkt_reserve_cmd) {
		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
		    "!vhci_scsi_start: reserve failed vlun 0x%p",
		    (void *)vlun));
		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
		if (restore_lbp)
			(void) mdi_set_lb_policy(cdip, lbp);
	}

	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	VHCI_DECR_PATH_CMDCOUNT(svp);

	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
		vpkt->vpkt_hba_pkt = NULL;
		if (vpkt->vpkt_path) {
			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
			mdi_rele_path(vpkt->vpkt_path);
			vpkt->vpkt_path = NULL;
		}
	}
	return (TRAN_BUSY);
}

/*
 * Function name : vhci_scsi_reset()
 *
 * Return Values : 0 - reset failed
 *		   1 - reset succeeded
 */

/* ARGSUSED */
static int
vhci_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = 0;

	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
		return (vhci_scsi_reset_target(ap, level, TRUE));
	} else if (level == RESET_ALL) {
		return (vhci_scsi_reset_bus(ap));
	}

	return (rval);
}

/*
 * vhci_recovery_reset:
 *	Issues reset to the device
 * Input:
 *	vlun - vhci lun pointer of the device
 *	ap - address of the device
 *	select_path:
 *		If select_path is FALSE, then the address specified in ap is
 *		the path on which reset will be issued.
 *		If select_path is TRUE, then the path is obtained by calling
 *		mdi_select_path.
 *
 *	recovery_depth:
 *		Caller can specify the level of reset.
 *		VHCI_DEPTH_LUN -
 *			Issues LUN RESET if device supports lun reset.
 *		VHCI_DEPTH_TARGET -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET
 *		VHCI_DEPTH_ALL -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET.
 *			If TARGET RESET does not succeed, issues Bus Reset.
 */

static int
vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
    uint8_t select_path, uint8_t recovery_depth)
{
	int	ret = 0;

	ASSERT(ap != NULL);

	if (vlun && vlun->svl_support_lun_reset == 1) {
		ret = vhci_scsi_reset_target(ap, RESET_LUN,
		    select_path);
	}

	recovery_depth--;

	if ((ret == 0) && recovery_depth) {
		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
		    select_path);
		recovery_depth--;
	}

	if ((ret == 0) && recovery_depth) {
		(void) scsi_reset(ap, RESET_ALL);
	}

	return (ret);
}
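
/*
 * Illustrative call (a sketch, not taken from this file): an error-recovery
 * path that wants a LUN reset, escalating to a target reset on failure,
 * would invoke the routine above as:
 *
 *	(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
 *	    TRUE, VHCI_DEPTH_TARGET);
 */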

/*
 * Note: The scsi_address passed to this routine could be the scsi_address
 * for the virtual device or the physical device.  No assumptions should be
 * made in this routine about the ap structure, and the
 * a_hba_tran->tran_tgt_private field of ap cannot be assumed to be the
 * vhci structure.
 * Further note that the child dip would be the dip of the ssd node
 * irrespective of the scsi_address passed.
 */

static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
	dev_info_t		*vdip, *pdip, *cdip = ADDR2DIP(ap);
	mdi_pathinfo_t		*pip = NULL;
	mdi_pathinfo_t		*npip = NULL;
	int			rval = -1;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	struct scsi_vhci	*vhci = NULL;

	if (select_path != TRUE) {
		ASSERT(ap != NULL);
		if (level == RESET_LUN) {
			hba = ap->a_hba_tran;
			ASSERT(hba != NULL);
			return (hba->tran_reset(ap, RESET_LUN));
		}
		return (scsi_reset(ap, level));
	}

	ASSERT(cdip != NULL);
	vdip = ddi_get_parent(cdip);
	ASSERT(vdip != NULL);
	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "Unable to get a path, dip 0x%p", (void *)cdip));
		return (0);
	}
again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (0);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (0);
	}

	pap = &svp->svp_psd->sd_address;
	hba = pap->a_hba_tran;

	ASSERT(pap != NULL);
	ASSERT(hba != NULL);

	if (hba->tran_reset != NULL) {
		if (hba->tran_reset(pap, level) == 0) {
			pdip = mdi_pi_get_phci(pip);
			vhci_log(CE_WARN, vdip, "!(%s%d):"
			    " path (%s%d), reset %d failed",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    ddi_driver_name(pdip), ddi_get_instance(pdip),
			    level);

			/*
			 * Select next path and issue the reset, repeat
			 * until all paths are exhausted
			 */
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_ONLINE_PATH, pip, &npip);
			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
				mdi_rele_path(pip);
				return (0);
			}
			mdi_rele_path(pip);
			pip = npip;
			goto again;
		}
		mdi_rele_path(pip);
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
		    "reset %d sent down pip:%p for cdip:%p\n", level,
		    (void *)pip, (void *)cdip));
		return (1);
	}
	mdi_rele_path(pip);
	return (0);
}


/* ARGSUSED */
static int
vhci_scsi_reset_bus(struct scsi_address *ap)
{
	return (1);
}


/*
 * called by vhci_getcap and vhci_setcap to get and set (respectively)
 * SCSI capabilities
 */
/* ARGSUSED */
static int
vhci_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	int			cidx;
	int			rval = 0;

	if (cap == (char *)0) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: invalid arg"));
		return (rval);
	}

	if (vlun == NULL) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: vlun is null"));
		return (rval);
	}

	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		return (UNDEFINED);
	}

	/*
	 * Process setcap request.
	 */
	if (doset) {
		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:
			if (val == 0) {
				rval = 0;
			} else {
				rval = 1;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			if (tgtonly == 0) {
				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
				    "scsi_vhci_setcap: "
				    "Returning error since whom = 0"));
				rval = -1;
				break;
			}
			/*
			 * Set the capability accordingly.
			 */
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_support_lun_reset = val;
			rval = val;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_sector_size = val;
			vlun->svl_setcap_done = 1;
			mutex_exit(&vlun->svl_mutex);
			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);

			/* Always return success */
			rval = 1;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_setcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			/*
			 * For X86 this capability is caught in scsi_ifgetcap().
			 * XXX Should this be getting the value from the pHCI?
			 */
			rval = (int)VHCI_DMA_MAX_XFER_CAP;
			break;

		case SCSI_CAP_INITIATOR_ID:
			rval = 0x00;
			break;

		case SCSI_CAP_ARQ:
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = 1;
			break;

		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;

		case SCSI_CAP_INTERCONNECT_TYPE:
			rval = INTERCONNECT_FABRIC;
			break;

		case SCSI_CAP_LUN_RESET:
			/*
			 * scsi_vhci will always return success for LUN reset.
			 * When a request for doing LUN reset comes through
			 * the scsi_reset entry point, an attempt will be made
			 * at that time to do the reset through all the
			 * possible paths.
			 */
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_support_lun_reset;
			mutex_exit(&vlun->svl_mutex);
			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
			    "scsi_vhci_getcap:"
			    "Getting the Lun reset capability %d", rval));
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_sector_size;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_CDB_LEN:
			rval = VHCI_SCSI_CDB_SIZE;
			break;

		case SCSI_CAP_DMA_MAX_ARCH:
			/*
			 * For X86 this capability is caught in scsi_ifgetcap().
			 * XXX Should this be getting the value from the pHCI?
			 */
			rval = 0;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_getcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));
	}
	return (rval);
}


/*
 * Function name : vhci_scsi_getcap()
 *
 */
static int
vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	return (vhci_commoncap(ap, cap, 0, whom, 0));
}

static int
vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	return (vhci_commoncap(ap, cap, value, whom, 1));
}
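
/*
 * From a target driver's perspective, the capabilities handled by
 * vhci_commoncap() above are reached through scsi_ifgetcap(9F) and
 * scsi_ifsetcap(9F).  A minimal sketch (devp is a hypothetical
 * struct scsi_device pointer; whom = 1 addresses this device only):
 *
 *	if (scsi_ifsetcap(&devp->sd_address, "lun-reset", 1, 1) == 1) {
 *		LUN reset is now enabled; vhci_commoncap() recorded it
 *		in vlun->svl_support_lun_reset.
 *	}
 */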
pkt_tgt_init_pkt fields if deferred binding 1853 * is needed or for other purposes. 1854 */ 1855 vpkt->vpkt_tgt_init_pkt_flags = flags; 1856 vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0; 1857 vpkt->vpkt_state = VHCI_PKT_IDLE; 1858 vpkt->vpkt_tgt_init_cdblen = cmdlen; 1859 vpkt->vpkt_tgt_init_scblen = statuslen; 1860 newpkt = 1; 1861 } else { /* pkt not NULL */ 1862 vpkt = pkt->pkt_ha_private; 1863 } 1864 1865 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt " 1866 "vpkt %p flags %x\n", (void *)vpkt, flags)); 1867 1868 /* Clear any stale error flags */ 1869 if (bp) { 1870 bioerror(bp, 0); 1871 } 1872 1873 vpkt->vpkt_tgt_init_bp = bp; 1874 1875 if (flags & PKT_DMA_PARTIAL) { 1876 1877 /* 1878 * Immediate binding is needed. 1879 * Target driver may not set this flag in next invocation. 1880 * vhci has to remember this flag was set during first 1881 * invocation of vhci_scsi_init_pkt. 1882 */ 1883 vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL; 1884 } 1885 1886 if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) { 1887 1888 /* 1889 * Re-initialize some of the target driver packet state 1890 * information. 1891 */ 1892 vpkt->vpkt_tgt_pkt->pkt_state = 0; 1893 vpkt->vpkt_tgt_pkt->pkt_statistics = 0; 1894 vpkt->vpkt_tgt_pkt->pkt_reason = 0; 1895 1896 /* 1897 * Binding a vpkt->vpkt_path for this IO at init_time. 1898 * If an IO error happens later, target driver will clear 1899 * this vpkt->vpkt_path binding before re-init IO again. 1900 */ 1901 VHCI_DEBUG(8, (CE_NOTE, NULL, 1902 "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n", 1903 (void *)vpkt, newpkt)); 1904 if (pkt && vpkt->vpkt_hba_pkt) { 1905 VHCI_DEBUG(4, (CE_NOTE, NULL, 1906 "v_s_i_p calling update_pHCI_pkt resid %ld\n", 1907 pkt->pkt_resid)); 1908 vhci_update_pHCI_pkt(vpkt, pkt); 1909 } 1910 if (callback == SLEEP_FUNC) { 1911 rval = vhci_bind_transport( 1912 ap, vpkt, flags, callback); 1913 } else { 1914 rval = vhci_bind_transport( 1915 ap, vpkt, flags, NULL_FUNC); 1916 } 1917 VHCI_DEBUG(8, (CE_NOTE, NULL, 1918 "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n", 1919 (void *)vpkt, rval)); 1920 if (bp) { 1921 if (rval == TRAN_FATAL_ERROR) { 1922 /* 1923 * No paths available. Could not bind 1924 * any pHCI. Setting EFAULT as a way 1925 * to indicate no DMA is mapped. 1926 */ 1927 bioerror(bp, EFAULT); 1928 } else { 1929 /* 1930 * Do not indicate any pHCI errors to 1931 * target driver otherwise. 
1932 */ 1933 bioerror(bp, 0); 1934 } 1935 } 1936 if (rval != TRAN_ACCEPT) { 1937 VHCI_DEBUG(8, (CE_NOTE, NULL, 1938 "vhci_scsi_init_pkt: " 1939 "v_b_t failed 0x%p newpkt %x\n", 1940 (void *)vpkt, newpkt)); 1941 if (newpkt) { 1942 scsi_hba_pkt_free(ap, 1943 vpkt->vpkt_tgt_pkt); 1944 } 1945 return (NULL); 1946 } 1947 ASSERT(vpkt->vpkt_hba_pkt != NULL); 1948 ASSERT(vpkt->vpkt_path != NULL); 1949 1950 /* Update the resid for the target driver */ 1951 vpkt->vpkt_tgt_pkt->pkt_resid = 1952 vpkt->vpkt_hba_pkt->pkt_resid; 1953 } 1954 1955 return (vpkt->vpkt_tgt_pkt); 1956 } 1957 1958 /* 1959 * Function name : vhci_scsi_destroy_pkt 1960 * 1961 * Return Values : none 1962 */ 1963 static void 1964 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 1965 { 1966 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 1967 1968 VHCI_DEBUG(8, (CE_NOTE, NULL, 1969 "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt)); 1970 1971 vpkt->vpkt_tgt_init_pkt_flags = 0; 1972 if (vpkt->vpkt_hba_pkt) { 1973 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1974 vpkt->vpkt_hba_pkt = NULL; 1975 } 1976 if (vpkt->vpkt_path) { 1977 mdi_rele_path(vpkt->vpkt_path); 1978 vpkt->vpkt_path = NULL; 1979 } 1980 1981 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED); 1982 scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt); 1983 } 1984 1985 /* 1986 * Function name : vhci_scsi_dmafree() 1987 * 1988 * Return Values : none 1989 */ 1990 /*ARGSUSED*/ 1991 static void 1992 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 1993 { 1994 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 1995 1996 VHCI_DEBUG(6, (CE_NOTE, NULL, 1997 "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt)); 1998 1999 ASSERT(vpkt != NULL); 2000 if (vpkt->vpkt_hba_pkt) { 2001 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 2002 vpkt->vpkt_hba_pkt = NULL; 2003 } 2004 if (vpkt->vpkt_path) { 2005 mdi_rele_path(vpkt->vpkt_path); 2006 vpkt->vpkt_path = NULL; 2007 } 2008 } 2009 2010 /* 2011 * Function name : vhci_scsi_sync_pkt() 2012 * 2013 * Return Values : none 2014 */ 2015 /*ARGSUSED*/ 2016 static void 2017 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 2018 { 2019 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private; 2020 2021 ASSERT(vpkt != NULL); 2022 if (vpkt->vpkt_hba_pkt) { 2023 scsi_sync_pkt(vpkt->vpkt_hba_pkt); 2024 } 2025 } 2026 2027 /* 2028 * routine for reset notification setup, to register or cancel. 
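 *
 * (A target driver reaches this entry point through
 * scsi_reset_notify(9F); a minimal sketch, with my_reset_cb and
 * my_state as hypothetical names:
 *
 *	(void) scsi_reset_notify(ap, SCSI_RESET_NOTIFY,
 *	    my_reset_cb, (caddr_t)my_state);
 *
 * Passing SCSI_RESET_CANCEL instead unregisters the callback.)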
2029 */ 2030 static int 2031 vhci_scsi_reset_notify(struct scsi_address *ap, int flag, 2032 void (*callback)(caddr_t), caddr_t arg) 2033 { 2034 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2035 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg, 2036 &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf)); 2037 } 2038 2039 static int 2040 vhci_scsi_get_name_bus_addr(struct scsi_device *sd, 2041 char *name, int len, int bus_addr) 2042 { 2043 dev_info_t *cdip; 2044 char *guid; 2045 scsi_vhci_lun_t *vlun; 2046 2047 ASSERT(sd != NULL); 2048 ASSERT(name != NULL); 2049 2050 cdip = sd->sd_dev; 2051 2052 ASSERT(cdip != NULL); 2053 2054 if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS) { 2055 name[0] = '\0'; 2056 return (1); 2057 } 2058 2059 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS, 2060 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 2061 name[0] = '\0'; 2062 return (1); 2063 } 2064 2065 vlun = ADDR2VLUN(&sd->sd_address); 2066 if (bus_addr && vlun && vlun->svl_fops_name) { 2067 /* report the guid and the name of the failover module */ 2068 (void) snprintf(name, len, "g%s %s", guid, vlun->svl_fops_name); 2069 } else { 2070 /* report the guid */ 2071 (void) snprintf(name, len, "g%s", guid); 2072 } 2073 2074 ddi_prop_free(guid); 2075 return (1); 2076 } 2077 2078 static int 2079 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len) 2080 { 2081 return (vhci_scsi_get_name_bus_addr(sd, name, len, 1)); 2082 } 2083 2084 static int 2085 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len) 2086 { 2087 return (vhci_scsi_get_name_bus_addr(sd, name, len, 0)); 2088 } 2089 2090 /* 2091 * Return a pointer to the guid part of the devnm. 2092 * devnm format is "nodename@busaddr", busaddr format is "gGUID". 2093 */ 2094 static char * 2095 vhci_devnm_to_guid(char *devnm) 2096 { 2097 char *cp = devnm; 2098 2099 if (devnm == NULL) 2100 return (NULL); 2101 2102 while (*cp != '\0' && *cp != '@') 2103 cp++; 2104 if (*cp == '@' && *(cp + 1) == 'g') 2105 return (cp + 2); 2106 return (NULL); 2107 } 2108 2109 static int 2110 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags, 2111 int (*func)(caddr_t)) 2112 { 2113 struct scsi_vhci *vhci = ADDR2VHCI(ap); 2114 dev_info_t *cdip = ADDR2DIP(ap); 2115 mdi_pathinfo_t *pip = NULL; 2116 mdi_pathinfo_t *npip = NULL; 2117 scsi_vhci_priv_t *svp = NULL; 2118 struct scsi_device *psd = NULL; 2119 struct scsi_address *address = NULL; 2120 struct scsi_pkt *pkt = NULL; 2121 int rval = -1; 2122 int pgr_sema_held = 0; 2123 int held; 2124 int mps_flag = MDI_SELECT_ONLINE_PATH; 2125 struct scsi_vhci_lun *vlun; 2126 time_t tnow; 2127 int path_instance = 0; 2128 2129 vlun = ADDR2VLUN(ap); 2130 ASSERT(vlun != 0); 2131 2132 if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) && 2133 (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2134 VHCI_PROUT_REGISTER) || 2135 ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) == 2136 VHCI_PROUT_R_AND_IGNORE))) { 2137 if (!sema_tryp(&vlun->svl_pgr_sema)) 2138 return (TRAN_BUSY); 2139 pgr_sema_held = 1; 2140 if (vlun->svl_first_path != NULL) { 2141 rval = mdi_select_path(cdip, NULL, 2142 MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH, 2143 NULL, &pip); 2144 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 2145 VHCI_DEBUG(4, (CE_NOTE, NULL, 2146 "vhci_bind_transport: path select fail\n")); 2147 } else { 2148 npip = pip; 2149 do { 2150 if (npip == vlun->svl_first_path) { 2151 VHCI_DEBUG(4, (CE_NOTE, NULL, 2152 "vhci_bind_transport: " 2153 "valid first path 0x%p\n", 2154 (void *) 2155 vlun->svl_first_path)); 
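/*
 * Editorial note: a re-issued registration must go down the path
 * that carried the original PROUT first (svl_first_path);
 * vhci_run_cmd() later replicates the key on the remaining paths.
 */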
2156 pip = vlun->svl_first_path;
2157 goto bind_path;
2158 }
2159 pip = npip;
2160 rval = mdi_select_path(cdip, NULL,
2161 MDI_SELECT_ONLINE_PATH |
2162 MDI_SELECT_STANDBY_PATH,
2163 pip, &npip);
2164 mdi_rele_path(pip);
2165 } while ((rval == MDI_SUCCESS) &&
2166 (npip != NULL));
2167 }
2168 }
2169
2170 if (vlun->svl_first_path) {
2171 VHCI_DEBUG(4, (CE_NOTE, NULL,
2172 "vhci_bind_transport: invalid first path 0x%p\n",
2173 (void *)vlun->svl_first_path));
2174 vlun->svl_first_path = NULL;
2175 }
2176 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2177 if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2178 if (!sema_tryp(&vlun->svl_pgr_sema))
2179 return (TRAN_BUSY);
2180 }
2181 pgr_sema_held = 1;
2182 }
2183
2184 /*
2185 * If the path is already bound for the PKT_PARTIAL_DMA case,
2186 * try to use the same path.
2187 */
2188 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2189 VHCI_DEBUG(4, (CE_NOTE, NULL,
2190 "vhci_bind_transport: PKT_PARTIAL_DMA "
2191 "vpkt 0x%p, path 0x%p\n",
2192 (void *)vpkt, (void *)vpkt->vpkt_path));
2193 pip = vpkt->vpkt_path;
2194 goto bind_path;
2195 }
2196
2197 /*
2198 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
2199 * indicates that mdi_select_path should be called to select a
2200 * specific instance.
2201 *
2202 * NB: Condition pkt_path_instance reference on proper allocation.
2203 */
2204 if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
2205 scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
2206 path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2207 }
2208
2209 /*
2210 * If a reservation is active, bind the transport directly to the pip
2211 * with the reservation.
2212 */
2213 if (vpkt->vpkt_hba_pkt == NULL) {
2214 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2215 if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2216 pip = vlun->svl_resrv_pip;
2217 mdi_hold_path(pip);
2218 vlun->svl_waiting_for_activepath = 0;
2219 rval = MDI_SUCCESS;
2220 goto bind_path;
2221 } else {
2222 if (pgr_sema_held) {
2223 sema_v(&vlun->svl_pgr_sema);
2224 }
2225 return (TRAN_BUSY);
2226 }
2227 }
2228 try_again:
2229 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2230 path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2231 (void *)(intptr_t)path_instance, &pip);
2232 if (rval == MDI_BUSY) {
2233 if (pgr_sema_held) {
2234 sema_v(&vlun->svl_pgr_sema);
2235 }
2236 return (TRAN_BUSY);
2237 } else if (rval == MDI_DEVI_ONLINING) {
2238 /*
2239 * if we are here then we are in the midst of
2240 * an attach/probe of the client device.
2241 * We attempt to bind to an ONLINE path if available,
2242 * else it is OK to bind to a STANDBY path (instead
2243 * of triggering a failover) because the I/O associated
2244 * with attach/probe (e.g. INQUIRY, block 0 read)
2245 * is completed by targets even on passive paths.
2246 * If no ONLINE paths are available, it is important
2247 * to set svl_waiting_for_activepath for two
2248 * reasons: (1) avoid sense analysis in the
2249 * "external failure detection" codepath in
2250 * vhci_intr(). Failure to do so will result in an
2251 * infinite loop (unless an ONLINE path becomes
2252 * available at some point), and (2) avoid
2253 * unnecessary failover (see the "---Waiting For Active
2254 * Path---" comment below).
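 *
 * (Concretely, with the constants used below: the wait loop polls
 * in 1ms steps via drv_usecwait(1000) and gives up roughly 60
 * seconds after svl_wfa_time was stamped.)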
2255 */
2256 VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2257 "state\n", (void *)cdip));
2258 pip = NULL;
2259 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2260 mps_flag, NULL, &pip);
2261 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2262 if (vlun->svl_waiting_for_activepath == 0) {
2263 vlun->svl_waiting_for_activepath = 1;
2264 vlun->svl_wfa_time = ddi_get_time();
2265 }
2266 mps_flag |= MDI_SELECT_STANDBY_PATH;
2267 rval = mdi_select_path(cdip,
2268 vpkt->vpkt_tgt_init_bp,
2269 mps_flag, NULL, &pip);
2270 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2271 if (pgr_sema_held) {
2272 sema_v(&vlun->svl_pgr_sema);
2273 }
2274 return (TRAN_FATAL_ERROR);
2275 }
2276 goto bind_path;
2277 }
2278 } else if (rval == MDI_FAILURE) {
2279 if (pgr_sema_held) {
2280 sema_v(&vlun->svl_pgr_sema);
2281 }
2282 return (TRAN_FATAL_ERROR);
2283 }
2284
2285 if ((pip == NULL) || (rval == MDI_NOPATH)) {
2286 while (vlun->svl_waiting_for_activepath) {
2287 /*
2288 * ---Waiting For Active Path---
2289 * This device was discovered across a
2290 * passive path; let's wait for a little
2291 * bit; hopefully an active path will
2292 * show up, obviating the need for a
2293 * failover.
2294 */
2295 tnow = ddi_get_time();
2296 if (tnow - vlun->svl_wfa_time >= 60) {
2297 vlun->svl_waiting_for_activepath = 0;
2298 } else {
2299 drv_usecwait(1000);
2300 if (vlun->svl_waiting_for_activepath
2301 == 0) {
2302 /*
2303 * an active path has come
2304 * online!
2305 */
2306 goto try_again;
2307 }
2308 }
2309 }
2310 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2311 if (!held) {
2312 VHCI_DEBUG(4, (CE_NOTE, NULL,
2313 "!Lun not held\n"));
2314 if (pgr_sema_held) {
2315 sema_v(&vlun->svl_pgr_sema);
2316 }
2317 return (TRAN_BUSY);
2318 }
2319 /*
2320 * now that the LUN is stable, one last check
2321 * to make sure no other changes sneaked in
2322 * (like a path coming online or a
2323 * failover initiated by another thread).
2324 */
2325 pip = NULL;
2326 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2327 0, NULL, &pip);
2328 if (pip != NULL) {
2329 VHCI_RELEASE_LUN(vlun);
2330 vlun->svl_waiting_for_activepath = 0;
2331 goto bind_path;
2332 }
2333
2334 /*
2335 * Check if there is an ONLINE path OR a STANDBY path
2336 * available. If none is available, do not attempt
2337 * to do a failover, just return a fatal error at this
2338 * point.
2339 */
2340 npip = NULL;
2341 rval = mdi_select_path(cdip, NULL,
2342 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2343 NULL, &npip);
2344 if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2345 /*
2346 * No paths available, just return a FATAL error.
2347 */
2348 VHCI_RELEASE_LUN(vlun);
2349 if (pgr_sema_held) {
2350 sema_v(&vlun->svl_pgr_sema);
2351 }
2352 return (TRAN_FATAL_ERROR);
2353 }
2354 mdi_rele_path(npip);
2355 VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2356 "mdi_failover\n"));
2357 rval = mdi_failover(vhci->vhci_dip, cdip,
2358 MDI_FAILOVER_ASYNC);
2359 if (rval == MDI_FAILURE) {
2360 VHCI_RELEASE_LUN(vlun);
2361 if (pgr_sema_held) {
2362 sema_v(&vlun->svl_pgr_sema);
2363 }
2364 return (TRAN_FATAL_ERROR);
2365 } else if (rval == MDI_BUSY) {
2366 VHCI_RELEASE_LUN(vlun);
2367 if (pgr_sema_held) {
2368 sema_v(&vlun->svl_pgr_sema);
2369 }
2370 return (TRAN_BUSY);
2371 } else {
2372 if (pgr_sema_held) {
2373 sema_v(&vlun->svl_pgr_sema);
2374 }
2375 return (TRAN_BUSY);
2376 }
2377 }
2378 vlun->svl_waiting_for_activepath = 0;
2379 bind_path:
2380 vpkt->vpkt_path = pip;
2381 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2382 ASSERT(svp != NULL);
2383
2384 psd = svp->svp_psd;
2385 ASSERT(psd != NULL);
2386 address = &psd->sd_address;
2387 } else {
2388 pkt = vpkt->vpkt_hba_pkt;
2389 address = &pkt->pkt_address;
2390 }
2391
2392 /* Verify match of specified path_instance and selected path_instance */
2393 ASSERT((path_instance == 0) ||
2394 (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));
2395
2396 /*
2397 * For the PKT_PARTIAL_DMA case, call the pHCI's scsi_init_pkt whenever
2398 * the target driver calls vhci_scsi_init_pkt.
2399 */
2400 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2401 vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2402 VHCI_DEBUG(4, (CE_NOTE, NULL,
2403 "vhci_bind_transport: PKT_PARTIAL_DMA "
2404 "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2405 (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2406 pkt = vpkt->vpkt_hba_pkt;
2407 address = &pkt->pkt_address;
2408 }
2409
2410 if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2411 pkt = scsi_init_pkt(address, pkt,
2412 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2413 vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL);
2414
2415 if (pkt == NULL) {
2416 VHCI_DEBUG(4, (CE_NOTE, NULL,
2417 "!bind transport: 0x%p 0x%p 0x%p\n",
2418 (void *)vhci, (void *)psd, (void *)vpkt));
2419 if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2420 MDI_PI_ERRSTAT(vpkt->vpkt_path,
2421 MDI_PI_TRANSERR);
2422 mdi_rele_path(vpkt->vpkt_path);
2423 vpkt->vpkt_path = NULL;
2424 }
2425 if (pgr_sema_held) {
2426 sema_v(&vlun->svl_pgr_sema);
2427 }
2428 /*
2429 * Looks like a fatal error.
2430 * Maybe the device disappeared underneath us.
2431 * Give the target driver another chance to retry
2432 * and get another path.
2433 */
2434 return (TRAN_BUSY);
2435 }
2436 }
2437
2438 pkt->pkt_private = vpkt;
2439 vpkt->vpkt_hba_pkt = pkt;
2440 return (TRAN_ACCEPT);
2441 }
2442
2443
2444 /*PRINTFLIKE3*/
2445 void
2446 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2447 { 2448 char buf[256]; 2449 va_list ap; 2450 2451 va_start(ap, fmt); 2452 (void) vsprintf(buf, fmt, ap); 2453 va_end(ap); 2454 2455 scsi_log(dip, "scsi_vhci", level, buf); 2456 } 2457 2458 /* do a PGR out with the information we've saved away */ 2459 static int 2460 vhci_do_prout(scsi_vhci_priv_t *svp) 2461 { 2462 2463 struct scsi_pkt *new_pkt; 2464 struct buf *bp; 2465 scsi_vhci_lun_t *vlun = svp->svp_svl; 2466 int rval, retry, nr_retry, ua_retry; 2467 struct scsi_extended_sense *sns; 2468 2469 bp = getrbuf(KM_SLEEP); 2470 bp->b_flags = B_WRITE; 2471 bp->b_resid = 0; 2472 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout; 2473 bp->b_bcount = vlun->svl_bcount; 2474 2475 VHCI_INCR_PATH_CMDCOUNT(svp); 2476 2477 new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, 2478 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0, 2479 SLEEP_FUNC, NULL); 2480 if (new_pkt == NULL) { 2481 VHCI_DECR_PATH_CMDCOUNT(svp); 2482 freerbuf(bp); 2483 cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed"); 2484 return (0); 2485 } 2486 mutex_enter(&vlun->svl_mutex); 2487 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout; 2488 bp->b_bcount = vlun->svl_bcount; 2489 bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp, 2490 sizeof (vlun->svl_cdb)); 2491 new_pkt->pkt_time = vlun->svl_time; 2492 mutex_exit(&vlun->svl_mutex); 2493 new_pkt->pkt_flags = FLAG_NOINTR; 2494 2495 ua_retry = nr_retry = retry = 0; 2496 again: 2497 rval = vhci_do_scsi_cmd(new_pkt); 2498 if (rval != 1) { 2499 if ((new_pkt->pkt_reason == CMD_CMPLT) && 2500 (SCBP_C(new_pkt) == STATUS_CHECK) && 2501 (new_pkt->pkt_state & STATE_ARQ_DONE)) { 2502 sns = &(((struct scsi_arq_status *)(uintptr_t) 2503 (new_pkt->pkt_scbp))->sts_sensedata); 2504 if ((sns->es_key == KEY_UNIT_ATTENTION) || 2505 (sns->es_key == KEY_NOT_READY)) { 2506 int max_retry; 2507 struct scsi_failover_ops *fops; 2508 fops = vlun->svl_fops; 2509 rval = fops->sfo_analyze_sense(svp->svp_psd, 2510 sns, vlun->svl_fops_ctpriv); 2511 if (rval == SCSI_SENSE_NOT_READY) { 2512 max_retry = vhci_prout_not_ready_retry; 2513 retry = nr_retry++; 2514 delay(1*drv_usectohz(1000000)); 2515 } else { 2516 /* chk for state change and update */ 2517 if (rval == SCSI_SENSE_STATE_CHANGED) { 2518 int held; 2519 VHCI_HOLD_LUN(vlun, 2520 VH_NOSLEEP, held); 2521 if (!held) { 2522 rval = TRAN_BUSY; 2523 } else { 2524 /* chk for alua first */ 2525 vhci_update_pathstates( 2526 (void *)vlun); 2527 } 2528 } 2529 retry = ua_retry++; 2530 max_retry = VHCI_MAX_PGR_RETRIES; 2531 } 2532 if (retry < max_retry) { 2533 VHCI_DEBUG(4, (CE_WARN, NULL, 2534 "!vhci_do_prout retry 0x%x " 2535 "(0x%x 0x%x 0x%x)", 2536 SCBP_C(new_pkt), 2537 new_pkt->pkt_cdbp[0], 2538 new_pkt->pkt_cdbp[1], 2539 new_pkt->pkt_cdbp[2])); 2540 goto again; 2541 } 2542 rval = 0; 2543 VHCI_DEBUG(4, (CE_WARN, NULL, 2544 "!vhci_do_prout 0x%x " 2545 "(0x%x 0x%x 0x%x)", 2546 SCBP_C(new_pkt), 2547 new_pkt->pkt_cdbp[0], 2548 new_pkt->pkt_cdbp[1], 2549 new_pkt->pkt_cdbp[2])); 2550 } else if (sns->es_key == KEY_ILLEGAL_REQUEST) 2551 rval = VHCI_PGR_ILLEGALOP; 2552 } 2553 } else { 2554 rval = 1; 2555 } 2556 scsi_destroy_pkt(new_pkt); 2557 VHCI_DECR_PATH_CMDCOUNT(svp); 2558 freerbuf(bp); 2559 return (rval); 2560 } 2561 2562 static void 2563 vhci_run_cmd(void *arg) 2564 { 2565 struct scsi_pkt *pkt = (struct scsi_pkt *)arg; 2566 struct scsi_pkt *tpkt; 2567 scsi_vhci_priv_t *svp; 2568 mdi_pathinfo_t *pip, *npip; 2569 scsi_vhci_lun_t *vlun; 2570 dev_info_t *cdip; 2571 scsi_vhci_priv_t *nsvp; 2572 int fail = 0; 2573 int rval; 2574 struct vhci_pkt *vpkt; 2575 uchar_t cdb_1; 2576 
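/*
 * Editorial note: cdb_1 preserves the caller's PROUT service
 * action; this routine temporarily rewrites svl_cdb[1] to
 * REGISTER AND IGNORE EXISTING KEY while replicating the
 * registration on the remaining paths, and restores it afterwards.
 */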
vhci_prout_t *prout; 2577 2578 vpkt = (struct vhci_pkt *)pkt->pkt_private; 2579 tpkt = vpkt->vpkt_tgt_pkt; 2580 pip = vpkt->vpkt_path; 2581 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 2582 if (svp == NULL) { 2583 tpkt->pkt_reason = CMD_TRAN_ERR; 2584 tpkt->pkt_statistics = STAT_ABORTED; 2585 goto done; 2586 } 2587 vlun = svp->svp_svl; 2588 prout = &vlun->svl_prout; 2589 if (SCBP_C(pkt) != STATUS_GOOD) 2590 fail++; 2591 cdip = vlun->svl_dip; 2592 pip = npip = NULL; 2593 rval = mdi_select_path(cdip, NULL, 2594 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip); 2595 if ((rval != MDI_SUCCESS) || (npip == NULL)) { 2596 VHCI_DEBUG(4, (CE_NOTE, NULL, 2597 "vhci_run_cmd: no path! 0x%p\n", (void *)svp)); 2598 tpkt->pkt_reason = CMD_TRAN_ERR; 2599 tpkt->pkt_statistics = STAT_ABORTED; 2600 goto done; 2601 } 2602 2603 cdb_1 = vlun->svl_cdb[1]; 2604 vlun->svl_cdb[1] &= 0xe0; 2605 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 2606 2607 do { 2608 nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 2609 if (nsvp == NULL) { 2610 VHCI_DEBUG(4, (CE_NOTE, NULL, 2611 "vhci_run_cmd: no " 2612 "client priv! 0x%p offlined?\n", 2613 (void *)npip)); 2614 goto next_path; 2615 } 2616 if (vlun->svl_first_path == npip) { 2617 goto next_path; 2618 } else { 2619 if (vhci_do_prout(nsvp) != 1) 2620 fail++; 2621 } 2622 next_path: 2623 pip = npip; 2624 rval = mdi_select_path(cdip, NULL, 2625 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 2626 pip, &npip); 2627 mdi_rele_path(pip); 2628 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 2629 2630 vlun->svl_cdb[1] = cdb_1; 2631 2632 if (fail) { 2633 VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, " 2634 "couldn't be replicated on all paths", 2635 ddi_driver_name(cdip), ddi_get_instance(cdip))); 2636 vhci_print_prout_keys(vlun, "vhci_run_cmd: "); 2637 2638 if (SCBP_C(pkt) != STATUS_GOOD) { 2639 tpkt->pkt_reason = CMD_TRAN_ERR; 2640 tpkt->pkt_statistics = STAT_ABORTED; 2641 } 2642 } else { 2643 vlun->svl_pgr_active = 1; 2644 vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:"); 2645 2646 bcopy((const void *)prout->service_key, 2647 (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE); 2648 bcopy((const void *)prout->res_key, 2649 (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE); 2650 2651 vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:"); 2652 } 2653 done: 2654 if (SCBP_C(pkt) == STATUS_GOOD) 2655 vlun->svl_first_path = NULL; 2656 2657 if (svp) 2658 VHCI_DECR_PATH_CMDCOUNT(svp); 2659 2660 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 2661 scsi_destroy_pkt(pkt); 2662 vpkt->vpkt_hba_pkt = NULL; 2663 if (vpkt->vpkt_path) { 2664 mdi_rele_path(vpkt->vpkt_path); 2665 vpkt->vpkt_path = NULL; 2666 } 2667 } 2668 2669 sema_v(&vlun->svl_pgr_sema); 2670 /* 2671 * The PROUT commands are not included in the automatic retry 2672 * mechanism, therefore, vpkt_org_vpkt should never be set here. 2673 */ 2674 ASSERT(vpkt->vpkt_org_vpkt == NULL); 2675 if (tpkt->pkt_comp) 2676 (*tpkt->pkt_comp)(tpkt); 2677 2678 } 2679 2680 /* 2681 * Get the keys registered with this target. Since we will have 2682 * registered the same key with multiple initiators, strip out 2683 * any duplicate keys. 2684 * 2685 * The pointers which will be used to filter the registered keys from 2686 * the device will be stored in filter_prin and filter_pkt. 
If the 2687 * allocation length of the buffer was sufficient for the number of 2688 * parameter data bytes available to be returned by the device then the 2689 * key filtering will use the keylist returned from the original 2690 * request. If the allocation length of the buffer was not sufficient, 2691 * then the filtering will use the keylist returned from the request 2692 * that is resent below. 2693 * 2694 * If the device returns an additional length field that is greater than 2695 * the allocation length of the buffer, then allocate a new buffer which 2696 * can accommodate the number of parameter data bytes available to be 2697 * returned. Resend the scsi PRIN command, filter out the duplicate 2698 * keys and return as many of the unique keys found that was originally 2699 * requested and set the additional length field equal to the data bytes 2700 * of unique reservation keys available to be returned. 2701 * 2702 * If the device returns an additional length field that is less than or 2703 * equal to the allocation length of the buffer, then all the available 2704 * keys registered were returned by the device. Filter out the 2705 * duplicate keys and return all of the unique keys found and set the 2706 * additional length field equal to the data bytes of the reservation 2707 * keys to be returned. 2708 */ 2709 2710 #define VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation)) 2711 2712 static int 2713 vhci_do_prin(struct vhci_pkt **intr_vpkt) 2714 { 2715 scsi_vhci_priv_t *svp; 2716 struct vhci_pkt *vpkt = *intr_vpkt; 2717 vhci_prin_readkeys_t *prin; 2718 scsi_vhci_lun_t *vlun; 2719 struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address); 2720 2721 struct buf *new_bp = NULL; 2722 struct scsi_pkt *new_pkt = NULL; 2723 struct vhci_pkt *new_vpkt = NULL; 2724 uint32_t needed_length; 2725 int rval = VHCI_CMD_CMPLT; 2726 uint32_t prin_length = 0; 2727 uint32_t svl_prin_length = 0; 2728 2729 ASSERT(vpkt->vpkt_path); 2730 svp = mdi_pi_get_vhci_private(vpkt->vpkt_path); 2731 ASSERT(svp); 2732 vlun = svp->svp_svl; 2733 ASSERT(vlun); 2734 2735 /* 2736 * If the caller only asked for an amount of data that would not 2737 * be enough to include any key data it is likely that they will 2738 * send the next command with a buffer size based on the information 2739 * from this header. Doing recovery on this would be a duplication 2740 * of efforts. 2741 */ 2742 if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) { 2743 rval = VHCI_CMD_CMPLT; 2744 goto exit; 2745 } 2746 2747 if (vpkt->vpkt_org_vpkt == NULL) { 2748 /* 2749 * Can fail as sleep is not allowed. 2750 */ 2751 prin = (vhci_prin_readkeys_t *) 2752 bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP); 2753 } else { 2754 /* 2755 * The retry buf doesn't need to be mapped in. 2756 */ 2757 prin = (vhci_prin_readkeys_t *) 2758 vpkt->vpkt_tgt_init_bp->b_un.b_daddr; 2759 } 2760 2761 if (prin == NULL) { 2762 VHCI_DEBUG(5, (CE_WARN, NULL, 2763 "vhci_do_prin: bp_mapin_common failed.")); 2764 rval = VHCI_CMD_ERROR; 2765 goto fail; 2766 } 2767 2768 prin_length = BE_32(prin->length); 2769 2770 /* 2771 * According to SPC-3r22, sec 4.3.4.6: "If the amount of 2772 * information to be transferred exceeds the maximum value 2773 * that the ALLOCATION LENGTH field is capable of specifying, 2774 * the device server shall...terminate the command with CHECK 2775 * CONDITION status". The ALLOCATION LENGTH field of the 2776 * PERSISTENT RESERVE IN command is 2 bytes. 
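 * (That 2-byte field caps any single PRIN transfer at 0xFFFF
 * bytes, which is why the length check below treats anything
 * larger as invalid.)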
 * We should never
2777 * get here with an ADDITIONAL LENGTH greater than 0xFFFF,
2778 * so if we do, then it is an error!
2779 */
2780
2781
2782 if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) {
2783 VHCI_DEBUG(5, (CE_NOTE, NULL,
2784 "vhci_do_prin: Device returned invalid "
2785 "length 0x%x\n", prin_length));
2786 rval = VHCI_CMD_ERROR;
2787 goto fail;
2788 }
2789 needed_length = prin_length + VHCI_PRIN_HEADER_SZ;
2790
2791 /*
2792 * If prin->length is greater than the byte count allocated in the
2793 * original buffer, then resend the request with a large enough buffer
2794 * allocated to get all of the available registered keys.
2795 */
2796 if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) &&
2797 (vpkt->vpkt_org_vpkt == NULL)) {
2798
2799 new_pkt = vhci_create_retry_pkt(vpkt);
2800 if (new_pkt == NULL) {
2801 rval = VHCI_CMD_ERROR;
2802 goto fail;
2803 }
2804 new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2805
2806 /*
2807 * This is the buf with the buffer pointer
2808 * where the prin readkeys will be
2809 * returned from the device
2810 */
2811 new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
2812 NULL, needed_length, B_READ, NULL_FUNC, NULL);
2813 if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) {
2814 if (new_bp) {
2815 scsi_free_consistent_buf(new_bp);
2816 }
2817 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2818 rval = VHCI_CMD_ERROR;
2819 goto fail;
2820 }
2821 new_bp->b_bcount = needed_length;
2822 new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8);
2823 new_pkt->pkt_cdbp[8] = (uchar_t)needed_length;
2824
2825 rval = VHCI_CMD_RETRY;
2826
2827 new_vpkt->vpkt_tgt_init_bp = new_bp;
2828 }
2829
2830 if (rval == VHCI_CMD_RETRY) {
2831
2832 /*
2833 * There were more keys than the original request asked for.
2834 */
2835 mdi_pathinfo_t *path_holder = vpkt->vpkt_path;
2836
2837 /*
2838 * Release the old path because it does not matter which path
2839 * this command is sent down. This allows the normal bind
2840 * transport mechanism to be used.
2841 */
2842 if (vpkt->vpkt_path != NULL) {
2843 mdi_rele_path(vpkt->vpkt_path);
2844 vpkt->vpkt_path = NULL;
2845 }
2846
2847 /*
2848 * Dispatch the retry command
2849 */
2850 if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2851 (void *) new_vpkt, KM_NOSLEEP) == NULL) {
2852 if (path_holder) {
2853 vpkt->vpkt_path = path_holder;
2854 mdi_hold_path(path_holder);
2855 }
2856 scsi_free_consistent_buf(new_bp);
2857 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2858 rval = VHCI_CMD_ERROR;
2859 goto fail;
2860 }
2861
2862 /*
2863 * If we return VHCI_CMD_RETRY, that means the caller
2864 * is going to bail and wait for the reissued command
2865 * to complete. In that case, we need to decrement
2866 * the path command count right now. In any other
2867 * case, it'll be decremented by the caller.
2868 */
2869 VHCI_DECR_PATH_CMDCOUNT(svp);
2870 goto exit;
2871
2872 }
2873
2874 if (rval == VHCI_CMD_CMPLT) {
2875 /*
2876 * The original request got all of the keys, or the recovery
2877 * packet has returned.
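 *
 * A worked example of the resize above, with illustrative
 * numbers: a caller that allocated 24 bytes (the 8-byte header
 * plus two 8-byte keys) against a device reporting four
 * registered keys sees prin->length == 32, so needed_length
 * == 40 and the command is re-issued with a 40-byte buffer.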
2878 */ 2879 int new; 2880 int old; 2881 int num_keys = prin_length / MHIOC_RESV_KEY_SIZE; 2882 2883 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n", 2884 num_keys)); 2885 2886 #ifdef DEBUG 2887 VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n")); 2888 if (vhci_debug == 5) 2889 vhci_print_prin_keys(prin, num_keys); 2890 VHCI_DEBUG(5, (CE_NOTE, NULL, 2891 "vhci_do_prin: MPxIO old keys:\n")); 2892 if (vhci_debug == 5) 2893 vhci_print_prin_keys(&vlun->svl_prin, num_keys); 2894 #endif 2895 2896 /* 2897 * Filter out all duplicate keys returned from the device 2898 * We know that we use a different key for every host, so we 2899 * can simply strip out duplicates. Otherwise we would need to 2900 * do more bookkeeping to figure out which keys to strip out. 2901 */ 2902 2903 new = 0; 2904 2905 /* 2906 * If we got at least 1 key copy it. 2907 */ 2908 if (num_keys > 0) { 2909 vlun->svl_prin.keylist[0] = prin->keylist[0]; 2910 new++; 2911 } 2912 2913 /* 2914 * find next unique key. 2915 */ 2916 for (old = 1; old < num_keys; old++) { 2917 int j; 2918 int match = 0; 2919 2920 if (new >= VHCI_NUM_RESV_KEYS) 2921 break; 2922 for (j = 0; j < new; j++) { 2923 if (bcmp(&prin->keylist[old], 2924 &vlun->svl_prin.keylist[j], 2925 sizeof (mhioc_resv_key_t)) == 0) { 2926 match = 1; 2927 break; 2928 } 2929 } 2930 if (!match) { 2931 vlun->svl_prin.keylist[new] = 2932 prin->keylist[old]; 2933 new++; 2934 } 2935 } 2936 2937 /* Stored Big Endian */ 2938 vlun->svl_prin.generation = prin->generation; 2939 svl_prin_length = new * sizeof (mhioc_resv_key_t); 2940 /* Stored Big Endian */ 2941 vlun->svl_prin.length = BE_32(svl_prin_length); 2942 svl_prin_length += VHCI_PRIN_HEADER_SZ; 2943 2944 /* 2945 * If we arrived at this point after issuing a retry, make sure 2946 * that we put everything back the way it originally was so 2947 * that the target driver can complete the command correctly. 2948 */ 2949 if (vpkt->vpkt_org_vpkt != NULL) { 2950 new_bp = vpkt->vpkt_tgt_init_bp; 2951 2952 scsi_free_consistent_buf(new_bp); 2953 2954 vpkt = vhci_sync_retry_pkt(vpkt); 2955 *intr_vpkt = vpkt; 2956 2957 /* 2958 * Make sure the original buffer is mapped into kernel 2959 * space before we try to copy the filtered keys into 2960 * it. 2961 */ 2962 prin = (vhci_prin_readkeys_t *)bp_mapin_common( 2963 vpkt->vpkt_tgt_init_bp, VM_NOSLEEP); 2964 } 2965 2966 /* 2967 * Now copy the desired number of prin keys into the original 2968 * target buffer. 2969 */ 2970 if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) { 2971 /* 2972 * It is safe to return all of the available unique 2973 * keys 2974 */ 2975 bcopy(&vlun->svl_prin, prin, svl_prin_length); 2976 } else { 2977 /* 2978 * Not all of the available keys were requested by the 2979 * original command. 2980 */ 2981 bcopy(&vlun->svl_prin, prin, 2982 vpkt->vpkt_tgt_init_bp->b_bcount); 2983 } 2984 #ifdef DEBUG 2985 VHCI_DEBUG(5, (CE_NOTE, NULL, 2986 "vhci_do_prin: To Application:\n")); 2987 if (vhci_debug == 5) 2988 vhci_print_prin_keys(prin, new); 2989 VHCI_DEBUG(5, (CE_NOTE, NULL, 2990 "vhci_do_prin: MPxIO new keys:\n")); 2991 if (vhci_debug == 5) 2992 vhci_print_prin_keys(&vlun->svl_prin, new); 2993 #endif 2994 } 2995 fail: 2996 if (rval == VHCI_CMD_ERROR) { 2997 /* 2998 * If we arrived at this point after issuing a 2999 * retry, make sure that we put everything back 3000 * the way it originally was so that ssd can 3001 * complete the command correctly. 
3002 */ 3003 3004 if (vpkt->vpkt_org_vpkt != NULL) { 3005 new_bp = vpkt->vpkt_tgt_init_bp; 3006 if (new_bp != NULL) { 3007 scsi_free_consistent_buf(new_bp); 3008 } 3009 3010 new_vpkt = vpkt; 3011 vpkt = vpkt->vpkt_org_vpkt; 3012 3013 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 3014 new_vpkt->vpkt_tgt_pkt); 3015 } 3016 3017 /* 3018 * Mark this command completion as having an error so that 3019 * ssd will retry the command. 3020 */ 3021 3022 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 3023 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 3024 3025 rval = VHCI_CMD_CMPLT; 3026 } 3027 exit: 3028 /* 3029 * Make sure that the semaphore is only released once. 3030 */ 3031 if (rval == VHCI_CMD_CMPLT) { 3032 sema_v(&vlun->svl_pgr_sema); 3033 } 3034 3035 return (rval); 3036 } 3037 3038 static void 3039 vhci_intr(struct scsi_pkt *pkt) 3040 { 3041 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 3042 struct scsi_pkt *tpkt; 3043 scsi_vhci_priv_t *svp; 3044 scsi_vhci_lun_t *vlun; 3045 int rval, held; 3046 struct scsi_failover_ops *fops; 3047 struct scsi_extended_sense *sns; 3048 mdi_pathinfo_t *lpath; 3049 static char *timeout_err = "Command Timeout"; 3050 static char *parity_err = "Parity Error"; 3051 char *err_str = NULL; 3052 dev_info_t *vdip, *cdip, *pdip; 3053 char *cpath, *dpath; 3054 3055 ASSERT(vpkt != NULL); 3056 tpkt = vpkt->vpkt_tgt_pkt; 3057 ASSERT(tpkt != NULL); 3058 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3059 ASSERT(svp != NULL); 3060 vlun = svp->svp_svl; 3061 ASSERT(vlun != NULL); 3062 lpath = vpkt->vpkt_path; 3063 3064 /* 3065 * sync up the target driver's pkt with the pkt that 3066 * we actually used 3067 */ 3068 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp); 3069 tpkt->pkt_resid = pkt->pkt_resid; 3070 tpkt->pkt_state = pkt->pkt_state; 3071 tpkt->pkt_statistics = pkt->pkt_statistics; 3072 tpkt->pkt_reason = pkt->pkt_reason; 3073 3074 /* Return path_instance information back to the target driver. */ 3075 if (scsi_pkt_allocated_correctly(tpkt)) { 3076 if (scsi_pkt_allocated_correctly(pkt)) { 3077 /* 3078 * If both packets were correctly allocated, 3079 * return path returned by pHCI. 
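 * (pkt_path_instance is only meaningful when the packet came
 * through a modern scsi_pkt(9S) allocation, which is what the
 * scsi_pkt_allocated_correctly() checks here and in
 * vhci_bind_transport() guard.)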
3080 */
3081 tpkt->pkt_path_instance = pkt->pkt_path_instance;
3082 } else {
3083 /* Otherwise return the path of the pHCI we used */
3084 tpkt->pkt_path_instance =
3085 mdi_pi_get_path_instance(lpath);
3086 }
3087 }
3088
3089 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3090 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3091 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3092 if ((SCBP_C(pkt) != STATUS_GOOD) ||
3093 (pkt->pkt_reason != CMD_CMPLT)) {
3094 sema_v(&vlun->svl_pgr_sema);
3095 }
3096 } else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3097 if (pkt->pkt_reason != CMD_CMPLT ||
3098 (SCBP_C(pkt) != STATUS_GOOD)) {
3099 sema_v(&vlun->svl_pgr_sema);
3100 }
3101 }
3102
3103 switch (pkt->pkt_reason) {
3104 case CMD_CMPLT:
3105 /*
3106 * cmd completed successfully, check for scsi errors
3107 */
3108 switch (*(pkt->pkt_scbp)) {
3109 case STATUS_CHECK:
3110 if (pkt->pkt_state & STATE_ARQ_DONE) {
3111 sns = &(((struct scsi_arq_status *)(uintptr_t)
3112 (pkt->pkt_scbp))->sts_sensedata);
3113 fops = vlun->svl_fops;
3114 ASSERT(fops != NULL);
3115 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3116 "Received sns key %x esc %x escq %x\n",
3117 sns->es_key, sns->es_add_code,
3118 sns->es_qual_code));
3119
3120 if (vlun->svl_waiting_for_activepath == 1) {
3121 /*
3122 * if we are here it means we are
3123 * in the midst of a probe/attach
3124 * through a passive path; this
3125 * case is exempt from sense analysis
3126 * for detection of ext. failover
3127 * because that would unnecessarily
3128 * increase attach time.
3129 */
3130 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3131 vpkt->vpkt_tgt_init_scblen);
3132 break;
3133 }
3134 if (sns->es_add_code == VHCI_SCSI_PERR) {
3135 /*
3136 * parity error
3137 */
3138 err_str = parity_err;
3139 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3140 vpkt->vpkt_tgt_init_scblen);
3141 break;
3142 }
3143 rval = fops->sfo_analyze_sense(svp->svp_psd,
3144 sns, vlun->svl_fops_ctpriv);
3145 if ((rval == SCSI_SENSE_NOFAILOVER) ||
3146 (rval == SCSI_SENSE_UNKNOWN) ||
3147 (rval == SCSI_SENSE_NOT_READY)) {
3148 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3149 vpkt->vpkt_tgt_init_scblen);
3150 break;
3151 } else if (rval == SCSI_SENSE_STATE_CHANGED) {
3152 struct scsi_vhci *vhci;
3153 vhci = ADDR2VHCI(&tpkt->pkt_address);
3154 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3155 if (!held) {
3156 /*
3157 * looks like some other thread
3158 * has already detected this
3159 * condition
3160 */
3161 tpkt->pkt_state &=
3162 ~STATE_ARQ_DONE;
3163 *(tpkt->pkt_scbp) =
3164 STATUS_BUSY;
3165 break;
3166 }
3167 (void) taskq_dispatch(
3168 vhci->vhci_update_pathstates_taskq,
3169 vhci_update_pathstates,
3170 (void *)vlun, KM_SLEEP);
3171 } else {
3172 /*
3173 * externally initiated failover
3174 * has occurred or is in progress
3175 */
3176 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3177 if (!held) {
3178 /*
3179 * looks like some other thread
3180 * has already detected this
3181 * condition
3182 */
3183 tpkt->pkt_state &=
3184 ~STATE_ARQ_DONE;
3185 *(tpkt->pkt_scbp) =
3186 STATUS_BUSY;
3187 break;
3188 } else {
3189 rval = vhci_handle_ext_fo
3190 (pkt, rval);
3191 if (rval == BUSY_RETURN) {
3192 tpkt->pkt_state &=
3193 ~STATE_ARQ_DONE;
3194 *(tpkt->pkt_scbp) =
3195 STATUS_BUSY;
3196 break;
3197 }
3198 bcopy(pkt->pkt_scbp,
3199 tpkt->pkt_scbp,
3200 vpkt->vpkt_tgt_init_scblen);
3201 break;
3202 }
3203 }
3204 }
3205 break;
3206
3207 /*
3208 * If this is a good SCSI-II RELEASE cmd completion then restore
3209 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3210 * If this is a good SCSI-II RESERVE cmd completion then set
3211 * VLUN_RESERVE_ACTIVE_FLG.
3212 */
3213 case STATUS_GOOD:
3214 if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3215 (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3216 (void) mdi_set_lb_policy(vlun->svl_dip,
3217 vlun->svl_lb_policy_save);
3218 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3219 VHCI_DEBUG(1, (CE_WARN, NULL,
3220 "!vhci_intr: vlun 0x%p release path 0x%p",
3221 (void *)vlun, (void *)vpkt->vpkt_path));
3222 }
3223
3224 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3225 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3226 vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3227 vlun->svl_resrv_pip = vpkt->vpkt_path;
3228 VHCI_DEBUG(1, (CE_WARN, NULL,
3229 "!vhci_intr: vlun 0x%p reserved path 0x%p",
3230 (void *)vlun, (void *)vpkt->vpkt_path));
3231 }
3232 break;
3233
3234 case STATUS_RESERVATION_CONFLICT:
3235 VHCI_DEBUG(1, (CE_WARN, NULL,
3236 "!vhci_intr: vlun 0x%p "
3237 "reserve conflict on path 0x%p",
3238 (void *)vlun, (void *)vpkt->vpkt_path));
3239 /* FALLTHROUGH */
3240 default:
3241 break;
3242 }
3243
3244 /*
3245 * Update I/O completion statistics for the path
3246 */
3247 mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3248
3249 /*
3250 * Command completed successfully, release the dma binding and
3251 * destroy the transport side of the packet.
3252 */
3253 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3254 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3255 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3256 if (SCBP_C(pkt) == STATUS_GOOD) {
3257 ASSERT(vlun->svl_taskq);
3258 svp->svp_last_pkt_reason = pkt->pkt_reason;
3259 (void) taskq_dispatch(vlun->svl_taskq,
3260 vhci_run_cmd, pkt, KM_SLEEP);
3261 return;
3262 }
3263 }
3264 if ((SCBP_C(pkt) == STATUS_GOOD) &&
3265 (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
3266 /*
3267 * If the action (value in byte 1 of the cdb) is zero,
3268 * we're reading keys, and that's the only condition
3269 * where we need to be concerned with filtering keys
3270 * and potential retries. Otherwise, we simply signal
3271 * the semaphore and move on.
3272 */
3273 if (pkt->pkt_cdbp[1] == 0) {
3274 /*
3275 * If this is the completion of an internal
3276 * retry then we need to make sure that the
3277 * pkt and tpkt pointers are readjusted so
3278 * the calls to scsi_destroy_pkt and pkt_comp
3279 * below work correctly.
3280 */
3281 if (vpkt->vpkt_org_vpkt != NULL) {
3282 pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3283 tpkt = vpkt->vpkt_org_vpkt->
3284 vpkt_tgt_pkt;
3285
3286 /*
3287 * If this command was issued through
3288 * the taskq then we need to clear
3289 * this flag for proper processing in
3290 * the case of a retry from the target
3291 * driver.
3292 */
3293 vpkt->vpkt_state &=
3294 ~VHCI_PKT_THRU_TASKQ;
3295 }
3296
3297 /*
3298 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3299 * vpkt will contain the address of the
3300 * original vpkt
3301 */
3302 if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3303 /*
3304 * The command has been resent to get
3305 * all the keys from the device. Don't
3306 * complete the command with ssd until
3307 * the retry completes.
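 *
 * (The retry was dispatched through the vhci taskq by
 * vhci_do_prin(); when it completes, vhci_intr() runs again,
 * restores the original vpkt/tpkt pair, and finishes the
 * command then.)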
3308 */ 3309 return; 3310 } 3311 } else { 3312 sema_v(&vlun->svl_pgr_sema); 3313 } 3314 } 3315 3316 break; 3317 3318 case CMD_TIMEOUT: 3319 if ((pkt->pkt_statistics & 3320 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 3321 3322 VHCI_DEBUG(1, (CE_NOTE, NULL, 3323 "!scsi vhci timeout invoked\n")); 3324 3325 (void) vhci_recovery_reset(vlun, &pkt->pkt_address, 3326 FALSE, VHCI_DEPTH_ALL); 3327 } 3328 MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR); 3329 tpkt->pkt_statistics |= STAT_ABORTED; 3330 err_str = timeout_err; 3331 break; 3332 3333 case CMD_TRAN_ERR: 3334 /* 3335 * This status is returned if the transport has sent the cmd 3336 * down the link to the target and then some error occurs. 3337 * In case of SCSI-II RESERVE cmd, we don't know if the 3338 * reservation been accepted by the target or not, so we need 3339 * to clear the reservation. 3340 */ 3341 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) || 3342 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) { 3343 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received" 3344 " cmd_tran_err for scsi-2 reserve cmd\n")); 3345 if (!vhci_recovery_reset(vlun, &pkt->pkt_address, 3346 TRUE, VHCI_DEPTH_TARGET)) { 3347 VHCI_DEBUG(1, (CE_WARN, NULL, 3348 "!vhci_intr cmd_tran_err reset failed!")); 3349 } 3350 } 3351 break; 3352 3353 case CMD_DEV_GONE: 3354 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received " 3355 "cmd_dev_gone\n")); 3356 tpkt->pkt_reason = CMD_CMPLT; 3357 tpkt->pkt_state = STATE_GOT_BUS | 3358 STATE_GOT_TARGET | STATE_SENT_CMD | 3359 STATE_GOT_STATUS; 3360 *(tpkt->pkt_scbp) = STATUS_BUSY; 3361 break; 3362 3363 default: 3364 break; 3365 } 3366 3367 /* 3368 * SCSI-II RESERVE cmd has been serviced by the lower layers clear 3369 * the flag so the lun is not QUIESCED any longer. 3370 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt 3371 * is retried, a taskq shall again be dispatched to service it. Else 3372 * it may lead to a system hang if the retry is within interrupt 3373 * context. 3374 */ 3375 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) || 3376 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) { 3377 vlun->svl_flags &= ~VLUN_QUIESCED_FLG; 3378 vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ; 3379 } 3380 3381 /* 3382 * vpkt_org_vpkt should always be NULL here if the retry command 3383 * has been successfully processed. If vpkt_org_vpkt != NULL at 3384 * this point, it is an error so restore the original vpkt and 3385 * return an error to the target driver so it can retry the 3386 * command as appropriate. 3387 */ 3388 if (vpkt->vpkt_org_vpkt != NULL) { 3389 struct vhci_pkt *new_vpkt = vpkt; 3390 vpkt = vpkt->vpkt_org_vpkt; 3391 3392 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address, 3393 new_vpkt->vpkt_tgt_pkt); 3394 3395 /* 3396 * Mark this command completion as having an error so that 3397 * ssd will retry the command. 
3398 */ 3399 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED; 3400 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED; 3401 3402 pkt = vpkt->vpkt_hba_pkt; 3403 tpkt = vpkt->vpkt_tgt_pkt; 3404 } 3405 3406 if ((err_str != NULL) && (pkt->pkt_reason != 3407 svp->svp_last_pkt_reason)) { 3408 cdip = vlun->svl_dip; 3409 pdip = mdi_pi_get_phci(vpkt->vpkt_path); 3410 vdip = ddi_get_parent(cdip); 3411 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3412 dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3413 vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s (%s%d)", 3414 ddi_pathname(cdip, cpath), ddi_driver_name(cdip), 3415 ddi_get_instance(cdip), err_str, 3416 ddi_pathname(pdip, dpath), ddi_driver_name(pdip), 3417 ddi_get_instance(pdip)); 3418 kmem_free(cpath, MAXPATHLEN); 3419 kmem_free(dpath, MAXPATHLEN); 3420 } 3421 svp->svp_last_pkt_reason = pkt->pkt_reason; 3422 VHCI_DECR_PATH_CMDCOUNT(svp); 3423 3424 /* 3425 * For PARTIAL_DMA, vhci should not free the path. 3426 * Target driver will call into vhci_scsi_dmafree or 3427 * destroy pkt to release this path. 3428 */ 3429 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 3430 scsi_destroy_pkt(pkt); 3431 vpkt->vpkt_hba_pkt = NULL; 3432 if (vpkt->vpkt_path) { 3433 mdi_rele_path(vpkt->vpkt_path); 3434 vpkt->vpkt_path = NULL; 3435 } 3436 } 3437 3438 if (tpkt->pkt_comp) { 3439 (*tpkt->pkt_comp)(tpkt); 3440 } 3441 } 3442 3443 /* 3444 * two possibilities: (1) failover has completed 3445 * or (2) is in progress; update our path states for 3446 * the former case; for the latter case, 3447 * initiate a scsi_watch request to 3448 * determine when failover completes - vlun is HELD 3449 * until failover completes; BUSY is returned to upper 3450 * layer in both the cases 3451 */ 3452 static int 3453 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat) 3454 { 3455 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private; 3456 struct scsi_pkt *tpkt; 3457 scsi_vhci_priv_t *svp; 3458 scsi_vhci_lun_t *vlun; 3459 struct scsi_vhci *vhci; 3460 scsi_vhci_swarg_t *swarg; 3461 char *path; 3462 3463 ASSERT(vpkt != NULL); 3464 tpkt = vpkt->vpkt_tgt_pkt; 3465 ASSERT(tpkt != NULL); 3466 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 3467 ASSERT(svp != NULL); 3468 vlun = svp->svp_svl; 3469 ASSERT(vlun != NULL); 3470 ASSERT(VHCI_LUN_IS_HELD(vlun)); 3471 3472 vhci = ADDR2VHCI(&tpkt->pkt_address); 3473 3474 if (fostat == SCSI_SENSE_INACTIVE) { 3475 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover " 3476 "detected for %s; updating path states...\n", 3477 vlun->svl_lun_wwn)); 3478 /* 3479 * set the vlun flag to indicate to the task that the target 3480 * port group needs updating 3481 */ 3482 vlun->svl_flags |= VLUN_UPDATE_TPG; 3483 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq, 3484 vhci_update_pathstates, (void *)vlun, KM_SLEEP); 3485 } else { 3486 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3487 vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip), 3488 "!%s (%s%d): Waiting for externally initiated failover " 3489 "to complete", ddi_pathname(vlun->svl_dip, path), 3490 ddi_driver_name(vlun->svl_dip), 3491 ddi_get_instance(vlun->svl_dip)); 3492 kmem_free(path, MAXPATHLEN); 3493 swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP); 3494 if (swarg == NULL) { 3495 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: " 3496 "request packet allocation for %s failed....\n", 3497 vlun->svl_lun_wwn)); 3498 VHCI_RELEASE_LUN(vlun); 3499 return (PKT_RETURN); 3500 } 3501 swarg->svs_svp = svp; 3502 swarg->svs_tos = ddi_get_time(); 3503 swarg->svs_pi = vpkt->vpkt_path; 3504 swarg->svs_release_lun 
= 0;
3505 swarg->svs_done = 0;
3506 /*
3507 * place a hold on the path...we don't want it to
3508 * vanish while scsi_watch is in progress
3509 */
3510 mdi_hold_path(vpkt->vpkt_path);
3511 svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3512 VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3513 (caddr_t)swarg);
3514 }
3515 return (BUSY_RETURN);
3516 }
3517
3518 /*
3519 * vhci_efo_watch_cb:
3520 * Callback from scsi_watch request to check the failover status.
3521 * Completion is either due to successful failover or timeout.
3522 * Upon successful completion, vhci_update_path_states is called.
3523 * For the timeout condition, vhci_efo_done is called.
3524 * Always returns 0 to scsi_watch to keep retrying till vhci_efo_done
3525 * terminates this request properly in a separate thread.
3526 */
3527
3528 static int
3529 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3530 {
3531 struct scsi_status *statusp = resultp->statusp;
3532 struct scsi_extended_sense *sensep = resultp->sensep;
3533 struct scsi_pkt *pkt = resultp->pkt;
3534 scsi_vhci_swarg_t *swarg;
3535 scsi_vhci_priv_t *svp;
3536 scsi_vhci_lun_t *vlun;
3537 struct scsi_vhci *vhci;
3538 dev_info_t *vdip;
3539 int rval, updt_paths;
3540
3541 swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3542 svp = swarg->svs_svp;
3543 if (swarg->svs_done) {
3544 /*
3545 * Already completed failover or timed out.
3546 * Waiting for vhci_efo_done to terminate this scsi_watch.
3547 */
3548 return (0);
3549 }
3550
3551 ASSERT(svp != NULL);
3552 vlun = svp->svp_svl;
3553 ASSERT(vlun != NULL);
3554 ASSERT(VHCI_LUN_IS_HELD(vlun));
3555 vlun->svl_efo_update_path = 0;
3556 vdip = ddi_get_parent(vlun->svl_dip);
3557 vhci = ddi_get_soft_state(vhci_softstate,
3558 ddi_get_instance(vdip));
3559
3560 updt_paths = 0;
3561
3562 if (pkt->pkt_reason != CMD_CMPLT) {
3563 if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3564 swarg->svs_release_lun = 1;
3565 goto done;
3566 }
3567 return (0);
3568 }
3569 if (*((unsigned char *)statusp) == STATUS_CHECK) {
3570 rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
3571 vlun->svl_fops_ctpriv);
3572 switch (rval) {
3573 /*
3574 * Only update path states in case the path is definitely
3575 * inactive, or no failover occurred. For all other
3576 * check conditions continue pinging. An unexpected
3577 * check condition shouldn't cause pinging to complete
3578 * prematurely.
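 * (Every return (0) below keeps the scsi_watch ping alive; the
 * watch is torn down only after svs_done is set, either on
 * timeout here or once the failover settles.)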
3579 */
3580 case SCSI_SENSE_INACTIVE:
3581 case SCSI_SENSE_NOFAILOVER:
3582 updt_paths = 1;
3583 break;
3584 default:
3585 if ((ddi_get_time() - swarg->svs_tos)
3586 >= VHCI_EXTFO_TIMEOUT) {
3587 swarg->svs_release_lun = 1;
3588 goto done;
3589 }
3590 return (0);
3591 }
3592 } else if (*((unsigned char *)statusp) ==
3593 STATUS_RESERVATION_CONFLICT) {
3594 updt_paths = 1;
3595 } else if ((*((unsigned char *)statusp)) &
3596 (STATUS_BUSY | STATUS_QFULL)) {
3597 return (0);
3598 }
3599 if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3600 (updt_paths == 1)) {
3601 /*
3602 * we got here because we had detected an
3603 * externally initiated failover; things
3604 * have settled down now, so let's
3605 * start up a task to update the
3606 * path states and target port group.
3607 */
3608 vlun->svl_efo_update_path = 1;
3609 swarg->svs_done = 1;
3610 vlun->svl_swarg = swarg;
3611 vlun->svl_flags |= VLUN_UPDATE_TPG;
3612 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3613 vhci_update_pathstates, (void *)vlun,
3614 KM_SLEEP);
3615 return (0);
3616 }
3617 if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3618 swarg->svs_release_lun = 1;
3619 goto done;
3620 }
3621 return (0);
3622 done:
3623 swarg->svs_done = 1;
3624 (void) taskq_dispatch(vhci->vhci_taskq,
3625 vhci_efo_done, (void *)swarg, KM_SLEEP);
3626 return (0);
3627 }
3628
3629 /*
3630 * vhci_efo_done:
3631 * Cleanly terminates scsi_watch and frees up resources.
3632 * Called as a taskq function in vhci_efo_watch_cb for the EFO timeout
3633 * condition, or by vhci_update_path_states invoked during externally
3634 * initiated failover completion.
3635 */
3636 static void
3637 vhci_efo_done(void *arg)
3638 {
3639 scsi_vhci_lun_t *vlun;
3640 scsi_vhci_swarg_t *swarg = (scsi_vhci_swarg_t *)arg;
3641 scsi_vhci_priv_t *svp = swarg->svs_svp;
3642 ASSERT(svp);
3643
3644 vlun = svp->svp_svl;
3645 ASSERT(vlun);
3646
3647 /* Wait for clean termination of scsi_watch */
3648 (void) scsi_watch_request_terminate(svp->svp_sw_token,
3649 SCSI_WATCH_TERMINATE_ALL_WAIT);
3650 svp->svp_sw_token = NULL;
3651
3652 /* release the path and free up resources to indicate failover completion */
3653 mdi_rele_path(swarg->svs_pi);
3654 if (swarg->svs_release_lun) {
3655 VHCI_RELEASE_LUN(vlun);
3656 }
3657 kmem_free((void *)swarg, sizeof (*swarg));
3658 }
3659
3660 /*
3661 * Update the path states.
3662 * vlun should be HELD when this is invoked.
3663 * Calls vhci_efo_done to clean up resources allocated for EFO.
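 *
 * The path walk below follows the usual MDI iteration pattern,
 * roughly (a sketch only, with error handling elided):
 *
 *	sps = mdi_select_path(dip, NULL, flags, NULL, &npip);
 *	while ((sps == MDI_SUCCESS) && (npip != NULL)) {
 *		pip = npip;
 *		(examine and update the path state for pip)
 *		sps = mdi_select_path(dip, NULL, flags, pip, &npip);
 *		mdi_rele_path(pip);
 *	}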
3664 */
3665 void
3666 vhci_update_pathstates(void *arg)
3667 {
3668 mdi_pathinfo_t *pip, *npip;
3669 dev_info_t *dip, *pdip;
3670 struct scsi_failover_ops *fo;
3671 struct scsi_vhci_priv *svp;
3672 struct scsi_device *psd;
3673 struct scsi_path_opinfo opinfo;
3674 char *pclass, *tptr;
3675 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
3676 int sps; /* mdi_select_path() status */
3677 char *cpath, *dpath;
3678 struct scsi_vhci *vhci;
3679 struct scsi_pkt *pkt;
3680 struct buf *bp;
3681 int reserve_conflict = 0;
3682
3683 ASSERT(VHCI_LUN_IS_HELD(vlun));
3684 dip = vlun->svl_dip;
3685 pip = npip = NULL;
3686
3687 vhci = ddi_get_soft_state(vhci_softstate,
3688 ddi_get_instance(ddi_get_parent(dip)));
3689
3690 sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3691 MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
3692 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3693 goto done;
3694 }
3695
3696 fo = vlun->svl_fops;
3697 do {
3698 pip = npip;
3699 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3700 psd = svp->svp_psd;
3701 if (fo->sfo_path_get_opinfo(psd, &opinfo,
3702 vlun->svl_fops_ctpriv) != 0) {
3703 sps = mdi_select_path(dip, NULL,
3704 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3705 MDI_SELECT_NO_PREFERRED), pip, &npip);
3706 mdi_rele_path(pip);
3707 continue;
3708 }
3709
3710 if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3711 MDI_SUCCESS) {
3712 VHCI_DEBUG(1, (CE_NOTE, NULL,
3713 "!vhci_update_pathstates: prop lookup failed for "
3714 "path 0x%p\n", (void *)pip));
3715 sps = mdi_select_path(dip, NULL,
3716 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3717 MDI_SELECT_NO_PREFERRED), pip, &npip);
3718 mdi_rele_path(pip);
3719 continue;
3720 }
3721
3722 /*
3723 * Need to update the "path-class" property
3724 * value in the device tree if different
3725 * from the existing value.
3726 */
3727 if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3728 (void) mdi_prop_update_string(pip, "path-class",
3729 opinfo.opinfo_path_attr);
3730 }
3731
3732 /*
3733 * Only change the state if needed, i.e. don't call
3734 * mdi_pi_set_state to ONLINE a path if it's already
3735 * ONLINE. Same for STANDBY paths.
3736 */ 3737 3738 if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE || 3739 opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) { 3740 if (!(MDI_PI_IS_ONLINE(pip))) { 3741 VHCI_DEBUG(1, (CE_NOTE, NULL, 3742 "!vhci_update_pathstates: marking path" 3743 " 0x%p as ONLINE\n", (void *)pip)); 3744 pdip = mdi_pi_get_phci(pip); 3745 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3746 dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3747 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s" 3748 " (%s%d): path %s (%s%d) target address %s" 3749 " is now ONLINE because of" 3750 " an externally initiated failover", 3751 ddi_pathname(dip, cpath), 3752 ddi_driver_name(dip), 3753 ddi_get_instance(dip), 3754 ddi_pathname(pdip, dpath), 3755 ddi_driver_name(pdip), 3756 ddi_get_instance(pdip), 3757 mdi_pi_get_addr(pip)); 3758 kmem_free(cpath, MAXPATHLEN); 3759 kmem_free(dpath, MAXPATHLEN); 3760 mdi_pi_set_state(pip, 3761 MDI_PATHINFO_STATE_ONLINE); 3762 mdi_pi_set_preferred(pip, 3763 opinfo.opinfo_preferred); 3764 tptr = kmem_alloc(strlen 3765 (opinfo.opinfo_path_attr)+1, KM_SLEEP); 3766 (void) strlcpy(tptr, opinfo.opinfo_path_attr, 3767 (strlen(opinfo.opinfo_path_attr)+1)); 3768 mutex_enter(&vlun->svl_mutex); 3769 if (vlun->svl_active_pclass != NULL) { 3770 kmem_free(vlun->svl_active_pclass, 3771 strlen(vlun->svl_active_pclass)+1); 3772 } 3773 vlun->svl_active_pclass = tptr; 3774 if (vlun->svl_waiting_for_activepath) { 3775 vlun->svl_waiting_for_activepath = 0; 3776 } 3777 mutex_exit(&vlun->svl_mutex); 3778 /* Check for Reservation Conflict */ 3779 bp = scsi_alloc_consistent_buf( 3780 &svp->svp_psd->sd_address, 3781 (struct buf *)NULL, DEV_BSIZE, B_READ, 3782 NULL, NULL); 3783 if (!bp) { 3784 VHCI_DEBUG(1, (CE_NOTE, NULL, 3785 "vhci_update_pathstates: " 3786 "!No resources (buf)\n")); 3787 mdi_rele_path(pip); 3788 goto done; 3789 } 3790 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, 3791 NULL, bp, CDB_GROUP1, 3792 sizeof (struct scsi_arq_status), 0, 3793 PKT_CONSISTENT, NULL, NULL); 3794 if (pkt) { 3795 (void) scsi_setup_cdb((union scsi_cdb *) 3796 (uintptr_t)pkt->pkt_cdbp, 3797 SCMD_READ, 1, 1, 0); 3798 pkt->pkt_time = 3*30; 3799 pkt->pkt_flags = FLAG_NOINTR; 3800 pkt->pkt_path_instance = 3801 mdi_pi_get_path_instance(pip); 3802 3803 if ((scsi_transport(pkt) == 3804 TRAN_ACCEPT) && (pkt->pkt_reason 3805 == CMD_CMPLT) && (SCBP_C(pkt) == 3806 STATUS_RESERVATION_CONFLICT)) { 3807 reserve_conflict = 1; 3808 } 3809 scsi_destroy_pkt(pkt); 3810 } 3811 scsi_free_consistent_buf(bp); 3812 } else if (MDI_PI_IS_ONLINE(pip)) { 3813 if (strcmp(pclass, opinfo.opinfo_path_attr) 3814 != 0) { 3815 mdi_pi_set_preferred(pip, 3816 opinfo.opinfo_preferred); 3817 mutex_enter(&vlun->svl_mutex); 3818 if (vlun->svl_active_pclass == NULL || 3819 strcmp(opinfo.opinfo_path_attr, 3820 vlun->svl_active_pclass) != 0) { 3821 mutex_exit(&vlun->svl_mutex); 3822 tptr = kmem_alloc(strlen 3823 (opinfo.opinfo_path_attr)+1, 3824 KM_SLEEP); 3825 (void) strlcpy(tptr, 3826 opinfo.opinfo_path_attr, 3827 (strlen 3828 (opinfo.opinfo_path_attr) 3829 +1)); 3830 mutex_enter(&vlun->svl_mutex); 3831 } else { 3832 /* 3833 * No need to update 3834 * svl_active_pclass 3835 */ 3836 tptr = NULL; 3837 mutex_exit(&vlun->svl_mutex); 3838 } 3839 if (tptr) { 3840 if (vlun->svl_active_pclass 3841 != NULL) { 3842 kmem_free(vlun-> 3843 svl_active_pclass, 3844 strlen(vlun-> 3845 svl_active_pclass) 3846 +1); 3847 } 3848 vlun->svl_active_pclass = tptr; 3849 mutex_exit(&vlun->svl_mutex); 3850 } 3851 } 3852 } 3853 } else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) && 3854 
			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "!vhci_update_pathstates: marking path"
			    " 0x%p as STANDBY\n", (void *)pip));
			pdip = mdi_pi_get_phci(pip);
			cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s"
			    " (%s%d): path %s (%s%d) target address %s"
			    " is now STANDBY because of"
			    " an externally initiated failover",
			    ddi_pathname(dip, cpath),
			    ddi_driver_name(dip),
			    ddi_get_instance(dip),
			    ddi_pathname(pdip, dpath),
			    ddi_driver_name(pdip),
			    ddi_get_instance(pdip),
			    mdi_pi_get_addr(pip));
			kmem_free(cpath, MAXPATHLEN);
			kmem_free(dpath, MAXPATHLEN);
			mdi_pi_set_state(pip,
			    MDI_PATHINFO_STATE_STANDBY);
			mdi_pi_set_preferred(pip,
			    opinfo.opinfo_preferred);
			mutex_enter(&vlun->svl_mutex);
			if (vlun->svl_active_pclass != NULL) {
				if (strcmp(vlun->svl_active_pclass,
				    opinfo.opinfo_path_attr) == 0) {
					kmem_free(vlun->
					    svl_active_pclass,
					    strlen(vlun->
					    svl_active_pclass) + 1);
					vlun->svl_active_pclass = NULL;
				}
			}
			mutex_exit(&vlun->svl_mutex);
		}
		(void) mdi_prop_free(pclass);
		sps = mdi_select_path(dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
		    MDI_SELECT_NO_PREFERRED), pip, &npip);
		mdi_rele_path(pip);

	} while ((npip != NULL) && (sps == MDI_SUCCESS));

	/*
	 * Check to see if this vlun has an active SCSI-II RESERVE. If so,
	 * clear the reservation by sending a reset, so the host doesn't
	 * receive a reservation conflict.
	 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd
	 * of the reset, explicitly.
	 */
	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (reserve_conflict && (vlun->svl_xlf_capable == 0)) {
			(void) vhci_recovery_reset(vlun,
			    &svp->svp_psd->sd_address, FALSE,
			    VHCI_DEPTH_TARGET);
		}
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
	}
	if (vlun->svl_flags & VLUN_UPDATE_TPG) {
		/*
		 * Update the AccessState of related MP-API TPGs
		 */
		(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
		vlun->svl_flags &= ~VLUN_UPDATE_TPG;
	}
done:
	if (vlun->svl_efo_update_path) {
		vlun->svl_efo_update_path = 0;
		vhci_efo_done(vlun->svl_swarg);
		vlun->svl_swarg = 0;
	}
	VHCI_RELEASE_LUN(vlun);
}

/* ARGSUSED */
static int
vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
{
	scsi_hba_tran_t		*hba = NULL;
	struct scsi_device	*psd = NULL;
	scsi_vhci_lun_t		*vlun = NULL;
	dev_info_t		*pdip = NULL;
	dev_info_t		*tgt_dip;
	struct scsi_vhci	*vhci;
	char			*guid;
	scsi_vhci_priv_t	*svp = NULL;
	int			rval = MDI_FAILURE;
	int			vlun_alloced = 0;

	ASSERT(vdip != NULL);
	ASSERT(pip != NULL);

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	pdip = mdi_pi_get_phci(pip);
	ASSERT(pdip != NULL);

	hba = ddi_get_driver_private(pdip);
	ASSERT(hba != NULL);

	tgt_dip = mdi_pi_get_client(pip);
	ASSERT(tgt_dip != NULL);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_WARN, NULL,
		    "vhci_pathinfo_init: lun guid property failed"));
		goto failure;
	}

	vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
	ddi_prop_free(guid);

	vlun->svl_dip = tgt_dip;

	svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
	svp->svp_svl = vlun;

	vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
	mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);

	psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
	mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Clone the transport structure if requested; self-enumerating
	 * HBAs always need to use cloning.
	 */
	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
		scsi_hba_tran_t	*clone =
		    kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
		bcopy(hba, clone, sizeof (scsi_hba_tran_t));
		hba = clone;
		hba->tran_sd = psd;
	} else {
		ASSERT(hba->tran_sd == NULL);
	}
	psd->sd_dev = tgt_dip;
	psd->sd_address.a_hba_tran = hba;
	psd->sd_private = (caddr_t)pip;
	svp->svp_psd = psd;
	mdi_pi_set_vhci_private(pip, (caddr_t)svp);

	/*
	 * call hba's target init entry point if it exists
	 */
	if (hba->tran_tgt_init != NULL) {
		if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
		    hba, psd)) != DDI_SUCCESS) {
			VHCI_DEBUG(1, (CE_WARN, pdip,
			    "!vhci_pathinfo_init: tran_tgt_init failed for "
			    "path=0x%p rval=%x", (void *)pip, rval));
			goto failure;
		}
	}

	svp->svp_new_path = 1;

	psd->sd_inq = NULL;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
	    (void *)pip));
	return (MDI_SUCCESS);

failure:
	if (psd) {
		mutex_destroy(&psd->sd_mutex);
		kmem_free(psd, sizeof (*psd));
	}
	if (svp) {
		mdi_pi_set_vhci_private(pip, NULL);
		mutex_destroy(&svp->svp_mutex);
		cv_destroy(&svp->svp_cv);
		kmem_free(svp, sizeof (*svp));
	}
	if (hba && hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE)
		kmem_free(hba, sizeof (scsi_hba_tran_t));

	if (vlun_alloced)
		vhci_lun_free(tgt_dip);

	return (rval);
}

/* ARGSUSED */
static int
vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
{
	scsi_hba_tran_t		*hba = NULL;
	struct scsi_device	*psd = NULL;
	dev_info_t		*pdip = NULL;
	dev_info_t		*cdip = NULL;
	scsi_vhci_priv_t	*svp = NULL;

	ASSERT(vdip != NULL);
	ASSERT(pip != NULL);

	pdip = mdi_pi_get_phci(pip);
	ASSERT(pdip != NULL);

	cdip = mdi_pi_get_client(pip);
	ASSERT(cdip != NULL);

	hba = ddi_get_driver_private(pdip);
	ASSERT(hba != NULL);

	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		/* path already freed. Nothing to do. */
		return (MDI_SUCCESS);
	}

	psd = svp->svp_psd;
	ASSERT(psd != NULL);

	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
		hba = psd->sd_address.a_hba_tran;
		ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
		ASSERT(hba->tran_sd == psd);
	} else {
		ASSERT(hba->tran_sd == NULL);
	}

	if (hba->tran_tgt_free != NULL) {
		(*hba->tran_tgt_free) (pdip, cdip, hba, psd);
	}
	mutex_destroy(&psd->sd_mutex);
	if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
		kmem_free(hba, sizeof (*hba));
	}

	mdi_pi_set_vhci_private(pip, NULL);
	kmem_free((caddr_t)psd, sizeof (*psd));

	mutex_destroy(&svp->svp_mutex);
	cv_destroy(&svp->svp_cv);
	kmem_free((caddr_t)svp, sizeof (*svp));

	/*
	 * If this is the last path to the client,
	 * then free up the vlun as well.
	 */
	if (mdi_client_get_path_count(cdip) == 1) {
		vhci_lun_free(cdip);
	}

	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
	    (void *)pip));
	return (MDI_SUCCESS);
}

/* ARGSUSED */
static int
vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
    mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
{
	int			rval = MDI_SUCCESS;
	scsi_vhci_priv_t	*svp;
	scsi_vhci_lun_t		*vlun;
	int			held;
	int			op = (flags & 0xf00) >> 8;
	struct scsi_vhci	*vhci;

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));

	if (flags & MDI_EXT_STATE_CHANGE) {
		/*
		 * We do not want to issue any commands down the path in case
		 * the sync flag is set. Lower layers might not be ready to
		 * accept any I/O commands.
		 */
		if (op == DRIVER_DISABLE)
			return (MDI_SUCCESS);

		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
		if (svp == NULL) {
			return (MDI_FAILURE);
		}
		vlun = svp->svp_svl;

		if (flags & MDI_BEFORE_STATE_CHANGE) {
			/*
			 * Hold the LUN.
			 */
			VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
			if (flags & MDI_DISABLE_OP) {
				/*
				 * Issue a SCSI reset if this happens to be
				 * the reserved path.
				 */
				if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
					/*
					 * If a reservation is pending on
					 * this path, don't mark the
					 * path busy.
					 */
					if (op == DRIVER_DISABLE_TRANSIENT) {
						VHCI_DEBUG(1, (CE_NOTE, NULL,
						    "!vhci_pathinfo"
						    "_state_change (pip:%p): "
						    " reservation: fail busy\n",
						    (void *)pip));
						return (MDI_FAILURE);
					}
					if (pip == vlun->svl_resrv_pip) {
						if (vhci_recovery_reset(
						    svp->svp_svl,
						    &svp->svp_psd->sd_address,
						    TRUE,
						    VHCI_DEPTH_TARGET) == 0) {
							VHCI_DEBUG(1,
							    (CE_NOTE, NULL,
							    "!vhci_pathinfo"
							    "_state_change "
							    " (pip:%p): "
							    "reset failed, "
							    "give up!\n",
							    (void *)pip));
						}
						vlun->svl_flags &=
						    ~VLUN_RESERVE_ACTIVE_FLG;
					}
				}
			} else if (flags & MDI_ENABLE_OP) {
				if (((vhci->vhci_conf_flags &
				    VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
				    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
				    MDI_PI_IS_USER_DISABLE(pip) &&
				    MDI_PI_IS_STANDBY(pip)) {
					struct scsi_failover_ops	*fo;
					char *best_pclass, *pclass = NULL;
					int  best_class, rv;
					/*
					 * Failback if enabling a standby path
					 * and it is the primary class or
					 * preferred class
					 */
					best_class = mdi_pi_get_preferred(pip);
					if (best_class == 0) {
						/*
						 * if not preferred - compare
						 * path-class with class
						 */
						fo = vlun->svl_fops;
						(void) fo->sfo_pathclass_next(
						    NULL, &best_pclass,
						    vlun->svl_fops_ctpriv);
						pclass = NULL;
						rv = mdi_prop_lookup_string(pip,
						    "path-class", &pclass);
						if (rv != MDI_SUCCESS ||
						    pclass == NULL) {
							vhci_log(CE_NOTE, vdip,
							    "!path-class "
							    " lookup "
							    "failed. rv: %d"
							    "class: %p", rv,
							    (void *)pclass);
						} else if (strncmp(pclass,
						    best_pclass,
						    strlen(best_pclass)) == 0) {
							best_class = 1;
						}
						if (rv == MDI_SUCCESS &&
						    pclass != NULL) {
							rv = mdi_prop_free(
							    pclass);
							if (rv !=
							    DDI_PROP_SUCCESS) {
								vhci_log(
								    CE_NOTE,
								    vdip,
								    "!path-"
								    "class"
								    " free"
								    " failed"
								    " rv: %d"
								    " class: "
								    "%p",
								    rv,
								    (void *)
								    pclass);
							}
						}
					}
					if (best_class == 1) {
						VHCI_DEBUG(1, (CE_NOTE, NULL,
						    "preferred path: %p "
						    "USER_DISABLE->USER_ENABLE "
						    "transition for lun %s\n",
						    (void *)pip,
						    vlun->svl_lun_wwn));
						(void) taskq_dispatch(
						    vhci->vhci_taskq,
						    vhci_initiate_auto_failback,
						    (void *) vlun, KM_SLEEP);
					}
				}
				/*
				 * if PGR is active, revalidate key and
				 * register on this path also, if key is
				 * still valid
				 */
				sema_p(&vlun->svl_pgr_sema);
				if (vlun->svl_pgr_active)
					(void)
					    vhci_pgr_validate_and_register(svp);
				sema_v(&vlun->svl_pgr_sema);
				/*
				 * Inform the target driver of any
				 * reservations that need to be reinstated
				 * if the target dropped its reservation
				 * during the busy period.
				 */
				mutex_enter(&vhci->vhci_mutex);
				scsi_hba_reset_notify_callback(
				    &vhci->vhci_mutex,
				    &vhci->vhci_reset_notify_listf);
				mutex_exit(&vhci->vhci_mutex);
			}
		}
		if (flags & MDI_AFTER_STATE_CHANGE) {
			if (flags & MDI_ENABLE_OP) {
				mutex_enter(&vhci_global_mutex);
				cv_broadcast(&vhci_cv);
				mutex_exit(&vhci_global_mutex);
			}
			if (vlun->svl_setcap_done) {
				(void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
				    "sector-size", vlun->svl_sector_size,
				    1, pip);
			}

			/*
			 * Release the LUN
			 */
			VHCI_RELEASE_LUN(vlun);

			/*
			 * Path transition is complete.
			 * Run callback to indicate target driver to
			 * retry to prevent IO starvation.
			 */
			if (scsi_callback_id != 0) {
				ddi_run_callback(&scsi_callback_id);
			}
		}
	} else {
		switch (state) {
		case MDI_PATHINFO_STATE_ONLINE:
			rval = vhci_pathinfo_online(vdip, pip, flags);
			break;

		case MDI_PATHINFO_STATE_OFFLINE:
			rval = vhci_pathinfo_offline(vdip, pip, flags);
			break;

		default:
			break;
		}
		/*
		 * Path transition is complete.
		 * Run callback to indicate target driver to
		 * retry to prevent IO starvation.
		 */
		if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
			ddi_run_callback(&scsi_callback_id);
		}
		return (rval);
	}

	return (MDI_SUCCESS);
}

/*
 * Parse the mpxio load balancing options. The datanameptr
 * points to a string containing the load-balance-options value,
 * which names a property that defines the load-balance algorithm
 * and any arguments to that algorithm.
 * For example:
 * device-type-mpxio-options-list=
 * "device-type=SUN SENA", "load-balance-options=logical-block-options"
 * "device-type=SUN SE6920", "round-robin-options";
 * logical-block-options="load-balance=logical-block", "region-size=15";
 * round-robin-options="load-balance=round-robin";
 *
 * If load-balance is not defined, the load-balance algorithm defaults
 * to the global setting. Default values are assigned to the arguments
 * (region-size=18), and any argument that is not recognized is ignored.
 */
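/*
 * For illustration (values taken from the example above): looking up the
 * "logical-block-options" property named by datanameptr yields the option
 * strings concatenated with NUL separators,
 *
 *	"load-balance=logical-block\0region-size=15\0"
 *
 * which is the layout the loop below walks, consuming strlen(entry) + 1
 * bytes per iteration until config_list_len is exhausted.
 */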
static void
vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
    caddr_t datanameptr)
{
	char		*dataptr, *next_entry;
	caddr_t		config_list	= NULL;
	int		config_list_len = 0, list_len = 0;
	int		region_size = -1;
	/* initialized in case no load-balance entry is present */
	client_lb_t	load_balance = LOAD_BALANCE_NONE;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
	    (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
		return;
	}

	list_len = config_list_len;
	next_entry = config_list;
	while (config_list_len > 0) {
		dataptr = next_entry;

		if (strncmp(mdi_load_balance, dataptr,
		    strlen(mdi_load_balance)) == 0) {
			/* get the load-balance scheme */
			dataptr += strlen(mdi_load_balance) + 1;
			if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
				(void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
				load_balance = LOAD_BALANCE_RR;
			} else if (strcmp(dataptr,
			    LOAD_BALANCE_PROP_LBA) == 0) {
				(void) mdi_set_lb_policy(cdip,
				    LOAD_BALANCE_LBA);
				load_balance = LOAD_BALANCE_LBA;
			} else if (strcmp(dataptr,
			    LOAD_BALANCE_PROP_NONE) == 0) {
				(void) mdi_set_lb_policy(cdip,
				    LOAD_BALANCE_NONE);
				load_balance = LOAD_BALANCE_NONE;
			}
		} else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
		    strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
			int	i = 0;
			char	*ptr;
			char	*tmp;

			tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
			/* check for numeric value */
			for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
				if (!isdigit(*ptr)) {
					cmn_err(CE_WARN,
					    "Illegal region size: %s."
					    " Setting to default value: %d",
					    tmp,
					    LOAD_BALANCE_DEFAULT_REGION_SIZE);
					region_size =
					    LOAD_BALANCE_DEFAULT_REGION_SIZE;
					break;
				}
			}
			if (i >= strlen(tmp)) {
				region_size = stoi(&tmp);
			}
			(void) mdi_set_lb_region_size(cdip, region_size);
		}
		config_list_len -= (strlen(next_entry) + 1);
		next_entry += strlen(next_entry) + 1;
	}
#ifdef DEBUG
	if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
		VHCI_DEBUG(1, (CE_NOTE, dip,
		    "!vhci_parse_mpxio_lb_options: region-size: %d"
		    "only valid for load-balance=logical-block\n",
		    region_size));
	}
#endif
	if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
		VHCI_DEBUG(1, (CE_NOTE, dip,
		    "!vhci_parse_mpxio_lb_options: No region-size"
		    " defined load-balance=logical-block."
		    " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
		(void) mdi_set_lb_region_size(cdip,
		    LOAD_BALANCE_DEFAULT_REGION_SIZE);
	}
	if (list_len > 0) {
		kmem_free(config_list, list_len);
	}
}

/*
 * Parse the device-type-mpxio-options-list looking for the key
 * "load-balance-options". If found, parse the load balancing options.
 * See the comment above vhci_get_device_type_mpxio_options() for the
 * format of the device-type-mpxio-options-list.
 */
static void
vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
    caddr_t datanameptr, int list_len)
{
	char	*dataptr;
	int	len;

	/*
	 * get the data list
	 */
	dataptr = datanameptr;
	len = 0;
	while (len < list_len &&
	    strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
	    != 0) {
		if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
		    strlen(LOAD_BALANCE_OPTIONS)) == 0) {
			len += strlen(LOAD_BALANCE_OPTIONS) + 1;
			dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
			vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
		}
		len += strlen(dataptr) + 1;
		dataptr += strlen(dataptr) + 1;
	}
}

/*
 * Compare the inquiry string returned from the device with the
 * device-type. Check for the existence of the
 * device-type-mpxio-options-list and, if found, parse the list checking
 * for a match of the device-type value with the inquiry string returned
 * from the device. If a match is found, parse the mpxio options list.
 * The format of the device-type-mpxio-options-list is:
 * device-type-mpxio-options-list=
 * "device-type=SUN SENA", "load-balance-options=logical-block-options"
 * "device-type=SUN SE6920", "round-robin-options";
 * logical-block-options="load-balance=logical-block", "region-size=15";
 * round-robin-options="load-balance=round-robin";
 */
void
vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
    struct scsi_device *devp)
{

	caddr_t	config_list	= NULL;
	caddr_t	vidptr, datanameptr;
	int	vidlen, dupletlen = 0;
	int	config_list_len = 0, len;
	struct scsi_inquiry	*inq = devp->sd_inq;

	/*
	 * Look up the device-type-mpxio-options-list and walk through
	 * the list, comparing the vendor ids from the earlier inquiry
	 * command with the vids in the list. If there is a match, look
	 * up the mpxio-options value.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    MPXIO_OPTIONS_LIST,
	    (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {

		/*
		 * Compare vids in each duplet - if it matches,
		 * parse the mpxio options list.
		 */
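		/*
		 * For illustration (using the example list above), the
		 * property expands to NUL-separated entries grouped into
		 * duplets:
		 *
		 *	"device-type=SUN SENA\0"
		 *	"load-balance-options=logical-block-options\0"
		 *	"device-type=SUN SE6920\0"
		 *	"round-robin-options\0"
		 *
		 * Each "device-type=" entry starts a duplet; on a vid
		 * match the entries that follow it, up to the next
		 * "device-type=", are handed to vhci_parse_mpxio_options().
		 */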
		for (len = config_list_len, vidptr = config_list; len > 0;
		    len -= dupletlen) {

			dupletlen = 0;

			if (strlen(vidptr) != 0 &&
			    strncmp(vidptr, DEVICE_TYPE_STR,
			    strlen(DEVICE_TYPE_STR)) == 0) {
				/* point to next duplet */
				datanameptr = vidptr + strlen(vidptr) + 1;
				/* add len of this duplet */
				dupletlen += strlen(vidptr) + 1;
				/* get to device type */
				vidptr += strlen(DEVICE_TYPE_STR) + 1;
				vidlen = strlen(vidptr);
				if ((vidlen != 0) &&
				    bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
					vhci_parse_mpxio_options(dip, cdip,
					    datanameptr, len - dupletlen);
					break;
				}
				/* get to next duplet */
				vidptr += strlen(vidptr) + 1;
			}
			/* get to the next device-type */
			while (len - dupletlen > 0 &&
			    strlen(vidptr) != 0 &&
			    strncmp(vidptr, DEVICE_TYPE_STR,
			    strlen(DEVICE_TYPE_STR)) != 0) {
				dupletlen += strlen(vidptr) + 1;
				vidptr += strlen(vidptr) + 1;
			}
		}
		if (config_list_len > 0) {
			kmem_free(config_list, config_list_len);
		}
	}
}

static int
vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip,
    struct scsi_failover_ops *fo, scsi_vhci_lun_t *vlun,
    struct scsi_vhci *vhci)
{
	struct scsi_path_opinfo		opinfo;
	char				*pclass, *best_pclass;

	if (fo->sfo_path_get_opinfo(psd, &opinfo,
	    vlun->svl_fops_ctpriv) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
		    "Failed to get operation info for path:%p\n",
		    (void *)pip));
		return (MDI_FAILURE);
	}
	/* set the xlf capable flag in the vlun for future use */
	vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
	(void) mdi_prop_update_string(pip, "path-class",
	    opinfo.opinfo_path_attr);

	pclass = opinfo.opinfo_path_attr;
	if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
		mutex_enter(&vlun->svl_mutex);
		if (vlun->svl_active_pclass != NULL) {
			if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
				mutex_exit(&vlun->svl_mutex);
				/*
				 * Externally initiated failover has happened;
				 * force the path state to be STANDBY/ONLINE,
				 * next IO will trigger failover and thus
				 * sync-up the pathstates. Reason we don't
				 * sync-up immediately by invoking
				 * vhci_update_pathstates() is because it
				 * needs a VHCI_HOLD_LUN() and we don't
				 * want to block here.
				 *
				 * Further, if the device is an ALUA device,
				 * then failure to exactly match 'pclass' and
				 * 'svl_active_pclass' (as is the case here)
				 * indicates that the currently active path
				 * is a 'non-optimized' path - which means
				 * that 'svl_active_pclass' needs to be
				 * replaced with the opinfo.opinfo_path_attr
				 * value.
				 */

				if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
					char	*tptr;

					/*
					 * The device is ALUA compliant. The
					 * state needs to be changed to online
					 * rather than the standby state,
					 * which is what is typically done
					 * for an asymmetric device that is
					 * not ALUA compliant.
					 */
					mdi_pi_set_state(pip,
					    MDI_PATHINFO_STATE_ONLINE);
					tptr = kmem_alloc(strlen
					    (opinfo.opinfo_path_attr) + 1,
					    KM_SLEEP);
					(void) strlcpy(tptr,
					    opinfo.opinfo_path_attr,
					    (strlen(opinfo.opinfo_path_attr)
					    + 1));
					mutex_enter(&vlun->svl_mutex);
					kmem_free(vlun->svl_active_pclass,
					    strlen(vlun->svl_active_pclass)
					    + 1);
					vlun->svl_active_pclass = tptr;
					mutex_exit(&vlun->svl_mutex);
				} else {
					/*
					 * Non-ALUA device case.
					 */
					mdi_pi_set_state(pip,
					    MDI_PATHINFO_STATE_STANDBY);
				}
				vlun->svl_fo_support = opinfo.opinfo_mode;
				mdi_pi_set_preferred(pip,
				    opinfo.opinfo_preferred);
				return (MDI_SUCCESS);
			}
		} else {
			char	*tptr;

			/*
			 * Release the mutex before allocating, since the
			 * allocation may sleep.
			 */
			mutex_exit(&vlun->svl_mutex);
			tptr = kmem_alloc(strlen(pclass) + 1, KM_SLEEP);
			(void) strlcpy(tptr, pclass, strlen(pclass) + 1);
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_active_pclass = tptr;
		}
		mutex_exit(&vlun->svl_mutex);
		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
		vlun->svl_waiting_for_activepath = 0;
	} else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
		mutex_enter(&vlun->svl_mutex);
		if (vlun->svl_active_pclass == NULL) {
			char	*tptr;

			mutex_exit(&vlun->svl_mutex);
			tptr = kmem_alloc(strlen(pclass) + 1, KM_SLEEP);
			(void) strlcpy(tptr, pclass, strlen(pclass) + 1);
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_active_pclass = tptr;
		}
		mutex_exit(&vlun->svl_mutex);
		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
		vlun->svl_waiting_for_activepath = 0;
	} else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
		mutex_enter(&vlun->svl_mutex);
		if (vlun->svl_active_pclass != NULL) {
			if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
				mutex_exit(&vlun->svl_mutex);
				/*
				 * externally initiated failover has happened;
				 * force state to ONLINE (see comment above)
				 */
				mdi_pi_set_state(pip,
				    MDI_PATHINFO_STATE_ONLINE);
				vlun->svl_fo_support = opinfo.opinfo_mode;
				mdi_pi_set_preferred(pip,
				    opinfo.opinfo_preferred);
				return (MDI_SUCCESS);
			}
		}
		mutex_exit(&vlun->svl_mutex);
		mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);

		/*
		 * Initiate auto-failback, if enabled, for the path if its
		 * path-state is transitioning from OFFLINE->STANDBY and its
		 * pathclass is the preferred pathclass for this storage.
		 * NOTE: In the case where opinfo_path_state is
		 * SCSI_PATH_ACTIVE (above), where the pi state is set to
		 * STANDBY, we don't initiate auto-failback, as the next IO
		 * will take care of this. See comment above.
		 */
		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
		    vlun->svl_fops_ctpriv);
		if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
		    VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
		    ((strcmp(pclass, best_pclass) == 0) ||
		    mdi_pi_get_preferred(pip) == 1) &&
		    ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE) ||
		    (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
			VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
			    " OFFLINE->STANDBY transition for lun %s\n",
			    best_pclass, (void *)pip, vlun->svl_lun_wwn));
			(void) taskq_dispatch(vhci->vhci_taskq,
			    vhci_initiate_auto_failback, (void *) vlun,
			    KM_SLEEP);
		}
	}
	vlun->svl_fo_support = opinfo.opinfo_mode;
	mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);

	VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
	    " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
	    opinfo.opinfo_rev, opinfo.opinfo_path_state,
	    opinfo.opinfo_preferred, opinfo.opinfo_mode));

	return (MDI_SUCCESS);
}

/*
 * Form the kstat name and call mdi_pi_kstat_create()
 */
void
vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
{
	dev_info_t	*tgt_dip;
	dev_info_t	*pdip;
	char		*guid;
	char		*target_port, *target_port_dup;
	char		ks_name[KSTAT_STRLEN];
	uint_t		pid;
	int		by_id;
	mod_hash_val_t	hv;

	/* return if we have already allocated kstats */
	if (mdi_pi_kstat_exists(pip))
		return;

	/*
	 * We need instance numbers to create a kstat name, return if we don't
	 * have instance numbers assigned yet.
	 */
	tgt_dip = mdi_pi_get_client(pip);
	pdip = mdi_pi_get_phci(pip);
	if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
		return;

	/*
	 * A path-oriented kstat has a ks_name of the form:
	 *
	 *	<client-driver><instance>.t<pid>.<pHCI-driver><instance>
	 *
	 * We maintain a bidirectional 'target-port' to <pid> map,
	 * called targetmap. All pathinfo nodes with the same
	 * 'target-port' map to the same <pid>. The iostat(1M) code,
	 * when parsing a path-oriented kstat name, uses the <pid> as
	 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
	 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
	 * this ioctl needs to translate a <pid> to a 'target-port'
	 * even after all pathinfo nodes associated with the
	 * 'target-port' have been destroyed. This is needed to support
	 * consistent first-iteration activity-since-boot iostat(1M)
	 * output. Because of this requirement, the mapping can't be
	 * based on pathinfo information in a devinfo snapshot.
	 */
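	/*
	 * For illustration (driver names and instance numbers below are
	 * hypothetical): a client ssd1 reached through pHCI fp0, with its
	 * 'target-port' mapped to <pid> 2, gets the kstat name
	 * "ssd1.t2.fp0".
	 */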

	/* determine 'target-port' */
	if (mdi_prop_lookup_string(pip,
	    "target-port", &target_port) == MDI_SUCCESS) {
		target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
		(void) mdi_prop_free(target_port);
		by_id = 1;
	} else {
		/*
		 * If the pHCI did not set up 'target-port' on this
		 * pathinfo node, assume that our client is the only
		 * one with paths to the device by using the guid
		 * value as the 'target-port'. Since no other client
		 * will have the same guid, no other client will use
		 * the same <pid>. NOTE: a client with an instance
		 * number always has a guid.
		 */
		(void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
		target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
		ddi_prop_free(guid);

		/*
		 * For this type of mapping we don't want the
		 * <pid> -> 'target-port' mapping to be made. This
		 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
		 * to fail, and the iostat(1M) long '-n' output will
		 * still use the <pid>. We do this because we just
		 * made up the 'target-port' using the guid, and we
		 * don't want to expose that fact in iostat output.
		 */
		by_id = 0;
	}

	/* find/establish <pid> given 'target-port' */
	mutex_enter(&vhci_targetmap_mutex);
	if (mod_hash_find(vhci_targetmap_byport,
	    (mod_hash_key_t)target_port_dup, &hv) == 0) {
		pid = (int)(intptr_t)hv;	/* mapping exists */
	} else {
		pid = vhci_targetmap_pid++;	/* new mapping */

		(void) mod_hash_insert(vhci_targetmap_byport,
		    (mod_hash_key_t)target_port_dup,
		    (mod_hash_val_t)(intptr_t)pid);
		if (by_id) {
			(void) mod_hash_insert(vhci_targetmap_bypid,
			    (mod_hash_key_t)(uintptr_t)pid,
			    (mod_hash_val_t)(uintptr_t)target_port_dup);
		}
		target_port_dup = NULL;		/* owned by hash */
	}
	mutex_exit(&vhci_targetmap_mutex);

	/* form kstat name */
	(void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
	    pid, ddi_driver_name(pdip), ddi_get_instance(pdip));

	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
	    "kstat %s: pid %x <-> port %s\n", (void *)pip,
	    ks_name, pid, target_port_dup));
	if (target_port_dup)
		kmem_free(target_port_dup, strlen(target_port_dup) + 1);

	/* call mdi to create kstats with the name we built */
	(void) mdi_pi_kstat_create(pip, ks_name);
}

/* ARGSUSED */
static int
vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
{
	scsi_hba_tran_t			*hba = NULL;
	struct scsi_device		*psd = NULL;
	scsi_vhci_lun_t			*vlun = NULL;
	dev_info_t			*pdip = NULL;
	dev_info_t			*tgt_dip;
	struct scsi_vhci		*vhci;
	char				*guid;
	struct scsi_failover		*sf;
	struct scsi_failover_ops	*sfo;
	char				*sfo_name;
	char				*override;
	scsi_vhci_priv_t		*svp = NULL;
	struct buf			*bp;
	struct scsi_address		*ap;
	struct scsi_pkt			*pkt;
	int				rval = MDI_FAILURE;
	uint_t				inq_size = VHCI_STD_INQ_SIZE;
	mpapi_item_list_t		*list_ptr;
	mpapi_lu_data_t			*ld;

	ASSERT(vdip != NULL);
	ASSERT(pip != NULL);

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	pdip = mdi_pi_get_phci(pip);
	hba = ddi_get_driver_private(pdip);
	ASSERT(hba != NULL);

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	ASSERT(svp != NULL);

	tgt_dip = mdi_pi_get_client(pip);
	ASSERT(tgt_dip != NULL);
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
		    "property failed"));
		goto failure;
	}

	vlun = vhci_lun_lookup(tgt_dip);
	ASSERT(vlun != NULL);

	ddi_prop_free(guid);

	vlun->svl_dip = mdi_pi_get_client(pip);
	ASSERT(vlun->svl_dip != NULL);

	psd = svp->svp_psd;
	ASSERT(psd != NULL);

	/*
	 * For the INQUIRY response buffer size, we use VHCI_STD_INQ_SIZE
	 * (132 bytes) instead of SUN_INQSIZE (48 bytes), which is used in
	 * the sd layer. This is because we could get the vendor-specific
	 * parameters (present from the 97th byte onwards) which are
	 * required to process vendor-specific data based on array type.
	 * This INQUIRY buffer is freed in vhci_pathinfo_offline but NEVER
	 * in a different layer like the sd/pHCI transport. In other words,
	 * vhci maintains its own copy of scsi_device and scsi_inquiry data
	 * on a per-path basis.
	 */
	if (psd->sd_inq == NULL) {
		psd->sd_inq = (struct scsi_inquiry *)
		    kmem_zalloc(inq_size, KM_SLEEP);
	}

	tgt_dip = psd->sd_dev;
	ASSERT(tgt_dip != NULL);

	/*
	 * do inquiry to pass into probe routine; this
	 * will avoid each probe routine doing scsi inquiry
	 */
	bp = getrbuf(KM_SLEEP);
	bp->b_un.b_addr = (caddr_t)psd->sd_inq;
	bp->b_flags = B_READ;
	bp->b_bcount = inq_size;
	bp->b_resid = 0;

	ap = &psd->sd_address;
	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP0,
	    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
	if (pkt == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
		    "Inquiry init_pkt failed :%p\n", (void *)pip));
		rval = MDI_FAILURE;
		goto failure;
	}
	pkt->pkt_cdbp[0] = SCMD_INQUIRY;
	pkt->pkt_cdbp[4] = (uchar_t)inq_size;
	pkt->pkt_time = 60;

	rval = vhci_do_scsi_cmd(pkt);
	scsi_destroy_pkt(pkt);
	freerbuf(bp);
	if (rval == 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
		    "Failover Inquiry failed path:%p rval:%x\n",
		    (void *)pip, rval));
		rval = MDI_FAILURE;
		goto failure;
	}

	/*
	 * Determine if the device is supported under scsi_vhci, and select
	 * the failover module.
	 *
	 * See if there is a scsi_vhci.conf file override for this device's
	 * VID/PID. The following values can be returned:
	 *
	 * NULL		If NULL is returned then there is no scsi_vhci.conf
	 *		override. For NULL, we determine the failover_ops for
	 *		this device by checking the sfo_device_probe entry
	 *		point for each 'fops' module, in order.
	 *
	 *		NOTE: Correct operation may depend on module ordering
	 *		of 'specific' (failover modules that are completely
	 *		VID/PID table based) to 'generic' (failover modules
	 *		that are based on T10 standards like TPGS).
	 *		Currently, the value of 'ddi-forceload' in
	 *		scsi_vhci.conf is used to establish the module list
	 *		and probe order.
	 *
	 * "NONE"	If the value "NONE" is returned then there is a
	 *		scsi_vhci.conf VID/PID override to indicate that the
	 *		device should not be supported under scsi_vhci (even
	 *		if there is an 'fops' module supporting the device).
	 *
	 * "<other>"	If another value is returned then that value is the
	 *		name of the 'fops' module that should be used.
	 */
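	/*
	 * For illustration, a hypothetical scsi_vhci.conf override entry
	 * pairing VID/PID inquiry strings with either a fops module name
	 * or "NONE" (the VID/PID values and module name below are made up):
	 *
	 *	scsi-vhci-failover-override =
	 *		"VENDOR  PRODUCT",	"f_sym",
	 *		"VENDOR  NOMPXIO",	"NONE";
	 */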
	sfo = NULL;	/* "NONE" */
	sfo_name = NULL;
	override = scsi_get_device_type_string(
	    "scsi-vhci-failover-override", vdip, psd);

	if (override == NULL) {
		/* NULL: default: select based on sfo_device_probe results */
		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
			    &vlun->svl_fops_ctpriv) == SFO_DEVICE_PROBE_PHCI)
				continue;

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			sfo_name = i_ddi_strdup(sfo->sfo_name, KM_SLEEP);
			break;
		}
	} else if (strcasecmp(override, "NONE")) {
		/* !"NONE": select based on driver.conf specified name */
		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    (sf->sf_sfo->sfo_name == NULL) ||
			    strcmp(override, sf->sf_sfo->sfo_name))
				continue;

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			sfo_name = kmem_alloc(strlen("conf ") +
			    strlen(sfo->sfo_name) + 1, KM_SLEEP);
			(void) sprintf(sfo_name, "conf %s", sfo->sfo_name);
			break;
		}
	}
	if (override)
		kmem_free(override, strlen(override) + 1);

	if (sfo == NULL) {
		/* no failover module - device not supported */
		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
		    "!vhci_pathinfo_online: dev (path 0x%p) not "
		    "supported\n", (void *)pip));
		vlun->svl_not_supported = 1;
		rval = MDI_NOT_SUPPORTED;
		goto done;
	}

	/* failover supported for device - save failover_ops in vlun */
	vlun->svl_fops = sfo;
	ASSERT(sfo_name != NULL);

	/*
	 * To avoid a memory leak, free the fops name if one has already
	 * been set. A vlun can have more than one path, and
	 * vhci_pathinfo_online() is invoked as each path comes online,
	 * so the fops name may be set more than once.
	 */
	if (vlun->svl_fops_name) {
		kmem_free(vlun->svl_fops_name,
		    strlen(vlun->svl_fops_name) + 1);
	}
	vlun->svl_fops_name = sfo_name;

	/*
	 * Obtain the device-type based mpxio options as specified in
	 * the scsi_vhci.conf file.
	 *
	 * NOTE: currently, the end result is a call to
	 * mdi_set_lb_region_size().
	 */
	vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);

	/*
	 * The device probe or options in the conf file may have set/changed
	 * the lb policy; save the current value.
	 */
	vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);

	/*
	 * if PGR is active, revalidate key and register on this path also,
	 * if key is still valid
	 */
	sema_p(&vlun->svl_pgr_sema);
	if (vlun->svl_pgr_active) {
		rval = vhci_pgr_validate_and_register(svp);
		if (rval != 1) {
			rval = MDI_FAILURE;
			sema_v(&vlun->svl_pgr_sema);
			goto failure;
		}
	}
	sema_v(&vlun->svl_pgr_sema);

	if (svp->svp_new_path) {
		/*
		 * Last chance to perform any cleanup operations on this
		 * new path before making this path completely online.
		 */
		svp->svp_new_path = 0;

		/*
		 * If scsi_vhci knows the lun is already RESERVE'd,
		 * then skip issuing a RELEASE on the new path.
		 */
		if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
			/*
			 * Issue a SCSI-2 RELEASE only the first time on
			 * a new path, just in case the host rebooted and
			 * a reservation is still pending on this path.
			 * IBM Shark storage does not clear RESERVE upon
			 * host reboot.
			 */
			ap = &psd->sd_address;
			pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
			    sizeof (struct scsi_arq_status), 0, 0,
			    SLEEP_FUNC, NULL);
			if (pkt == NULL) {
				VHCI_DEBUG(1, (CE_NOTE, NULL,
				    "!vhci_pathinfo_online: "
				    "Release init_pkt failed :%p\n",
				    (void *)pip));
				rval = MDI_FAILURE;
				goto failure;
			}
			pkt->pkt_cdbp[0] = SCMD_RELEASE;
			pkt->pkt_time = 60;

			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "!vhci_path_online: path:%p "
			    "Issued SCSI-2 RELEASE\n", (void *)pip));

			/* Ignore the return value */
			(void) vhci_do_scsi_cmd(pkt);
			scsi_destroy_pkt(pkt);
		}
	}

	rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
	if (rval == MDI_FAILURE) {
		goto failure;
	}

	/* Initialize MP-API data */
	vhci_update_mpapi_data(vhci, vlun, pip);

	/*
	 * MP-API also needs the Inquiry data to be maintained in the
	 * mp_vendor_prop_t structure, so find the lun and update its
	 * structure with this data.
	 */
	list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
	    MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
	ld = (mpapi_lu_data_t *)list_ptr->item->idata;
	if (ld != NULL) {
		bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
		bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
		bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
	} else {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
		    "mpapi_lu_data_t is NULL"));
	}

	/* create kstats for path */
	vhci_kstat_create_pathinfo(pip);

done:
	mutex_enter(&vhci_global_mutex);
	cv_broadcast(&vhci_cv);
	mutex_exit(&vhci_global_mutex);

	if (vlun->svl_setcap_done) {
		(void) vhci_pHCI_cap(ap, "sector-size",
		    vlun->svl_sector_size, 1, pip);
	}

	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
	    (void *)pip));

failure:
	if ((rval != MDI_SUCCESS) && psd->sd_inq) {
		kmem_free((caddr_t)psd->sd_inq, inq_size);
		psd->sd_inq = (struct scsi_inquiry *)NULL;
	}
	return (rval);
}

/*
 * Path offline handler. Release all bindings that will not be
 * released by the normal packet transport/completion code path.
 * Since we don't (presently) keep any bindings alive outside of
 * the in-transport packets (which will be released on completion),
 * there is not much to do here.
 */
/* ARGSUSED */
static int
vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
{
	scsi_hba_tran_t		*hba = NULL;
	struct scsi_device	*psd = NULL;
	dev_info_t		*pdip = NULL;
	dev_info_t		*cdip = NULL;
	scsi_vhci_priv_t	*svp = NULL;
	uint_t			inq_size = VHCI_STD_INQ_SIZE;

	ASSERT(vdip != NULL);
	ASSERT(pip != NULL);

	pdip = mdi_pi_get_phci(pip);
	ASSERT(pdip != NULL);
	if (pdip == NULL) {
		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
		    "phci dip", (void *)pip));
		return (MDI_FAILURE);
	}

	cdip = mdi_pi_get_client(pip);
	ASSERT(cdip != NULL);
	if (cdip == NULL) {
		VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
		    "client dip", (void *)pip));
		return (MDI_FAILURE);
	}

	hba = ddi_get_driver_private(pdip);
	ASSERT(hba != NULL);

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		/*
		 * An mdi_pathinfo node in the INIT state can have its vHCI
		 * private information set to NULL.
		 */
		VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
		    "svp is NULL for pip 0x%p\n", (void *)pip));
		return (MDI_SUCCESS);
	}

	psd = svp->svp_psd;
	ASSERT(psd != NULL);

	mutex_enter(&svp->svp_mutex);

	VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
	    "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
	while (svp->svp_cmds != 0) {
		if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex,
		    ddi_get_lbolt() +
		    drv_usectohz(vhci_path_quiesce_timeout * 1000000)) == -1) {
			/*
			 * The timeout expired without the condition
			 * being signaled.
			 */
			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
			    "Timeout reached on path 0x%p without the cond\n",
			    (void *)pip));
			VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
			    "%d cmds still pending on path: 0x%p\n",
			    svp->svp_cmds, (void *)pip));
			break;
		}
	}
	mutex_exit(&svp->svp_mutex);

	/*
	 * Check to see if this vlun has an active SCSI-II RESERVE and this
	 * is the pip for the path that has been reserved.
	 * If so, clear the reservation by sending a reset, so the host will
	 * not get a reservation conflict. Reset the flag
	 * VLUN_RESERVE_ACTIVE_FLG for this lun. Also send a reset notify to
	 * the target driver, just in case the POR check condition is cleared
	 * by some other layer in the stack.
	 */
	if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (pip == svp->svp_svl->svl_resrv_pip) {
			if (vhci_recovery_reset(svp->svp_svl,
			    &svp->svp_psd->sd_address, TRUE,
			    VHCI_DEPTH_TARGET) == 0) {
				VHCI_DEBUG(1, (CE_NOTE, NULL,
				    "!vhci_pathinfo_offline (pip:%p):"
				    "reset failed, retrying\n", (void *)pip));
				delay(1 * drv_usectohz(1000000));
				if (vhci_recovery_reset(svp->svp_svl,
				    &svp->svp_psd->sd_address, TRUE,
				    VHCI_DEPTH_TARGET) == 0) {
					VHCI_DEBUG(1, (CE_NOTE, NULL,
					    "!vhci_pathinfo_offline "
					    "(pip:%p): reset failed, "
					    "giving up!\n", (void *)pip));
				}
			}
			svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		}
	}

	mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
	if (psd->sd_inq) {
		kmem_free((caddr_t)psd->sd_inq, inq_size);
		psd->sd_inq = (struct scsi_inquiry *)NULL;
	}
	vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);

	VHCI_DEBUG(1, (CE_NOTE, NULL,
	    "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
	return (MDI_SUCCESS);
}


/*
 * routine for SCSI VHCI IOCTL implementation.
 */
/* ARGSUSED */
static int
vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
{
	struct scsi_vhci		*vhci;
	dev_info_t			*vdip;
	mdi_pathinfo_t			*pip;
	int				instance, held;
	int				retval = 0;
	caddr_t				phci_path = NULL, client_path = NULL;
	caddr_t				paddr = NULL;
	sv_iocdata_t			ioc;
	sv_iocdata_t			*pioc = &ioc;
	sv_switch_to_cntlr_iocdata_t	iocsc;
	sv_switch_to_cntlr_iocdata_t	*piocsc = &iocsc;
	caddr_t				s;
	scsi_vhci_lun_t			*vlun;
	struct scsi_failover_ops	*fo;
	char				*pclass;

	/* Check for validity of vhci structure */
	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
		mutex_exit(&vhci->vhci_mutex);
		return (ENXIO);
	}
	mutex_exit(&vhci->vhci_mutex);

	/* Get the vhci dip */
	vdip = vhci->vhci_dip;
	ASSERT(vdip != NULL);
	instance = ddi_get_instance(vdip);

	/* Allocate memory for getting parameters from userland */
	phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	client_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	paddr = kmem_zalloc(MAXNAMELEN, KM_SLEEP);

	/*
	 * Set a local variable indicating the ioctl name. Used for
	 * printing debug strings.
	 */
	switch (cmd) {
	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
		s = "GET_CLIENT_MULTIPATH_INFO";
		break;

	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
		s = "GET_PHCI_MULTIPATH_INFO";
		break;

	case SCSI_VHCI_GET_CLIENT_NAME:
		s = "GET_CLIENT_NAME";
		break;

	case SCSI_VHCI_PATH_ONLINE:
		s = "PATH_ONLINE";
		break;

	case SCSI_VHCI_PATH_OFFLINE:
		s = "PATH_OFFLINE";
		break;

	case SCSI_VHCI_PATH_STANDBY:
		s = "PATH_STANDBY";
		break;

	case SCSI_VHCI_PATH_TEST:
		s = "PATH_TEST";
		break;

	case SCSI_VHCI_SWITCH_TO_CNTLR:
		s = "SWITCH_TO_CNTLR";
		break;
	case SCSI_VHCI_PATH_DISABLE:
		s = "PATH_DISABLE";
		break;
	case SCSI_VHCI_PATH_ENABLE:
		s = "PATH_ENABLE";
		break;

	case SCSI_VHCI_GET_TARGET_LONGNAME:
		s = "GET_TARGET_LONGNAME";
		break;

#ifdef	DEBUG
	case SCSI_VHCI_CONFIGURE_PHCI:
		s = "CONFIGURE_PHCI";
		break;

	case SCSI_VHCI_UNCONFIGURE_PHCI:
		s = "UNCONFIGURE_PHCI";
		break;
#endif

	default:
		s = "Unknown";
		vhci_log(CE_NOTE, vdip,
		    "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
		retval = ENOTSUP;
		break;
	}
	if (retval != 0) {
		goto end;
	}

	VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));

	/*
	 * Get IOCTL parameters from userland
	 */
	switch (cmd) {
	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
	case SCSI_VHCI_GET_CLIENT_NAME:
	case SCSI_VHCI_PATH_ONLINE:
	case SCSI_VHCI_PATH_OFFLINE:
	case SCSI_VHCI_PATH_STANDBY:
	case SCSI_VHCI_PATH_TEST:
	case SCSI_VHCI_PATH_DISABLE:
	case SCSI_VHCI_PATH_ENABLE:
	case SCSI_VHCI_GET_TARGET_LONGNAME:
#ifdef	DEBUG
	case SCSI_VHCI_CONFIGURE_PHCI:
	case SCSI_VHCI_UNCONFIGURE_PHCI:
#endif
		retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
		break;

	case SCSI_VHCI_SWITCH_TO_CNTLR:
		retval = vhci_get_iocswitchdata((const void *)data, piocsc,
		    mode, s);
		break;
	}
	if (retval != 0) {
		goto end;
	}


	/*
	 * Process the IOCTL
	 */
	switch (cmd) {
	case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
	{
		uint_t		num_paths;	/* Num paths to client dev */
		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
		sv_path_info_t	*kpibuf = NULL;	/* Kernel data for ioctls */
		dev_info_t	*cdip;		/* Client device dip */

		if (pioc->ret_elem == NULL) {
			retval = EINVAL;
			break;
		}

		/* Get client device path from user land */
		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
			retval = EFAULT;
			break;
		}

		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
		    "client <%s>", s, client_path));

		/* Get number of paths to this client device */
		if ((cdip = mdi_client_path2devinfo(vdip, client_path))
		    == NULL) {
			retval = ENXIO;
			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "client dip doesn't exist. invalid path <%s>",
			    s, client_path));
			break;
		}
		num_paths = mdi_client_get_path_count(cdip);

		if (ddi_copyout(&num_paths, pioc->ret_elem,
		    sizeof (num_paths), mode)) {
			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "num_paths copyout failed", s));
			retval = EFAULT;
			break;
		}

		/* If user just wanted num_paths, then return */
		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
		    num_paths == 0) {
			break;
		}

		/* Set num_paths to value as much as can be sent to userland */
		if (num_paths > pioc->buf_elem) {
			num_paths = pioc->buf_elem;
		}

		/* Allocate memory and get userland pointers */
		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
		    pioc, mode, s) != 0) {
			retval = EFAULT;
			break;
		}
		ASSERT(upibuf != NULL);
		ASSERT(kpibuf != NULL);

		/*
		 * Get the path information and send it to userland.
		 */
		if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
		    != MDI_SUCCESS) {
			retval = ENXIO;
			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
			break;
		}

		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
		    pioc, mode, s)) {
			retval = EFAULT;
			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
			break;
		}

		/* Free the memory allocated for path information */
		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
		break;
	}

	case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
	{
		uint_t		num_paths;	/* Num paths to client dev */
		sv_path_info_t	*upibuf = NULL;	/* To keep userland values */
		sv_path_info_t	*kpibuf = NULL;	/* Kernel data for ioctls */
		dev_info_t	*pdip;		/* PHCI device dip */

		if (pioc->ret_elem == NULL) {
			retval = EINVAL;
			break;
		}

		/* Get PHCI device path from user land */
		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
			retval = EFAULT;
			break;
		}

		VHCI_DEBUG(6, (CE_WARN, vdip,
		    "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));

		/* Get number of devices associated with this PHCI device */
		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "phci dip doesn't exist. invalid path <%s>",
			    s, phci_path));
			retval = ENXIO;
			break;
		}

		num_paths = mdi_phci_get_path_count(pdip);

		if (ddi_copyout(&num_paths, pioc->ret_elem,
		    sizeof (num_paths), mode)) {
			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "num_paths copyout failed", s));
			retval = EFAULT;
			break;
		}

		/* If user just wanted num_paths, then return */
		if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
		    num_paths == 0) {
			break;
		}

		/* Set num_paths to value as much as can be sent to userland */
		if (num_paths > pioc->buf_elem) {
			num_paths = pioc->buf_elem;
		}

		/* Allocate memory and get userland pointers */
		if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
		    pioc, mode, s) != 0) {
			retval = EFAULT;
			break;
		}
		ASSERT(upibuf != NULL);
		ASSERT(kpibuf != NULL);

		/*
		 * Get the path information and send it to userland.
		 */
		if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
		    != MDI_SUCCESS) {
			retval = ENXIO;
			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
			break;
		}

		if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
		    pioc, mode, s)) {
			retval = EFAULT;
			vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
			break;
		}

		/* Free the memory allocated for path information */
		vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
		break;
	}

	case SCSI_VHCI_GET_CLIENT_NAME:
	{
		dev_info_t *cdip, *pdip;

		/* Get PHCI path and device address from user land */
		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
			retval = EFAULT;
			break;
		}

		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
		    "phci <%s>, paddr <%s>", s, phci_path, paddr));

		/* Get the PHCI dip */
		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "phci dip doesn't exist. invalid path <%s>",
			    s, phci_path));
			retval = ENXIO;
			break;
		}

		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
			    "pathinfo doesn't exist. invalid device addr", s));
			retval = ENXIO;
			break;
		}

		/* Get the client device pathname and send to userland */
		cdip = mdi_pi_get_client(pip);
		vhci_ioc_devi_to_path(cdip, client_path);

		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
		    "client <%s>", s, client_path));

		if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
			retval = EFAULT;
			break;
		}
		break;
	}

	case SCSI_VHCI_PATH_ONLINE:
	case SCSI_VHCI_PATH_OFFLINE:
	case SCSI_VHCI_PATH_STANDBY:
	case SCSI_VHCI_PATH_TEST:
	{
		dev_info_t		*pdip;	/* PHCI dip */

		/* Get PHCI path and device address from user land */
		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
		    vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
			retval = EFAULT;
			break;
		}

		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
		    "phci <%s>, paddr <%s>", s, phci_path, paddr));

		/* Get the PHCI dip */
		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "phci dip doesn't exist. invalid path <%s>",
			    s, phci_path));
			retval = ENXIO;
			break;
		}

		if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
			VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
			    "pathinfo doesn't exist. invalid device addr", s));
			retval = ENXIO;
			break;
		}
		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
		    "Calling MDI function to change device state", s));

		switch (cmd) {
		case SCSI_VHCI_PATH_ONLINE:
			retval = mdi_pi_online(pip, 0);
			break;

		case SCSI_VHCI_PATH_OFFLINE:
			retval = mdi_pi_offline(pip, 0);
			break;

		case SCSI_VHCI_PATH_STANDBY:
			retval = mdi_pi_standby(pip, 0);
			break;

		case SCSI_VHCI_PATH_TEST:
			break;
		}
		break;
	}

	case SCSI_VHCI_SWITCH_TO_CNTLR:
	{
		dev_info_t *cdip;
		struct scsi_device *devp;

		/* Get the client device pathname */
		if (ddi_copyin(piocsc->client, client_path,
		    MAXPATHLEN, mode)) {
			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
			    "client_path copyin failed", s));
			retval = EFAULT;
			break;
		}

		/* Get the path class to which user wants to switch */
		if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
			VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
			    "controller_class copyin failed", s));
			retval = EFAULT;
			break;
		}

		/* Perform validity checks */
		if ((cdip = mdi_client_path2devinfo(vdip,
		    client_path)) == NULL) {
			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "client dip doesn't exist. invalid path <%s>",
			    s, client_path));
			retval = ENXIO;
			break;
		}

		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
		    "to switch controller"));
		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
		    "class <%s>", client_path, paddr));

		if (strcmp(paddr, PCLASS_PRIMARY) &&
		    strcmp(paddr, PCLASS_SECONDARY)) {
			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "invalid path class <%s>", s, paddr));
			retval = ENXIO;
			break;
		}

		devp = ddi_get_driver_private(cdip);
		if (devp == NULL) {
			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "invalid scsi device <%s>", s, client_path));
			retval = ENXIO;
			break;
		}
		vlun = ADDR2VLUN(&devp->sd_address);
		ASSERT(vlun);

		/*
		 * Check whether the device has only one pclass, PRIMARY.
		 * If so, this device doesn't support failover. It is
		 * assumed that a device with only one pclass has that
		 * pclass as PRIMARY, as that is the case today. If this
		 * is no longer true and other symmetric devices are
		 * supported with a different pclass in the future, this
		 * IOCTL will have to be overhauled anyway, as the only
		 * arguments it currently accepts are PRIMARY and
		 * SECONDARY.
		 */
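		/*
		 * sfo_pathclass_next(PCLASS_PRIMARY, ...) returns non-zero
		 * when there is no path class after PRIMARY, i.e. the
		 * device is symmetric, so the switch request is rejected.
		 */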
5754 */ 5755 fo = vlun->svl_fops; 5756 if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass, 5757 vlun->svl_fops_ctpriv)) { 5758 retval = ENOTSUP; 5759 break; 5760 } 5761 5762 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 5763 mutex_enter(&vlun->svl_mutex); 5764 if (vlun->svl_active_pclass != NULL) { 5765 if (strcmp(vlun->svl_active_pclass, paddr) == 0) { 5766 mutex_exit(&vlun->svl_mutex); 5767 retval = EALREADY; 5768 VHCI_RELEASE_LUN(vlun); 5769 break; 5770 } 5771 } 5772 mutex_exit(&vlun->svl_mutex); 5773 /* Call mdi function to cause a switch over */ 5774 retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC); 5775 if (retval == MDI_SUCCESS) { 5776 retval = 0; 5777 } else if (retval == MDI_BUSY) { 5778 retval = EBUSY; 5779 } else { 5780 retval = EIO; 5781 } 5782 VHCI_RELEASE_LUN(vlun); 5783 break; 5784 } 5785 5786 case SCSI_VHCI_PATH_ENABLE: 5787 case SCSI_VHCI_PATH_DISABLE: 5788 { 5789 dev_info_t *cdip, *pdip; 5790 5791 /* 5792 * Get client device path from user land 5793 */ 5794 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5795 retval = EFAULT; 5796 break; 5797 } 5798 5799 /* 5800 * Get Phci device path from user land 5801 */ 5802 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5803 retval = EFAULT; 5804 break; 5805 } 5806 5807 /* 5808 * Get the devinfo for the Phci. 5809 */ 5810 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5811 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5812 "phci dip doesn't exist. invalid path <%s>", 5813 s, phci_path)); 5814 retval = ENXIO; 5815 break; 5816 } 5817 5818 /* 5819 * If the client path is set to /scsi_vhci then we need 5820 * to do the operation on all the clients so set cdip to NULL. 5821 * Else, try to get the client dip. 5822 */ 5823 if (strcmp(client_path, "/scsi_vhci") == 0) { 5824 cdip = NULL; 5825 } else { 5826 if ((cdip = mdi_client_path2devinfo(vdip, 5827 client_path)) == NULL) { 5828 retval = ENXIO; 5829 VHCI_DEBUG(1, (CE_WARN, NULL, 5830 "!vhci_ioctl: ioctl <%s> client dip " 5831 "doesn't exist. invalid path <%s>", 5832 s, client_path)); 5833 break; 5834 } 5835 } 5836 5837 if (cmd == SCSI_VHCI_PATH_ENABLE) 5838 retval = mdi_pi_enable(cdip, pdip, USER_DISABLE); 5839 else 5840 retval = mdi_pi_disable(cdip, pdip, USER_DISABLE); 5841 5842 break; 5843 } 5844 5845 case SCSI_VHCI_GET_TARGET_LONGNAME: 5846 { 5847 uint_t pid = pioc->buf_elem; 5848 char *target_port; 5849 mod_hash_val_t hv; 5850 5851 /* targetmap lookup of 'target-port' by <pid> */ 5852 if (mod_hash_find(vhci_targetmap_bypid, 5853 (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) { 5854 /* 5855 * NOTE: failure to find the mapping is OK for guid 5856 * based 'target-port' values. 
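 *
 * For illustration only, a hedged sketch of how a <pid> <->
 * 'target-port' pair is assumed to be entered into both hashes when a
 * path's 'target-port' property is first seen ('tgt_port' below is a
 * hypothetical string, not a name from this file):
 *
 *	mutex_enter(&vhci_targetmap_mutex);
 *	pid = vhci_targetmap_pid++;
 *	(void) mod_hash_insert(vhci_targetmap_bypid,
 *	    (mod_hash_key_t)(uintptr_t)pid, (mod_hash_val_t)tgt_port);
 *	(void) mod_hash_insert(vhci_targetmap_byport,
 *	    (mod_hash_key_t)tgt_port, (mod_hash_val_t)(uintptr_t)pid);
 *	mutex_exit(&vhci_targetmap_mutex);
 *
 * which is what makes the by-pid lookup below possible.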
5857 */ 5858 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5859 "targetport mapping doesn't exist: pid %d", 5860 s, pid)); 5861 retval = ENXIO; 5862 break; 5863 } 5864 5865 /* copyout 'target-port' result */ 5866 target_port = (char *)hv; 5867 if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) { 5868 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5869 "targetport copyout failed: len: %d", 5870 s, (int)strlen(target_port))); 5871 retval = EFAULT; 5872 } 5873 break; 5874 } 5875 5876 #ifdef DEBUG 5877 case SCSI_VHCI_CONFIGURE_PHCI: 5878 { 5879 dev_info_t *pdip; 5880 5881 /* Get PHCI path and device address from user land */ 5882 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5883 retval = EFAULT; 5884 break; 5885 } 5886 5887 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5888 "phci <%s>", s, phci_path)); 5889 5890 /* Get the PHCI dip */ 5891 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5892 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5893 "phci dip doesn't exist. invalid path <%s>", 5894 s, phci_path)); 5895 retval = ENXIO; 5896 break; 5897 } 5898 5899 if (ndi_devi_config(pdip, 5900 NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) { 5901 retval = EIO; 5902 } 5903 5904 ddi_release_devi(pdip); 5905 break; 5906 } 5907 5908 case SCSI_VHCI_UNCONFIGURE_PHCI: 5909 { 5910 dev_info_t *pdip; 5911 5912 /* Get PHCI path and device address from user land */ 5913 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5914 retval = EFAULT; 5915 break; 5916 } 5917 5918 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5919 "phci <%s>", s, phci_path)); 5920 5921 /* Get the PHCI dip */ 5922 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) { 5923 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5924 "phci dip doesn't exist. 
invalid path <%s>", 5925 s, phci_path)); 5926 retval = ENXIO; 5927 break; 5928 } 5929 5930 if (ndi_devi_unconfig(pdip, 5931 NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) { 5932 retval = EBUSY; 5933 } 5934 5935 ddi_release_devi(pdip); 5936 break; 5937 } 5938 #endif 5939 } 5940 5941 end: 5942 /* Free the memory allocated above */ 5943 if (phci_path != NULL) { 5944 kmem_free(phci_path, MAXPATHLEN); 5945 } 5946 if (client_path != NULL) { 5947 kmem_free(client_path, MAXPATHLEN); 5948 } 5949 if (paddr != NULL) { 5950 kmem_free(paddr, MAXNAMELEN); 5951 } 5952 return (retval); 5953 } 5954 5955 /* 5956 * devctl IOCTL support for client device DR 5957 */ 5958 /* ARGSUSED */ 5959 int 5960 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 5961 int *rvalp) 5962 { 5963 dev_info_t *self; 5964 dev_info_t *child; 5965 scsi_hba_tran_t *hba; 5966 struct devctl_iocdata *dcp; 5967 struct scsi_vhci *vhci; 5968 int rv = 0; 5969 int retval = 0; 5970 scsi_vhci_priv_t *svp; 5971 mdi_pathinfo_t *pip; 5972 5973 if ((vhci = ddi_get_soft_state(vhci_softstate, 5974 MINOR2INST(getminor(dev)))) == NULL) 5975 return (ENXIO); 5976 5977 /* 5978 * check if :devctl minor device has been opened 5979 */ 5980 mutex_enter(&vhci->vhci_mutex); 5981 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5982 mutex_exit(&vhci->vhci_mutex); 5983 return (ENXIO); 5984 } 5985 mutex_exit(&vhci->vhci_mutex); 5986 5987 self = vhci->vhci_dip; 5988 hba = ddi_get_driver_private(self); 5989 if (hba == NULL) 5990 return (ENXIO); 5991 5992 /* 5993 * We can use the generic implementation for these ioctls 5994 */ 5995 switch (cmd) { 5996 case DEVCTL_DEVICE_GETSTATE: 5997 case DEVCTL_DEVICE_ONLINE: 5998 case DEVCTL_DEVICE_OFFLINE: 5999 case DEVCTL_DEVICE_REMOVE: 6000 case DEVCTL_BUS_GETSTATE: 6001 return (ndi_devctl_ioctl(self, cmd, arg, mode, 0)); 6002 } 6003 6004 /* 6005 * read devctl ioctl data 6006 */ 6007 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) 6008 return (EFAULT); 6009 6010 switch (cmd) { 6011 6012 case DEVCTL_DEVICE_RESET: 6013 /* 6014 * lookup and hold child device 6015 */ 6016 if ((child = ndi_devi_find(self, ndi_dc_getname(dcp), 6017 ndi_dc_getaddr(dcp))) == NULL) { 6018 rv = ENXIO; 6019 break; 6020 } 6021 retval = mdi_select_path(child, NULL, 6022 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), 6023 NULL, &pip); 6024 if ((retval != MDI_SUCCESS) || (pip == NULL)) { 6025 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:" 6026 "Unable to get a path, dip 0x%p", (void *)child)); 6027 rv = ENXIO; 6028 break; 6029 } 6030 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 6031 if (vhci_recovery_reset(svp->svp_svl, 6032 &svp->svp_psd->sd_address, TRUE, 6033 VHCI_DEPTH_TARGET) == 0) { 6034 VHCI_DEBUG(1, (CE_NOTE, NULL, 6035 "!vhci_ioctl(pip:%p): " 6036 "reset failed\n", (void *)pip)); 6037 rv = ENXIO; 6038 } 6039 mdi_rele_path(pip); 6040 break; 6041 6042 case DEVCTL_BUS_QUIESCE: 6043 case DEVCTL_BUS_UNQUIESCE: 6044 case DEVCTL_BUS_RESET: 6045 case DEVCTL_BUS_RESETALL: 6046 #ifdef DEBUG 6047 case DEVCTL_BUS_CONFIGURE: 6048 case DEVCTL_BUS_UNCONFIGURE: 6049 #endif 6050 rv = ENOTSUP; 6051 break; 6052 6053 default: 6054 rv = ENOTTY; 6055 } /* end of outer switch */ 6056 6057 ndi_dc_freehdl(dcp); 6058 return (rv); 6059 } 6060 6061 /* 6062 * Routine to get the PHCI pathname from ioctl structures in userland 6063 */ 6064 /* ARGSUSED */ 6065 static int 6066 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path, 6067 int mode, caddr_t s) 6068 { 6069 int retval = 0; 6070 6071 if (ddi_copyin(pioc->phci, phci_path, 
MAXPATHLEN, mode)) { 6072 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> " 6073 "phci_path copyin failed", s)); 6074 retval = EFAULT; 6075 } 6076 return (retval); 6077 6078 } 6079 6080 6081 /* 6082 * Routine to get the Client device pathname from ioctl structures in userland 6083 */ 6084 /* ARGSUSED */ 6085 static int 6086 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path, 6087 int mode, caddr_t s) 6088 { 6089 int retval = 0; 6090 6091 if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) { 6092 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: " 6093 "ioctl <%s> client_path copyin failed", s)); 6094 retval = EFAULT; 6095 } 6096 return (retval); 6097 } 6098 6099 6100 /* 6101 * Routine to get physical device address from ioctl structure in userland 6102 */ 6103 /* ARGSUSED */ 6104 static int 6105 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s) 6106 { 6107 int retval = 0; 6108 6109 if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) { 6110 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: " 6111 "ioctl <%s> device addr copyin failed", s)); 6112 retval = EFAULT; 6113 } 6114 return (retval); 6115 } 6116 6117 6118 /* 6119 * Routine to send client device pathname to userland. 6120 */ 6121 /* ARGSUSED */ 6122 static int 6123 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc, 6124 int mode, caddr_t s) 6125 { 6126 int retval = 0; 6127 6128 if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) { 6129 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: " 6130 "ioctl <%s> client_path copyout failed", s)); 6131 retval = EFAULT; 6132 } 6133 return (retval); 6134 } 6135 6136 6137 /* 6138 * Routine to translate a dev_info pointer (dip) to a device pathname. 6139 */ 6140 static void 6141 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path) 6142 { 6143 (void) ddi_pathname(dip, path); 6144 } 6145 6146 6147 /* 6148 * vhci_get_phci_path_list: 6149 * get information about devices associated with a 6150 * given PHCI device. 6151 * 6152 * Return Values: 6153 * MDI_SUCCESS; path information is returned in 'pibuf' elements 6154 */ 6155 int 6156 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf, 6157 uint_t num_elems) 6158 { 6159 uint_t count, done; 6160 mdi_pathinfo_t *pip; 6161 sv_path_info_t *ret_pip; 6162 int status; 6163 size_t prop_size; 6164 int circular; 6165 6166 /* 6167 * Get the PHCI structure and retrieve the path information 6168 * from the GUID hash table. 
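 *
 * The loop below fills one sv_path_info_t per client path, up to
 * 'num_elems' entries. A caller is assumed to size 'pibuf' first;
 * a minimal hedged sketch of such a caller (hypothetical, not part
 * of this file):
 *
 *	num_paths = mdi_phci_get_path_count(pdip);
 *	kpibuf = kmem_zalloc(sizeof (sv_path_info_t) * num_paths,
 *	    KM_SLEEP);
 *	(void) vhci_get_phci_path_list(pdip, kpibuf, num_paths);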
6169 */ 6170 6171 ret_pip = pibuf; 6172 count = 0; 6173 6174 ndi_devi_enter(pdip, &circular); 6175 6176 done = (count >= num_elems); 6177 pip = mdi_get_next_client_path(pdip, NULL); 6178 while (pip && !done) { 6179 mdi_pi_lock(pip); 6180 (void) ddi_pathname(mdi_pi_get_phci(pip), 6181 ret_pip->device.ret_phci); 6182 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6183 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6184 &ret_pip->ret_ext_state); 6185 6186 status = mdi_prop_size(pip, &prop_size); 6187 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6188 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6189 } 6190 6191 #ifdef DEBUG 6192 if (status != MDI_SUCCESS) { 6193 VHCI_DEBUG(2, (CE_WARN, NULL, 6194 "!vhci_get_phci_path_list: " 6195 "phci <%s>, prop size failure 0x%x", 6196 ret_pip->device.ret_phci, status)); 6197 } 6198 #endif /* DEBUG */ 6199 6200 6201 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6202 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6203 status = mdi_prop_pack(pip, 6204 &ret_pip->ret_prop.buf, 6205 ret_pip->ret_prop.buf_size); 6206 6207 #ifdef DEBUG 6208 if (status != MDI_SUCCESS) { 6209 VHCI_DEBUG(2, (CE_WARN, NULL, 6210 "!vhci_get_phci_path_list: " 6211 "phci <%s>, prop pack failure 0x%x", 6212 ret_pip->device.ret_phci, status)); 6213 } 6214 #endif /* DEBUG */ 6215 } 6216 6217 mdi_pi_unlock(pip); 6218 pip = mdi_get_next_client_path(pdip, pip); 6219 ret_pip++; 6220 count++; 6221 done = (count >= num_elems); 6222 } 6223 6224 ndi_devi_exit(pdip, circular); 6225 6226 return (MDI_SUCCESS); 6227 } 6228 6229 6230 /* 6231 * vhci_get_client_path_list: 6232 * get information about various paths associated with a 6233 * given client device. 6234 * 6235 * Return Values: 6236 * path information elements 6237 */ 6238 int 6239 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf, 6240 uint_t num_elems) 6241 { 6242 uint_t count, done; 6243 mdi_pathinfo_t *pip; 6244 sv_path_info_t *ret_pip; 6245 int status; 6246 size_t prop_size; 6247 int circular; 6248 6249 ret_pip = pibuf; 6250 count = 0; 6251 6252 ndi_devi_enter(cdip, &circular); 6253 6254 done = (count >= num_elems); 6255 pip = mdi_get_next_phci_path(cdip, NULL); 6256 while (pip && !done) { 6257 mdi_pi_lock(pip); 6258 (void) ddi_pathname(mdi_pi_get_phci(pip), 6259 ret_pip->device.ret_phci); 6260 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip)); 6261 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state, 6262 &ret_pip->ret_ext_state); 6263 6264 status = mdi_prop_size(pip, &prop_size); 6265 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) { 6266 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size; 6267 } 6268 6269 #ifdef DEBUG 6270 if (status != MDI_SUCCESS) { 6271 VHCI_DEBUG(2, (CE_WARN, NULL, 6272 "!vhci_get_client_path_list: " 6273 "phci <%s>, prop size failure 0x%x", 6274 ret_pip->device.ret_phci, status)); 6275 } 6276 #endif /* DEBUG */ 6277 6278 6279 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf && 6280 prop_size && ret_pip->ret_prop.buf_size >= prop_size) { 6281 status = mdi_prop_pack(pip, 6282 &ret_pip->ret_prop.buf, 6283 ret_pip->ret_prop.buf_size); 6284 6285 #ifdef DEBUG 6286 if (status != MDI_SUCCESS) { 6287 VHCI_DEBUG(2, (CE_WARN, NULL, 6288 "!vhci_get_client_path_list: " 6289 "phci <%s>, prop pack failure 0x%x", 6290 ret_pip->device.ret_phci, status)); 6291 } 6292 #endif /* DEBUG */ 6293 } 6294 6295 mdi_pi_unlock(pip); 6296 pip = mdi_get_next_phci_path(cdip, pip); 6297 ret_pip++; 6298 count++; 6299 done = (count >= num_elems); 6300 } 6301 
6302 ndi_devi_exit(cdip, circular); 6303 6304 return (MDI_SUCCESS); 6305 } 6306 6307 6308 /* 6309 * Routine to get ioctl argument structure from userland. 6310 */ 6311 /* ARGSUSED */ 6312 static int 6313 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s) 6314 { 6315 int retval = 0; 6316 6317 #ifdef _MULTI_DATAMODEL 6318 switch (ddi_model_convert_from(mode & FMODELS)) { 6319 case DDI_MODEL_ILP32: 6320 { 6321 sv_iocdata32_t ioc32; 6322 6323 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6324 retval = EFAULT; 6325 break; 6326 } 6327 pioc->client = (caddr_t)(uintptr_t)ioc32.client; 6328 pioc->phci = (caddr_t)(uintptr_t)ioc32.phci; 6329 pioc->addr = (caddr_t)(uintptr_t)ioc32.addr; 6330 pioc->buf_elem = (uint_t)ioc32.buf_elem; 6331 pioc->ret_buf = (sv_path_info_t *)(uintptr_t)ioc32.ret_buf; 6332 pioc->ret_elem = (uint_t *)(uintptr_t)ioc32.ret_elem; 6333 break; 6334 } 6335 6336 case DDI_MODEL_NONE: 6337 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6338 retval = EFAULT; 6339 break; 6340 } 6341 break; 6342 } 6343 #else /* _MULTI_DATAMODEL */ 6344 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) { 6345 retval = EFAULT; 6346 } 6347 #endif /* _MULTI_DATAMODEL */ 6348 6349 #ifdef DEBUG 6350 if (retval) { 6351 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6352 "iocdata copyin failed", s)); 6353 } 6354 #endif 6355 6356 return (retval); 6357 } 6358 6359 6360 /* 6361 * Routine to get the ioctl argument for ioctl causing controller switchover. 6362 */ 6363 /* ARGSUSED */ 6364 static int 6365 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc, 6366 int mode, caddr_t s) 6367 { 6368 int retval = 0; 6369 6370 #ifdef _MULTI_DATAMODEL 6371 switch (ddi_model_convert_from(mode & FMODELS)) { 6372 case DDI_MODEL_ILP32: 6373 { 6374 sv_switch_to_cntlr_iocdata32_t ioc32; 6375 6376 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) { 6377 retval = EFAULT; 6378 break; 6379 } 6380 piocsc->client = (caddr_t)(uintptr_t)ioc32.client; 6381 piocsc->class = (caddr_t)(uintptr_t)ioc32.class; 6382 break; 6383 } 6384 6385 case DDI_MODEL_NONE: 6386 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6387 retval = EFAULT; 6388 } 6389 break; 6390 } 6391 #else /* _MULTI_DATAMODEL */ 6392 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) { 6393 retval = EFAULT; 6394 } 6395 #endif /* _MULTI_DATAMODEL */ 6396 6397 #ifdef DEBUG 6398 if (retval) { 6399 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> " 6400 "switch_to_cntlr_iocdata copyin failed", s)); 6401 } 6402 #endif 6403 6404 return (retval); 6405 } 6406 6407 6408 /* 6409 * Routine to allocate memory for the path information structures. 6410 * It allocates two chunks of memory: one for keeping userland 6411 * pointers/values for path information and path properties, and a 6412 * second of kernel memory to hold the path properties. These path 6413 * properties are finally copied to userland. 
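 *
 * Schematically (a sketch only), for each path index i:
 *
 *	upibuf[i].ret_prop.buf       - userland buffer address, saved
 *	                               at copyin time as copyout target
 *	upibuf[i].ret_prop.buf_size  - buffer size the user supplied
 *	kpibuf[i].ret_prop.buf       - kernel buffer of that same size
 *	                               (capped at SV_PROP_MAX_BUF_SIZE)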
6414 */ 6415 /* ARGSUSED */ 6416 static int 6417 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf, 6418 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6419 { 6420 sv_path_info_t *pi; 6421 uint_t bufsize; 6422 int retval = 0; 6423 int index; 6424 6425 /* Allocate memory */ 6426 *upibuf = (sv_path_info_t *) 6427 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6428 ASSERT(*upibuf != NULL); 6429 *kpibuf = (sv_path_info_t *) 6430 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP); 6431 ASSERT(*kpibuf != NULL); 6432 6433 /* 6434 * Get the path info structure from the user space. 6435 * We are interested in the following fields: 6436 * - user size of buffer for per path properties. 6437 * - user address of buffer for path info properties. 6438 * - user pointer for returning actual buffer size 6439 * Keep these fields in the 'upibuf' structures. 6440 * Allocate buffer for per path info properties in kernel 6441 * structure ('kpibuf'). 6442 * Size of these buffers will be equal to the size of buffers 6443 * in the user space. 6444 */ 6445 #ifdef _MULTI_DATAMODEL 6446 switch (ddi_model_convert_from(mode & FMODELS)) { 6447 case DDI_MODEL_ILP32: 6448 { 6449 sv_path_info32_t *src; 6450 sv_path_info32_t pi32; 6451 6452 src = (sv_path_info32_t *)pioc->ret_buf; 6453 pi = (sv_path_info_t *)*upibuf; 6454 for (index = 0; index < num_paths; index++, src++, pi++) { 6455 if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) { 6456 retval = EFAULT; 6457 break; 6458 } 6459 6460 pi->ret_prop.buf_size = 6461 (uint_t)pi32.ret_prop.buf_size; 6462 pi->ret_prop.ret_buf_size = 6463 (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size; 6464 pi->ret_prop.buf = 6465 (caddr_t)(uintptr_t)pi32.ret_prop.buf; 6466 } 6467 break; 6468 } 6469 6470 case DDI_MODEL_NONE: 6471 if (ddi_copyin(pioc->ret_buf, *upibuf, 6472 sizeof (sv_path_info_t) * num_paths, mode)) { 6473 retval = EFAULT; 6474 } 6475 break; 6476 } 6477 #else /* _MULTI_DATAMODEL */ 6478 if (ddi_copyin(pioc->ret_buf, *upibuf, 6479 sizeof (sv_path_info_t) * num_paths, mode)) { 6480 retval = EFAULT; 6481 } 6482 #endif /* _MULTI_DATAMODEL */ 6483 6484 if (retval != 0) { 6485 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: " 6486 "ioctl <%s> normal: path_info copyin failed", s)); 6487 kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths); 6488 kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths); 6489 *upibuf = NULL; 6490 *kpibuf = NULL; 6491 return (retval); 6492 } 6493 6494 /* 6495 * Allocate memory for per path properties. 6496 */ 6497 for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) { 6498 bufsize = (*upibuf)[index].ret_prop.buf_size; 6499 6500 if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) { 6501 pi->ret_prop.buf_size = bufsize; 6502 pi->ret_prop.buf = (caddr_t) 6503 kmem_zalloc(bufsize, KM_SLEEP); 6504 ASSERT(pi->ret_prop.buf != NULL); 6505 } else { 6506 pi->ret_prop.buf_size = 0; 6507 pi->ret_prop.buf = NULL; 6508 } 6509 6510 if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) { 6511 pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc( 6512 sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP); 6513 ASSERT(pi->ret_prop.ret_buf_size != NULL); 6514 } else { 6515 pi->ret_prop.ret_buf_size = NULL; 6516 } 6517 } 6518 6519 return (0); 6520 } 6521 6522 6523 /* 6524 * Routine to free memory for the path information structures. 6525 * This is the memory which was allocated earlier. 
6526 */ 6527 /* ARGSUSED */ 6528 static void 6529 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6530 uint_t num_paths) 6531 { 6532 sv_path_info_t *pi; 6533 int index; 6534 6535 /* Free memory for per path properties */ 6536 for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) { 6537 if (pi->ret_prop.ret_buf_size != NULL) { 6538 kmem_free(pi->ret_prop.ret_buf_size, 6539 sizeof (*pi->ret_prop.ret_buf_size)); 6540 } 6541 6542 if (pi->ret_prop.buf != NULL) { 6543 kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size); 6544 } 6545 } 6546 6547 /* Free memory for path info structures */ 6548 kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths); 6549 kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths); 6550 } 6551 6552 6553 /* 6554 * Routine to copy path information and path properties to userland. 6555 */ 6556 /* ARGSUSED */ 6557 static int 6558 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf, 6559 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s) 6560 { 6561 int retval = 0, index; 6562 sv_path_info_t *upi_ptr; 6563 sv_path_info32_t *upi32_ptr; 6564 6565 #ifdef _MULTI_DATAMODEL 6566 switch (ddi_model_convert_from(mode & FMODELS)) { 6567 case DDI_MODEL_ILP32: 6568 goto copy_32bit; 6569 6570 case DDI_MODEL_NONE: 6571 goto copy_normal; 6572 } 6573 #else /* _MULTI_DATAMODEL */ 6574 6575 goto copy_normal; 6576 6577 #endif /* _MULTI_DATAMODEL */ 6578 6579 copy_normal: 6580 6581 /* 6582 * Copy path information and path properties to user land. 6583 * Pointer fields inside the path property structure were 6584 * saved in the 'upibuf' structure earlier. 6585 */ 6586 upi_ptr = pioc->ret_buf; 6587 for (index = 0; index < num_paths; index++) { 6588 if (ddi_copyout(kpibuf[index].device.ret_ct, 6589 upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6590 retval = EFAULT; 6591 break; 6592 } 6593 6594 if (ddi_copyout(kpibuf[index].ret_addr, 6595 upi_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6596 retval = EFAULT; 6597 break; 6598 } 6599 6600 if (ddi_copyout(&kpibuf[index].ret_state, 6601 &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state), 6602 mode)) { 6603 retval = EFAULT; 6604 break; 6605 } 6606 6607 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6608 &upi_ptr[index].ret_ext_state, 6609 sizeof (kpibuf[index].ret_ext_state), mode)) { 6610 retval = EFAULT; 6611 break; 6612 } 6613 6614 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6615 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6616 upibuf[index].ret_prop.ret_buf_size, 6617 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6618 retval = EFAULT; 6619 break; 6620 } 6621 6622 if ((kpibuf[index].ret_prop.buf != NULL) && 6623 ddi_copyout(kpibuf[index].ret_prop.buf, 6624 upibuf[index].ret_prop.buf, 6625 upibuf[index].ret_prop.buf_size, mode)) { 6626 retval = EFAULT; 6627 break; 6628 } 6629 } 6630 6631 #ifdef DEBUG 6632 if (retval) { 6633 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6634 "normal: path_info copyout failed", s)); 6635 } 6636 #endif 6637 6638 return (retval); 6639 6640 copy_32bit: 6641 /* 6642 * Copy path information and path properties to user land. 6643 * Pointer fields inside the path property structure were 6644 * saved in the 'upibuf' structure earlier. 
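 *
 * Note (informal): in the ILP32 case the userland pointers were
 * already widened at copyin time in vhci_ioc_alloc_pathinfo(), e.g.
 *
 *	pi->ret_prop.buf = (caddr_t)(uintptr_t)pi32.ret_prop.buf;
 *
 * so the ddi_copyout() calls can use 'upibuf' directly; only the
 * layout of the user's sv_path_info32_t array differs below.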
6645 */ 6646 upi32_ptr = (sv_path_info32_t *)pioc->ret_buf; 6647 for (index = 0; index < num_paths; index++) { 6648 if (ddi_copyout(kpibuf[index].device.ret_ct, 6649 upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) { 6650 retval = EFAULT; 6651 break; 6652 } 6653 6654 if (ddi_copyout(kpibuf[index].ret_addr, 6655 upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) { 6656 retval = EFAULT; 6657 break; 6658 } 6659 6660 if (ddi_copyout(&kpibuf[index].ret_state, 6661 &upi32_ptr[index].ret_state, 6662 sizeof (kpibuf[index].ret_state), mode)) { 6663 retval = EFAULT; 6664 break; 6665 } 6666 6667 if (ddi_copyout(&kpibuf[index].ret_ext_state, 6668 &upi32_ptr[index].ret_ext_state, 6669 sizeof (kpibuf[index].ret_ext_state), mode)) { 6670 retval = EFAULT; 6671 break; 6672 } 6673 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) && 6674 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size, 6675 upibuf[index].ret_prop.ret_buf_size, 6676 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) { 6677 retval = EFAULT; 6678 break; 6679 } 6680 6681 if ((kpibuf[index].ret_prop.buf != NULL) && 6682 ddi_copyout(kpibuf[index].ret_prop.buf, 6683 upibuf[index].ret_prop.buf, 6684 upibuf[index].ret_prop.buf_size, mode)) { 6685 retval = EFAULT; 6686 break; 6687 } 6688 } 6689 6690 #ifdef DEBUG 6691 if (retval) { 6692 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> " 6693 "32bit: path_info copyout failed", s)); 6694 } 6695 #endif 6696 6697 return (retval); 6698 } 6699 6700 6701 /* 6702 * vhci_failover() 6703 * This routine expects VHCI_HOLD_LUN before being invoked. It can be invoked 6704 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC. For asynchronous failovers 6705 * this routine shall VHCI_RELEASE_LUN on exiting. For synchronous failovers 6706 * it is the caller's responsibility to release the lun. 6707 */ 6708 6709 /* ARGSUSED */ 6710 static int 6711 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags) 6712 { 6713 char *guid; 6714 scsi_vhci_lun_t *vlun = NULL; 6715 struct scsi_vhci *vhci; 6716 mdi_pathinfo_t *pip, *npip; 6717 char *s_pclass, *pclass1, *pclass2, *pclass; 6718 char active_pclass_copy[255], *active_pclass_ptr; 6719 char *ptr1, *ptr2; 6720 mdi_pathinfo_state_t pi_state; 6721 uint32_t pi_ext_state; 6722 scsi_vhci_priv_t *svp; 6723 struct scsi_device *sd; 6724 struct scsi_failover_ops *sfo; 6725 int sps; /* mdi_select_path() status */ 6726 int activation_done = 0; 6727 int rval, retval = MDI_FAILURE; 6728 int reserve_pending, check_condition, UA_condition; 6729 struct scsi_pkt *pkt; 6730 struct buf *bp; 6731 6732 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 6733 sd = ddi_get_driver_private(cdip); 6734 vlun = ADDR2VLUN(&sd->sd_address); 6735 ASSERT(vlun != 0); 6736 ASSERT(VHCI_LUN_IS_HELD(vlun)); 6737 guid = vlun->svl_lun_wwn; 6738 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid)); 6739 vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s " 6740 "(GUID %s)", ddi_node_name(cdip), guid); 6741 6742 /* 6743 * Let's maintain a local copy of vlun->svl_active_pclass for the 6744 * rest of the processing. Accessing the field directly in the loop 6745 * below breaks the loop logic, especially when the field gets 6746 * updated by other threads updating path status etc., and causes 6747 * the 'paths are not currently available' condition to be declared 6748 * prematurely. 
6749 */ 6750 mutex_enter(&vlun->svl_mutex); 6751 if (vlun->svl_active_pclass != NULL) { 6752 (void) strlcpy(active_pclass_copy, vlun->svl_active_pclass, 6753 sizeof (active_pclass_copy)); 6754 active_pclass_ptr = &active_pclass_copy[0]; 6755 mutex_exit(&vlun->svl_mutex); 6756 if (vhci_quiesce_paths(vdip, cdip, vlun, guid, 6757 active_pclass_ptr) != 0) { 6758 retval = MDI_FAILURE; 6759 } 6760 } else { 6761 /* 6762 * can happen only when the available path to device 6763 * discovered is a STANDBY path. 6764 */ 6765 mutex_exit(&vlun->svl_mutex); 6766 active_pclass_copy[0] = '\0'; 6767 active_pclass_ptr = NULL; 6768 } 6769 6770 sfo = vlun->svl_fops; 6771 ASSERT(sfo != NULL); 6772 pclass1 = s_pclass = active_pclass_ptr; 6773 VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid, 6774 (s_pclass == NULL ? "<none>" : s_pclass))); 6775 6776 next_pathclass: 6777 6778 rval = sfo->sfo_pathclass_next(pclass1, &pclass2, 6779 vlun->svl_fops_ctpriv); 6780 if (rval == ENOENT) { 6781 if (s_pclass == NULL) { 6782 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): " 6783 "failed, no more pathclasses\n", guid)); 6784 goto done; 6785 } else { 6786 (void) sfo->sfo_pathclass_next(NULL, &pclass2, 6787 vlun->svl_fops_ctpriv); 6788 } 6789 } else if (rval == EINVAL) { 6790 vhci_log(CE_NOTE, vdip, "!Failover operation failed for " 6791 "device %s (GUID %s): Invalid path-class %s", 6792 ddi_node_name(cdip), guid, 6793 ((pclass1 == NULL) ? "<none>" : pclass1)); 6794 goto done; 6795 } 6796 if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) { 6797 /* 6798 * paths are not currently available 6799 */ 6800 vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable" 6801 " for device %s (GUID %s)", 6802 ddi_node_name(cdip), guid); 6803 goto done; 6804 } 6805 pip = npip = NULL; 6806 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering " 6807 "%s as failover destination\n", guid, pclass2)); 6808 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip); 6809 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 6810 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no " 6811 "STANDBY paths found (status:%x)!\n", guid, sps)); 6812 pclass1 = pclass2; 6813 goto next_pathclass; 6814 } 6815 do { 6816 pclass = NULL; 6817 if ((mdi_prop_lookup_string(npip, "path-class", 6818 &pclass) != MDI_SUCCESS) || (strcmp(pclass2, 6819 pclass) != 0)) { 6820 VHCI_DEBUG(1, (CE_NOTE, NULL, 6821 "!vhci_failover(5.5)(%s): skipping path " 6822 "%p(%s)...\n", guid, (void *)npip, pclass)); 6823 pip = npip; 6824 sps = mdi_select_path(cdip, NULL, 6825 MDI_SELECT_STANDBY_PATH, pip, &npip); 6826 mdi_rele_path(pip); 6827 (void) mdi_prop_free(pclass); 6828 continue; 6829 } 6830 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 6831 6832 /* 6833 * Issue a READ at a non-zero block on this STANDBY path. 6834 * Purple returns 6835 * 1. RESERVATION_CONFLICT if a reservation is pending 6836 * 2. a POR check condition if a reset happened. 6837 * 3. failover check conditions if one is already in progress. 
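 *
 * Summarizing how the statuses are interpreted below (informal):
 *
 *	STATUS_GOOD                  - already failed over; done
 *	STATUS_RESERVATION_CONFLICT  - reservation pending; a recovery
 *	                               reset may be issued first
 *	STATUS_CHECK, UA asc 0x29    - power-on reset; retry the same
 *	                               path once
 *	STATUS_CHECK, other sense    - unhandled; fall through to an
 *	                               explicit activation attempt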
6838 */ 6839 reserve_pending = 0; 6840 check_condition = 0; 6841 UA_condition = 0; 6842 6843 bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address, 6844 (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL); 6845 if (!bp) { 6846 VHCI_DEBUG(1, (CE_NOTE, NULL, 6847 "vhci_failover !No resources (buf)\n")); 6848 mdi_rele_path(npip); 6849 goto done; 6850 } 6851 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp, 6852 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 6853 PKT_CONSISTENT, NULL, NULL); 6854 if (pkt) { 6855 (void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t) 6856 pkt->pkt_cdbp, SCMD_READ, 1, 1, 0); 6857 pkt->pkt_flags = FLAG_NOINTR; 6858 check_path_again: 6859 pkt->pkt_path_instance = mdi_pi_get_path_instance(npip); 6860 pkt->pkt_time = 3*30; 6861 6862 if (scsi_transport(pkt) == TRAN_ACCEPT) { 6863 switch (pkt->pkt_reason) { 6864 case CMD_CMPLT: 6865 switch (SCBP_C(pkt)) { 6866 case STATUS_GOOD: 6867 /* Already failed over */ 6868 activation_done = 1; 6869 break; 6870 case STATUS_RESERVATION_CONFLICT: 6871 reserve_pending = 1; 6872 break; 6873 case STATUS_CHECK: 6874 check_condition = 1; 6875 break; 6876 } 6877 } 6878 } 6879 if (check_condition && 6880 (pkt->pkt_state & STATE_ARQ_DONE)) { 6881 struct scsi_extended_sense *sns = 6882 &(((struct scsi_arq_status *)(uintptr_t) 6883 (pkt->pkt_scbp))->sts_sensedata); 6884 if (sns->es_key == KEY_UNIT_ATTENTION && 6885 sns->es_add_code == 0x29) { 6886 /* Already failed over */ 6887 VHCI_DEBUG(1, (CE_NOTE, NULL, 6888 "!vhci_failover(7)(%s): " 6889 "path 0x%p POR UA condition\n", 6890 guid, (void *)npip)); 6891 if (UA_condition == 0) { 6892 UA_condition = 1; 6893 goto check_path_again; 6894 } 6895 } else { 6896 activation_done = 0; 6897 VHCI_DEBUG(1, (CE_NOTE, NULL, 6898 "!vhci_failover(%s): path 0x%p " 6899 "unhandled chkcond %x %x %x\n", 6900 guid, (void *)npip, sns->es_key, 6901 sns->es_add_code, 6902 sns->es_qual_code)); 6903 } 6904 } 6905 scsi_destroy_pkt(pkt); 6906 } 6907 scsi_free_consistent_buf(bp); 6908 6909 if (activation_done) { 6910 mdi_rele_path(npip); 6911 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6912 "path 0x%p already failedover\n", guid, 6913 (void *)npip)); 6914 break; 6915 } 6916 if (reserve_pending && (vlun->svl_xlf_capable == 0)) { 6917 (void) vhci_recovery_reset(vlun, 6918 &svp->svp_psd->sd_address, 6919 FALSE, VHCI_DEPTH_ALL); 6920 } 6921 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): " 6922 "activating path 0x%p(psd:%p)\n", guid, (void *)npip, 6923 (void *)svp->svp_psd)); 6924 if (sfo->sfo_path_activate(svp->svp_psd, pclass2, 6925 vlun->svl_fops_ctpriv) == 0) { 6926 activation_done = 1; 6927 mdi_rele_path(npip); 6928 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): " 6929 "path 0x%p successfully activated\n", guid, 6930 (void *)npip)); 6931 break; 6932 } 6933 pip = npip; 6934 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, 6935 pip, &npip); 6936 mdi_rele_path(pip); 6937 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 6938 if (activation_done == 0) { 6939 pclass1 = pclass2; 6940 goto next_pathclass; 6941 } 6942 6943 /* 6944 * if we are here, we have succeeded in activating path npip of 6945 * pathclass pclass2; let us validate all paths of pclass2 by 6946 * "ping"-ing each one and mark the good ones ONLINE 6947 * Also, set the state of the paths belonging to the previously 6948 * active pathclass to STANDBY 6949 */ 6950 pip = npip = NULL; 6951 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 6952 MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH), 6953 NULL, 
&npip); 6954 if (npip == NULL || sps != MDI_SUCCESS) { 6955 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for " 6956 "device %s (GUID %s): paths may be busy\n", 6957 ddi_node_name(cdip), guid)); 6958 goto done; 6959 } 6960 do { 6961 (void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state); 6962 if (mdi_prop_lookup_string(npip, "path-class", &pclass) 6963 != MDI_SUCCESS) { 6964 pip = npip; 6965 sps = mdi_select_path(cdip, NULL, 6966 (MDI_SELECT_ONLINE_PATH | 6967 MDI_SELECT_STANDBY_PATH | 6968 MDI_SELECT_USER_DISABLE_PATH), 6969 pip, &npip); 6970 mdi_rele_path(pip); 6971 continue; 6972 } 6973 if (strcmp(pclass, pclass2) == 0) { 6974 if (pi_state == MDI_PATHINFO_STATE_STANDBY) { 6975 svp = (scsi_vhci_priv_t *) 6976 mdi_pi_get_vhci_private(npip); 6977 VHCI_DEBUG(1, (CE_NOTE, NULL, 6978 "!vhci_failover(8)(%s): " 6979 "pinging path 0x%p\n", 6980 guid, (void *)npip)); 6981 if (sfo->sfo_path_ping(svp->svp_psd, 6982 vlun->svl_fops_ctpriv) == 1) { 6983 mdi_pi_set_state(npip, 6984 MDI_PATHINFO_STATE_ONLINE); 6985 VHCI_DEBUG(1, (CE_NOTE, NULL, 6986 "!vhci_failover(9)(%s): " 6987 "path 0x%p ping successful, " 6988 "marked online\n", guid, 6989 (void *)npip)); 6990 MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO); 6991 } 6992 } 6993 } else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass) 6994 == 0)) { 6995 if (pi_state == MDI_PATHINFO_STATE_ONLINE) { 6996 mdi_pi_set_state(npip, 6997 MDI_PATHINFO_STATE_STANDBY); 6998 VHCI_DEBUG(1, (CE_NOTE, NULL, 6999 "!vhci_failover(10)(%s): path 0x%p marked " 7000 "STANDBY\n", guid, (void *)npip)); 7001 MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM); 7002 } 7003 } 7004 (void) mdi_prop_free(pclass); 7005 pip = npip; 7006 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH | 7007 MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH), 7008 pip, &npip); 7009 mdi_rele_path(pip); 7010 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 7011 7012 /* 7013 * Update the AccessState of related MP-API TPGs 7014 */ 7015 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 7016 7017 vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully " 7018 "for device %s (GUID %s): failed over from %s to %s", 7019 ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" : 7020 s_pclass), pclass2); 7021 ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP); 7022 (void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1)); 7023 mutex_enter(&vlun->svl_mutex); 7024 ptr2 = vlun->svl_active_pclass; 7025 vlun->svl_active_pclass = ptr1; 7026 mutex_exit(&vlun->svl_mutex); 7027 if (ptr2) { 7028 kmem_free(ptr2, strlen(ptr2)+1); 7029 } 7030 mutex_enter(&vhci->vhci_mutex); 7031 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 7032 &vhci->vhci_reset_notify_listf); 7033 /* All reservations are cleared upon these resets. */ 7034 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 7035 mutex_exit(&vhci->vhci_mutex); 7036 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active " 7037 "pathclass for %s is now %s\n", guid, pclass2)); 7038 retval = MDI_SUCCESS; 7039 7040 done: 7041 if (flags == MDI_FAILOVER_ASYNC) { 7042 VHCI_RELEASE_LUN(vlun); 7043 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! " 7044 "releasing lun, as failover was ASYNC\n")); 7045 } else { 7046 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! " 7047 "NOT releasing lun, as failover was SYNC\n")); 7048 } 7049 return (retval); 7050 } 7051 7052 /* 7053 * vhci_client_attached is called after the successful attach of a 7054 * client devinfo node. 
7055 */ 7056 static void 7057 vhci_client_attached(dev_info_t *cdip) 7058 { 7059 mdi_pathinfo_t *pip; 7060 int circular; 7061 7062 /* 7063 * At this point the client has attached and its instance number is 7064 * valid, so we can set up kstats. We need to do this here because it 7065 * is possible for paths to go online prior to client attach, in which 7066 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online 7067 * was a noop. 7068 */ 7069 ndi_devi_enter(cdip, &circular); 7070 for (pip = mdi_get_next_phci_path(cdip, NULL); pip; 7071 pip = mdi_get_next_phci_path(cdip, pip)) 7072 vhci_kstat_create_pathinfo(pip); 7073 ndi_devi_exit(cdip, circular); 7074 } 7075 7076 /* 7077 * quiesce all of the online paths 7078 */ 7079 static int 7080 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun, 7081 char *guid, char *active_pclass_ptr) 7082 { 7083 scsi_vhci_priv_t *svp; 7084 char *s_pclass = NULL; 7085 mdi_pathinfo_t *npip, *pip; 7086 int sps; 7087 7088 /* quiesce currently active paths */ 7089 s_pclass = NULL; 7090 pip = npip = NULL; 7091 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip); 7092 if ((npip == NULL) || (sps != MDI_SUCCESS)) { 7093 return (1); 7094 } 7095 do { 7096 if (mdi_prop_lookup_string(npip, "path-class", 7097 &s_pclass) != MDI_SUCCESS) { 7098 mdi_rele_path(npip); 7099 vhci_log(CE_NOTE, vdip, "!Failover operation failed " 7100 "for device %s (GUID %s) due to an internal " 7101 "error", ddi_node_name(cdip), guid); 7102 return (1); 7103 } 7104 if (strcmp(s_pclass, active_pclass_ptr) == 0) { 7105 /* 7106 * quiesce path. Free s_pclass since 7107 * we don't need it anymore 7108 */ 7109 VHCI_DEBUG(1, (CE_NOTE, NULL, 7110 "!vhci_failover(2)(%s): failing over " 7111 "from %s; quiescing path %p\n", 7112 guid, s_pclass, (void *)npip)); 7113 (void) mdi_prop_free(s_pclass); 7114 svp = (scsi_vhci_priv_t *) 7115 mdi_pi_get_vhci_private(npip); 7116 if (svp == NULL) { 7117 VHCI_DEBUG(1, (CE_NOTE, NULL, 7118 "!vhci_failover(2.5)(%s): no " 7119 "client priv! 
%p offlined?\n", 7120 guid, (void *)npip)); 7121 pip = npip; 7122 sps = mdi_select_path(cdip, NULL, 7123 MDI_SELECT_ONLINE_PATH, pip, &npip); 7124 mdi_rele_path(pip); 7125 continue; 7126 } 7127 if (scsi_abort(&svp->svp_psd->sd_address, NULL) 7128 == 0) { 7129 (void) vhci_recovery_reset(vlun, 7130 &svp->svp_psd->sd_address, FALSE, 7131 VHCI_DEPTH_TARGET); 7132 } 7133 mutex_enter(&svp->svp_mutex); 7134 if (svp->svp_cmds == 0) { 7135 VHCI_DEBUG(1, (CE_NOTE, NULL, 7136 "!vhci_failover(3)(%s):" 7137 "quiesced path %p\n", guid, (void *)npip)); 7138 } else { 7139 while (svp->svp_cmds != 0) { 7140 cv_wait(&svp->svp_cv, &svp->svp_mutex); 7141 VHCI_DEBUG(1, (CE_NOTE, NULL, 7142 "!vhci_failover(3.cv)(%s):" 7143 "quiesced path %p\n", guid, 7144 (void *)npip)); 7145 } 7146 } 7147 mutex_exit(&svp->svp_mutex); 7148 } else { 7149 /* 7150 * make sure we freeup the memory 7151 */ 7152 (void) mdi_prop_free(s_pclass); 7153 } 7154 pip = npip; 7155 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, 7156 pip, &npip); 7157 mdi_rele_path(pip); 7158 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 7159 return (0); 7160 } 7161 7162 static struct scsi_vhci_lun * 7163 vhci_lun_lookup(dev_info_t *tgt_dip) 7164 { 7165 return ((struct scsi_vhci_lun *) 7166 mdi_client_get_vhci_private(tgt_dip)); 7167 } 7168 7169 static struct scsi_vhci_lun * 7170 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc) 7171 { 7172 struct scsi_vhci_lun *svl; 7173 7174 if (svl = vhci_lun_lookup(tgt_dip)) { 7175 return (svl); 7176 } 7177 7178 svl = kmem_zalloc(sizeof (*svl), KM_SLEEP); 7179 svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP); 7180 (void) strcpy(svl->svl_lun_wwn, guid); 7181 mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL); 7182 cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL); 7183 sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL); 7184 svl->svl_waiting_for_activepath = 1; 7185 svl->svl_sector_size = 1; 7186 mdi_client_set_vhci_private(tgt_dip, svl); 7187 *didalloc = 1; 7188 VHCI_DEBUG(1, (CE_NOTE, NULL, 7189 "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n", 7190 guid, (void *)svl)); 7191 return (svl); 7192 } 7193 7194 static void 7195 vhci_lun_free(dev_info_t *tgt_dip) 7196 { 7197 struct scsi_vhci_lun *dvlp; 7198 char *guid; 7199 struct scsi_device *sd; 7200 7201 /* 7202 * The scsi_device was set to driver private during child node 7203 * initialization in the scsi_hba_bus_ctl(). 
7204 */ 7205 sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip); 7206 7207 dvlp = (struct scsi_vhci_lun *) 7208 mdi_client_get_vhci_private(tgt_dip); 7209 ASSERT(dvlp != NULL); 7210 7211 mdi_client_set_vhci_private(tgt_dip, NULL); 7212 7213 guid = dvlp->svl_lun_wwn; 7214 ASSERT(guid != NULL); 7215 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid)); 7216 7217 mutex_enter(&dvlp->svl_mutex); 7218 if (dvlp->svl_active_pclass != NULL) { 7219 kmem_free(dvlp->svl_active_pclass, 7220 strlen(dvlp->svl_active_pclass)+1); 7221 } 7222 dvlp->svl_active_pclass = NULL; 7223 mutex_exit(&dvlp->svl_mutex); 7224 7225 if (dvlp->svl_lun_wwn != NULL) { 7226 kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1); 7227 } 7228 dvlp->svl_lun_wwn = NULL; 7229 7230 if (dvlp->svl_fops_name) { 7231 kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1); 7232 } 7233 dvlp->svl_fops_name = NULL; 7234 7235 if (dvlp->svl_fops_ctpriv != NULL) { 7236 dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv); 7237 } 7238 7239 if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG) 7240 taskq_destroy(dvlp->svl_taskq); 7241 7242 mutex_destroy(&dvlp->svl_mutex); 7243 cv_destroy(&dvlp->svl_cv); 7244 sema_destroy(&dvlp->svl_pgr_sema); 7245 kmem_free(dvlp, sizeof (*dvlp)); 7246 /* 7247 * vhci_lun_free may be called before the tgt_dip 7248 * initialization so check if the sd is NULL. 7249 */ 7250 if (sd != NULL) 7251 sd->sd_address.a_hba_tran->tran_tgt_private = NULL; 7252 } 7253 7254 7255 int 7256 vhci_do_scsi_cmd(struct scsi_pkt *pkt) 7257 { 7258 int err = 0; 7259 int retry_cnt = 0; 7260 struct scsi_extended_sense *sns; 7261 7262 #ifdef DEBUG 7263 if (vhci_debug > 5) { 7264 vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip, 7265 CE_WARN, "Vhci command", pkt->pkt_cdbp); 7266 } 7267 #endif 7268 7269 retry: 7270 err = scsi_poll(pkt); 7271 if (err) { 7272 if (pkt->pkt_cdbp[0] == SCMD_RELEASE) { 7273 if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) { 7274 VHCI_DEBUG(1, (CE_NOTE, NULL, 7275 "!v_s_do_s_c: RELEASE conflict\n")); 7276 return (0); 7277 } 7278 } 7279 if (retry_cnt++ < 3) { 7280 VHCI_DEBUG(1, (CE_WARN, NULL, 7281 "!v_s_do_s_c:retry packet 0x%p " 7282 "status 0x%x reason %s", 7283 (void *)pkt, SCBP_C(pkt), 7284 scsi_rname(pkt->pkt_reason))); 7285 if ((pkt->pkt_reason == CMD_CMPLT) && 7286 (SCBP_C(pkt) == STATUS_CHECK) && 7287 (pkt->pkt_state & STATE_ARQ_DONE)) { 7288 sns = &(((struct scsi_arq_status *)(uintptr_t) 7289 (pkt->pkt_scbp))->sts_sensedata); 7290 VHCI_DEBUG(1, (CE_WARN, NULL, 7291 "!v_s_do_s_c:retry " 7292 "packet 0x%p sense data %s", (void *)pkt, 7293 scsi_sname(sns->es_key))); 7294 } 7295 goto retry; 7296 } 7297 VHCI_DEBUG(1, (CE_WARN, NULL, 7298 "!v_s_do_s_c: failed transport 0x%p 0x%x", 7299 (void *)pkt, SCBP_C(pkt))); 7300 return (0); 7301 } 7302 7303 switch (pkt->pkt_reason) { 7304 case CMD_TIMEOUT: 7305 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed " 7306 "out (pkt 0x%p)", (void *)pkt)); 7307 return (0); 7308 case CMD_CMPLT: 7309 switch (SCBP_C(pkt)) { 7310 case STATUS_GOOD: 7311 break; 7312 case STATUS_CHECK: 7313 if (pkt->pkt_state & STATE_ARQ_DONE) { 7314 sns = &((( 7315 struct scsi_arq_status *) 7316 (uintptr_t) 7317 (pkt->pkt_scbp))-> 7318 sts_sensedata); 7319 if ((sns->es_key == 7320 KEY_UNIT_ATTENTION) || 7321 (sns->es_key == 7322 KEY_NOT_READY)) { 7323 /* 7324 * clear unit attn. 
7325 */ 7326 7327 VHCI_DEBUG(1, 7328 (CE_WARN, NULL, 7329 "!v_s_do_s_c: " 7330 "retry " 7331 "packet 0x%p sense " 7332 "data %s", 7333 (void *)pkt, 7334 scsi_sname 7335 (sns->es_key))); 7336 goto retry; 7337 } 7338 VHCI_DEBUG(4, (CE_WARN, NULL, 7339 "!ARQ while " 7340 "transporting " 7341 "(pkt 0x%p)", 7342 (void *)pkt)); 7343 return (0); 7344 } 7345 return (0); 7346 default: 7347 VHCI_DEBUG(1, (CE_WARN, NULL, 7348 "!Bad status returned " 7349 "(pkt 0x%p, status %x)", 7350 (void *)pkt, SCBP_C(pkt))); 7351 return (0); 7352 } 7353 break; 7354 case CMD_INCOMPLETE: 7355 case CMD_RESET: 7356 case CMD_ABORTED: 7357 case CMD_TRAN_ERR: 7358 if (retry_cnt++ < 1) { 7359 VHCI_DEBUG(1, (CE_WARN, NULL, 7360 "!v_s_do_s_c: retry packet 0x%p %s", 7361 (void *)pkt, scsi_rname(pkt->pkt_reason))); 7362 goto retry; 7363 } 7364 /* FALLTHROUGH */ 7365 default: 7366 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not " 7367 "complete successfully (pkt 0x%p," 7368 "reason %x)", (void *)pkt, pkt->pkt_reason)); 7369 return (0); 7370 } 7371 return (1); 7372 } 7373 7374 static int 7375 vhci_quiesce_lun(struct scsi_vhci_lun *vlun) 7376 { 7377 mdi_pathinfo_t *pip, *spip; 7378 dev_info_t *cdip; 7379 struct scsi_vhci_priv *svp; 7380 mdi_pathinfo_state_t pstate; 7381 uint32_t p_ext_state; 7382 int circular; 7383 7384 cdip = vlun->svl_dip; 7385 pip = spip = NULL; 7386 ndi_devi_enter(cdip, &circular); 7387 pip = mdi_get_next_phci_path(cdip, NULL); 7388 while (pip != NULL) { 7389 (void) mdi_pi_get_state2(pip, &pstate, &p_ext_state); 7390 if (pstate != MDI_PATHINFO_STATE_ONLINE) { 7391 spip = pip; 7392 pip = mdi_get_next_phci_path(cdip, spip); 7393 continue; 7394 } 7395 mdi_hold_path(pip); 7396 ndi_devi_exit(cdip, circular); 7397 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 7398 mutex_enter(&svp->svp_mutex); 7399 while (svp->svp_cmds != 0) { 7400 if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex, 7401 ddi_get_lbolt() + drv_usectohz 7402 (vhci_path_quiesce_timeout * 1000000)) == -1) { 7403 mutex_exit(&svp->svp_mutex); 7404 mdi_rele_path(pip); 7405 VHCI_DEBUG(1, (CE_WARN, NULL, 7406 "Quiesce of lun is not successful " 7407 "vlun: 0x%p.", (void *)vlun)); 7408 return (0); 7409 } 7410 } 7411 mutex_exit(&svp->svp_mutex); 7412 ndi_devi_enter(cdip, &circular); 7413 spip = pip; 7414 pip = mdi_get_next_phci_path(cdip, spip); 7415 mdi_rele_path(spip); 7416 } 7417 ndi_devi_exit(cdip, circular); 7418 return (1); 7419 } 7420 7421 static int 7422 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp) 7423 { 7424 scsi_vhci_lun_t *vlun; 7425 vhci_prout_t *prout; 7426 int rval, success; 7427 mdi_pathinfo_t *pip, *npip; 7428 scsi_vhci_priv_t *osvp; 7429 dev_info_t *cdip; 7430 uchar_t cdb_1; 7431 uchar_t temp_res_key[MHIOC_RESV_KEY_SIZE]; 7432 7433 7434 /* 7435 * see if there are any other paths available; if none, 7436 * then there is nothing to do. 7437 */ 7438 cdip = svp->svp_svl->svl_dip; 7439 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7440 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7441 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7442 VHCI_DEBUG(4, (CE_NOTE, NULL, 7443 "%s%d: vhci_pgr_validate_and_register: first path\n", 7444 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7445 return (1); 7446 } 7447 7448 vlun = svp->svp_svl; 7449 prout = &vlun->svl_prout; 7450 ASSERT(vlun->svl_pgr_active != 0); 7451 7452 /* 7453 * When the path was busy/offlined, some other host might have 7454 * cleared this key. Validate key on some other path first. 7455 * If it fails, return failure. 
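 *
 * The overall flow of this routine, as a hedged outline:
 *
 *	1. validate the current key with a plain REGISTER on some
 *	   other path;
 *	2. if validated, force-register (REGISTER AND IGNORE EXISTING
 *	   KEY) on the new path;
 *	3. re-validate on another pre-existing path;
 *	4. if re-validation fails, back out by registering a zero
 *	   service key.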
7456 */ 7457 7458 npip = pip; 7459 pip = NULL; 7460 success = 0; 7461 7462 /* Save the res key */ 7463 bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE); 7464 7465 /* 7466 * Sometimes CDB from application can be a Register_And_Ignore. 7467 * Instead of validation, this cdb would result in force registration. 7468 * Convert it to normal cdb for validation. 7469 * After that be sure to restore the cdb. 7470 */ 7471 cdb_1 = vlun->svl_cdb[1]; 7472 vlun->svl_cdb[1] &= 0xe0; 7473 7474 do { 7475 osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip); 7476 if (osvp == NULL) { 7477 VHCI_DEBUG(4, (CE_NOTE, NULL, 7478 "vhci_pgr_validate_and_register: no " 7479 "client priv! 0x%p offlined?\n", 7480 (void *)npip)); 7481 goto next_path_1; 7482 } 7483 7484 if (osvp == svp) { 7485 VHCI_DEBUG(4, (CE_NOTE, NULL, 7486 "vhci_pgr_validate_and_register: same svp 0x%p" 7487 " npip 0x%p vlun 0x%p\n", 7488 (void *)svp, (void *)npip, (void *)vlun)); 7489 goto next_path_1; 7490 } 7491 7492 VHCI_DEBUG(4, (CE_NOTE, NULL, 7493 "vhci_pgr_validate_and_register: First validate on" 7494 " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy" 7495 " cdb1 %x\n", (void *)osvp, (void *)vlun, 7496 (void *)curthread, vlun->svl_cdb[1])); 7497 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:"); 7498 7499 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7500 7501 VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy", 7502 (void *)vlun)); 7503 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7504 7505 rval = vhci_do_prout(osvp); 7506 if (rval == 1) { 7507 VHCI_DEBUG(4, (CE_NOTE, NULL, 7508 "%s%d: vhci_pgr_validate_and_register: key" 7509 " validated thread 0x%p\n", ddi_driver_name(cdip), 7510 ddi_get_instance(cdip), (void *)curthread)); 7511 pip = npip; 7512 success = 1; 7513 break; 7514 } else { 7515 VHCI_DEBUG(4, (CE_NOTE, NULL, 7516 "vhci_pgr_validate_and_register: First validation" 7517 " on osvp 0x%p failed %x\n", (void *)osvp, rval)); 7518 vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:"); 7519 } 7520 7521 /* 7522 * Try other paths 7523 */ 7524 next_path_1: 7525 pip = npip; 7526 rval = mdi_select_path(cdip, NULL, 7527 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7528 pip, &npip); 7529 mdi_rele_path(pip); 7530 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7531 7532 7533 /* Be sure to restore original cdb */ 7534 vlun->svl_cdb[1] = cdb_1; 7535 7536 /* Restore the res_key */ 7537 bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7538 7539 /* 7540 * If key could not be registered on any path for the first time, 7541 * return success as online should still continue. 
7542 */ 7543 if (success == 0) { 7544 return (1); 7545 } 7546 7547 ASSERT(pip != NULL); 7548 7549 /* 7550 * Force register on new path 7551 */ 7552 cdb_1 = vlun->svl_cdb[1]; /* store the cdb */ 7553 7554 vlun->svl_cdb[1] &= 0xe0; 7555 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE; 7556 7557 vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: "); 7558 7559 bcopy(prout->active_service_key, prout->service_key, 7560 MHIOC_RESV_KEY_SIZE); 7561 bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7562 7563 vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: "); 7564 7565 rval = vhci_do_prout(svp); 7566 vlun->svl_cdb[1] = cdb_1; /* restore the cdb */ 7567 if (rval != 1) { 7568 VHCI_DEBUG(4, (CE_NOTE, NULL, 7569 "vhci_pgr_validate_and_register: register on new" 7570 " path 0x%p svp 0x%p failed %x\n", 7571 (void *)pip, (void *)svp, rval)); 7572 vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: "); 7573 mdi_rele_path(pip); 7574 return (0); 7575 } 7576 7577 if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) { 7578 VHCI_DEBUG(4, (CE_NOTE, NULL, 7579 "vhci_pgr_validate_and_register: zero service key\n")); 7580 mdi_rele_path(pip); 7581 return (rval); 7582 } 7583 7584 /* 7585 * While the key was force registered, some other host might have 7586 * cleared the key. Re-validate key on another pre-existing path 7587 * before declaring success. 7588 */ 7589 npip = pip; 7590 pip = NULL; 7591 7592 /* 7593 * Sometimes CDB from application can be Register and Ignore. 7594 * Instead of validation, it would result in force registration. 7595 * Convert it to normal cdb for validation. 7596 * After that be sure to restore the cdb. 7597 */ 7598 cdb_1 = vlun->svl_cdb[1]; 7599 vlun->svl_cdb[1] &= 0xe0; 7600 success = 0; 7601 7602 do { 7603 osvp = (scsi_vhci_priv_t *) 7604 mdi_pi_get_vhci_private(npip); 7605 if (osvp == NULL) { 7606 VHCI_DEBUG(4, (CE_NOTE, NULL, 7607 "vhci_pgr_validate_and_register: no " 7608 "client priv! 0x%p offlined?\n", 7609 (void *)npip)); 7610 goto next_path_2; 7611 } 7612 7613 if (osvp == svp) { 7614 VHCI_DEBUG(4, (CE_NOTE, NULL, 7615 "vhci_pgr_validate_and_register: same osvp 0x%p" 7616 " npip 0x%p vlun 0x%p\n", 7617 (void *)svp, (void *)npip, (void *)vlun)); 7618 goto next_path_2; 7619 } 7620 7621 VHCI_DEBUG(4, (CE_NOTE, NULL, 7622 "vhci_pgr_validate_and_register: Re-validation on" 7623 " osvp 0x%p being done. 
vlun 0x%p Before bcopy cdb1 %x\n", 7624 (void *)osvp, (void *)vlun, vlun->svl_cdb[1])); 7625 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7626 7627 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7628 7629 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: "); 7630 7631 rval = vhci_do_prout(osvp); 7632 if (rval == 1) { 7633 VHCI_DEBUG(4, (CE_NOTE, NULL, 7634 "%s%d: vhci_pgr_validate_and_register: key" 7635 " validated thread 0x%p\n", ddi_driver_name(cdip), 7636 ddi_get_instance(cdip), (void *)curthread)); 7637 pip = npip; 7638 success = 1; 7639 break; 7640 } else { 7641 VHCI_DEBUG(4, (CE_NOTE, NULL, 7642 "vhci_pgr_validate_and_register: Re-validation on" 7643 " osvp 0x%p failed %x\n", (void *)osvp, rval)); 7644 vhci_print_prout_keys(vlun, 7645 "v_pgr_val_reg: reval failed: "); 7646 } 7647 7648 /* 7649 * Try other paths 7650 */ 7651 next_path_2: 7652 pip = npip; 7653 rval = mdi_select_path(cdip, NULL, 7654 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, 7655 pip, &npip); 7656 mdi_rele_path(pip); 7657 } while ((rval == MDI_SUCCESS) && (npip != NULL)); 7658 7659 /* Be sure to restore original cdb */ 7660 vlun->svl_cdb[1] = cdb_1; 7661 7662 if (success == 1) { 7663 /* Successfully validated registration */ 7664 mdi_rele_path(pip); 7665 return (1); 7666 } 7667 7668 VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed")); 7669 7670 /* 7671 * key invalid, back out by registering key value of 0 7672 */ 7673 VHCI_DEBUG(4, (CE_NOTE, NULL, 7674 "vhci_pgr_validate_and_register: backout on" 7675 " svp 0x%p being done\n", (void *)svp)); 7676 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7677 7678 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE); 7679 bzero(prout->service_key, MHIOC_RESV_KEY_SIZE); 7680 7681 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: "); 7682 7683 /* 7684 * Get a new path 7685 */ 7686 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH | 7687 MDI_SELECT_STANDBY_PATH, NULL, &pip); 7688 if ((rval != MDI_SUCCESS) || (pip == NULL)) { 7689 VHCI_DEBUG(4, (CE_NOTE, NULL, 7690 "%s%d: vhci_pgr_validate_and_register: no valid pip\n", 7691 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7692 return (0); 7693 } 7694 7695 if ((rval = vhci_do_prout(svp)) != 1) { 7696 VHCI_DEBUG(4, (CE_NOTE, NULL, 7697 "vhci_pgr_validate_and_register: backout on" 7698 " svp 0x%p failed\n", (void *)svp)); 7699 vhci_print_prout_keys(vlun, "backout failed"); 7700 7701 VHCI_DEBUG(4, (CE_WARN, NULL, 7702 "%s%d: vhci_pgr_validate_and_register: key" 7703 " validation and backout failed", ddi_driver_name(cdip), 7704 ddi_get_instance(cdip))); 7705 if (rval == VHCI_PGR_ILLEGALOP) { 7706 VHCI_DEBUG(4, (CE_WARN, NULL, 7707 "%s%d: vhci_pgr_validate_and_register: key" 7708 " already cleared", ddi_driver_name(cdip), 7709 ddi_get_instance(cdip))); 7710 rval = 1; 7711 } else 7712 rval = 0; 7713 } else { 7714 VHCI_DEBUG(4, (CE_NOTE, NULL, 7715 "%s%d: vhci_pgr_validate_and_register: key" 7716 " validation failed, key backed out\n", 7717 ddi_driver_name(cdip), ddi_get_instance(cdip))); 7718 vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: "); 7719 } 7720 mdi_rele_path(pip); 7721 7722 return (rval); 7723 } 7724 7725 /* 7726 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures 7727 * that vhci_scsi_start is not called in interrupt context. 7728 * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we 7729 * need to complete the command if something goes wrong. 
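 *
 * The dispatch itself is assumed to be queued from vhci_scsi_start
 * roughly as in this hypothetical sketch (the 'vhci_taskq' field
 * name is an assumption, not confirmed by this excerpt):
 *
 *	(void) taskq_dispatch(vhci->vhci_taskq,
 *	    vhci_dispatch_scsi_start, (void *)vpkt, TQ_SLEEP);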
 */
static void
vhci_dispatch_scsi_start(void *arg)
{
	struct vhci_pkt *vpkt = (struct vhci_pkt *)arg;
	struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
	int rval = TRAN_BUSY;

	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
	    " scsi-2 reserve for 0x%p\n",
	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));

	/*
	 * To prevent the taskq from being called recursively we set
	 * the VHCI_PKT_THRU_TASKQ bit in vpkt_state.
	 */
	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;

	/*
	 * Wait for the transport to become ready to send packets; if it
	 * times out, it returns something other than TRAN_BUSY.
	 * vhci_reserve_delay may need tuning for other transports, which
	 * is why it is a global.  Using delay(9F) is safe here because
	 * this routine runs from taskq dispatch, not interrupt context.
	 */
	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
		delay(drv_usectohz(vhci_reserve_delay));
	}

	switch (rval) {
	case TRAN_ACCEPT:
		return;

	default:
		/*
		 * This pkt will be retried; to ensure another taskq
		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
		 * flag.
		 */
		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;

		/* Ensure that the pkt is retried without a reset */
		tpkt->pkt_reason = CMD_ABORTED;
		tpkt->pkt_statistics |= STAT_ABORTED;
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
		    "TRAN_rval %d returned for dip 0x%p", rval,
		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
		break;
	}

	/*
	 * vpkt_org_vpkt should always be NULL here if the retry command
	 * has been successfully dispatched.  If vpkt_org_vpkt != NULL at
	 * this point, it is an error, so restore the original vpkt and
	 * return an error to the target driver so it can retry the
	 * command as appropriate.
	 */
	if (vpkt->vpkt_org_vpkt != NULL) {
		struct vhci_pkt *new_vpkt = vpkt;
		scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
		    mdi_pi_get_vhci_private(vpkt->vpkt_path);

		vpkt = vpkt->vpkt_org_vpkt;

		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;

		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
		    new_vpkt->vpkt_tgt_pkt);

		tpkt = vpkt->vpkt_tgt_pkt;
	}

	if (tpkt->pkt_comp) {
		tpkt->pkt_comp(tpkt);
	}
}

static void
vhci_initiate_auto_failback(void *arg)
{
	struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
	dev_info_t *vdip, *cdip;
	int held;

	cdip = vlun->svl_dip;
	vdip = ddi_get_parent(cdip);

	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);

	/*
	 * Perform a final check that the active path class is still not
	 * the preferred path class: in the time since the auto failback
	 * was dispatched, an external failover could have been detected
	 * (some other host could have detected this condition and
	 * triggered the auto failback already).  Going ahead with the
	 * failover in that case would negate the whole purpose of auto
	 * failback.
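	 * The check below therefore re-reads svl_active_pclass under
	 * svl_mutex and returns early if the preferred path class is
	 * already active, making this dispatched failback a no-op.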
	 */
	mutex_enter(&vlun->svl_mutex);
	if (vlun->svl_active_pclass != NULL) {
		char *best_pclass;
		struct scsi_failover_ops *fo;

		fo = vlun->svl_fops;

		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
		    vlun->svl_fops_ctpriv);
		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
			mutex_exit(&vlun->svl_mutex);
			VHCI_RELEASE_LUN(vlun);
			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
			    "auto failback for %s as %s pathclass already "
			    "active.\n", vlun->svl_lun_wwn, best_pclass));
			return;
		}
	}
	mutex_exit(&vlun->svl_mutex);
	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
	    == MDI_SUCCESS) {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "succeeded for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	} else {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "failed for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	}
	VHCI_RELEASE_LUN(vlun);
}

#ifdef DEBUG
static void
vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
{
	vhci_clean_print(NULL, 5, "Current PGR Keys",
	    (uchar_t *)prin, numkeys * 8);
}
#endif

static void
vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
{
	int i;
	vhci_prout_t *prout;
	char buf1[4*MHIOC_RESV_KEY_SIZE + 1];
	char buf2[4*MHIOC_RESV_KEY_SIZE + 1];
	char buf3[4*MHIOC_RESV_KEY_SIZE + 1];
	char buf4[4*MHIOC_RESV_KEY_SIZE + 1];

	prout = &vlun->svl_prout;

	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf4[4*i], "[%02x]",
		    prout->active_service_key[i]);

	/* Print all four keys in one call so the lines cannot interleave */
	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
	    "res_key:            %s\n"
	    "service_key:        %s\n"
	    "active_res_key:     %s\n"
	    "active_service_key: %s\n",
	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
}

/*
 * Called from vhci_scsi_start to update the pHCI pkt with the target packet.
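 * The target packet's flags, timeout, CDB and residual are copied into
 * the pHCI packet, and pkt_state, pkt_statistics and pkt_reason are
 * cleared so the pHCI packet starts clean on each (re)issue.  For a
 * polled request (FLAG_NOINTR) or a suspended path, pkt_comp is left
 * NULL; otherwise completion is routed through vhci_intr().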
 */
static void
vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
{
	ASSERT(vpkt->vpkt_hba_pkt);

	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;

	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
		/*
		 * A polled command is requested or the HBA is in a
		 * suspended state
		 */
		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
	} else {
		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
	}
	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
	    vpkt->vpkt_tgt_init_cdblen);
	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;

	/* Re-initialize the following pHCI packet state information */
	vpkt->vpkt_hba_pkt->pkt_state = 0;
	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
	vpkt->vpkt_hba_pkt->pkt_reason = 0;
}

static int
vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
    void *arg, void *result)
{
	int ret = DDI_SUCCESS;

	/*
	 * Generic processing in MPxIO framework
	 */
	ret = mdi_bus_power(parent, impl_arg, op, arg, result);

	switch (ret) {
	case MDI_SUCCESS:
		ret = DDI_SUCCESS;
		break;
	case MDI_FAILURE:
		ret = DDI_FAILURE;
		break;
	default:
		break;
	}

	return (ret);
}

static int
vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip)
{
	dev_info_t *cdip;
	mdi_pathinfo_t *npip = NULL;
	scsi_vhci_priv_t *svp = NULL;
	struct scsi_address *pap = NULL;
	scsi_hba_tran_t *hba = NULL;
	int sps;
	int mps_flag;
	int rval = 0;

	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
	if (pip) {
		/*
		 * If the call is from vhci_pathinfo_state_change,
		 * then this path was busy and is becoming ready to accept IO.
		 */
		ASSERT(ap != NULL);
		hba = ap->a_hba_tran;
		ASSERT(hba != NULL);
		rval = scsi_ifsetcap(ap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		return (rval);
	}

	/*
	 * Set the capability on all the pHCIs.  If any path is busy,
	 * the capability will be set by vhci_pathinfo_state_change.
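	 * The loop below visits every online and standby path:
	 * mdi_select_path() with a NULL "start" argument returns the
	 * first path, and passing the previous pip back in returns the
	 * next one, until it stops returning MDI_SUCCESS.  Each path's
	 * scsi_address is handed to scsi_ifsetcap(9F) in turn.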
	 */

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL,
		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
		    (void *)cdip));
		return (0);
	}

again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (rval);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (rval);
	}

	pap = &svp->svp_psd->sd_address;
	ASSERT(pap != NULL);
	hba = pap->a_hba_tran;
	ASSERT(hba != NULL);

	if (hba->tran_setcap != NULL) {
		rval = scsi_ifsetcap(pap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		/*
		 * Select the next path and issue the setcap, repeating
		 * until all paths are exhausted
		 */
		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
			mdi_rele_path(pip);
			return (1);
		}
		mdi_rele_path(pip);
		pip = npip;
		goto again;
	}
	mdi_rele_path(pip);
	return (rval);
}

static int
vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **child)
{
	char *guid;

	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
		guid = vhci_devnm_to_guid((char *)arg);
	else
		guid = NULL;

	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
	    == MDI_SUCCESS)
		return (NDI_SUCCESS);
	else
		return (NDI_FAILURE);
}

/*
 * Take the original vhci_pkt and create a duplicate of the pkt for
 * resending as though it originated in ssd.
 */
static struct scsi_pkt *
vhci_create_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt *new_vpkt = NULL;
	struct scsi_pkt *pkt = NULL;

	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	/*
	 * Ensure consistent data at completion time by setting
	 * PKT_CONSISTENT
	 */
	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
	    vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
	if (pkt != NULL) {
		new_vpkt = TGTPKT2VHCIPKT(pkt);

		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;

		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;

		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);

		/*
		 * Save a pointer to the original vhci_pkt
		 */
		new_vpkt->vpkt_org_vpkt = vpkt;
	}

	return (pkt);
}

/*
 * Copy the successful completion information from the hba packet into
 * the original target pkt from the upper layer.  Returns the original
 * vpkt and destroys the new vpkt from the internal retry.
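 * Only the status byte, residual, state, statistics and reason are
 * copied back; the data itself needs no copy because the retry packet
 * was initialized over the same vpkt_tgt_init_bp as the original.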
 */
static struct vhci_pkt *
vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt *ret_vpkt = NULL;
	struct scsi_pkt *tpkt = NULL;
	struct scsi_pkt *hba_pkt = NULL;
	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	ASSERT(vpkt->vpkt_org_vpkt != NULL);
	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
	    "completed successfully!\n"));

	ret_vpkt = vpkt->vpkt_org_vpkt;
	tpkt = ret_vpkt->vpkt_tgt_pkt;
	hba_pkt = vpkt->vpkt_hba_pkt;

	/*
	 * Copy the good status into the target driver's packet
	 */
	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
	tpkt->pkt_resid = hba_pkt->pkt_resid;
	tpkt->pkt_state = hba_pkt->pkt_state;
	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
	tpkt->pkt_reason = hba_pkt->pkt_reason;

	/*
	 * Destroy the internally created vpkt for the retry
	 */
	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
	    vpkt->vpkt_tgt_pkt);

	return (ret_vpkt);
}

/* Restart the request sense command */
static void
vhci_uscsi_restart_sense(void *arg)
{
	struct buf *rqbp;
	struct buf *bp;
	struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
	mp_uscsi_cmd_t *mp_uscmdp;

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));

	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
		/* if the retry fails, we need to wake up the original cmd */
		mp_uscmdp = rqpkt->pkt_private;
		ASSERT(mp_uscmdp != NULL);
		bp = mp_uscmdp->cmdbp;
		rqbp = mp_uscmdp->rqbp;
		ASSERT(bp != NULL && rqbp != NULL);
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		bp->b_resid = bp->b_bcount;
		bioerror(bp, EIO);
		biodone(bp);
	}
}

/*
 * Auto request sense is not enabled, so we have to retrieve the request
 * sense manually.
 */
static int
vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
{
	struct buf *rqbp, *cmdbp;
	struct scsi_pkt *rqpkt;
	int rval = 0;

	cmdbp = mp_uscmdp->cmdbp;
	ASSERT(cmdbp != NULL);

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
	/* set up the packet information and cdb */
	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
		return (-1);
	}

	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
		scsi_free_consistent_buf(rqbp);
		return (-1);
	}

	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);

	mp_uscmdp->rqbp = rqbp;
	rqbp->b_private = mp_uscmdp;
	rqpkt->pkt_flags |= FLAG_SENSING;
	rqpkt->pkt_time = 60;
	rqpkt->pkt_comp = vhci_uscsi_iodone;
	rqpkt->pkt_private = mp_uscmdp;

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
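	 *
	 * Clearing pkt_path_instance below (when the packet was allocated
	 * through the framework) asks the framework to pick any usable
	 * path rather than pinning the sense command to a specific one.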
	 */
	if (scsi_pkt_allocated_correctly(rqpkt))
		rqpkt->pkt_path_instance = 0;

	/* send the request sense command */
	switch (scsi_transport(rqpkt)) {
	case TRAN_ACCEPT:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport accepted."));
		break;
	case TRAN_BUSY:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport busy, setting timeout."));
		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
		    (drv_usectohz(5 * 1000000)));
		break;
	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport failed"));
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		rval = -1;
	}

	return (rval);
}

/*
 * Completion routine for the MPAPI uscsi command.  It behaves as though
 * FLAG_DIAGNOSE were set, meaning there are no retries except for a
 * manual request sense.
 */
void
vhci_uscsi_iodone(struct scsi_pkt *pkt)
{
	struct buf *bp;
	mp_uscsi_cmd_t *mp_uscmdp;
	struct uscsi_cmd *uscmdp;
	struct scsi_arq_status *arqstat;
	int err;

	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
	uscmdp = mp_uscmdp->uscmdp;
	bp = mp_uscmdp->cmdbp;
	ASSERT(bp != NULL);
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
	/* Save the status and the residual into the uscsi_cmd struct */
	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
	uscmdp->uscsi_resid = bp->b_resid;

	/* return early on a fully successful command */
	if (pkt->pkt_reason == CMD_CMPLT &&
	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
	    pkt->pkt_resid == 0) {
		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		return;
	}
	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
	    pkt->pkt_reason, pkt->pkt_resid,
	    pkt->pkt_state, bp->b_bcount, bp->b_resid));

	err = EIO;

	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
	if (pkt->pkt_reason != CMD_CMPLT) {
		/*
		 * The command did not complete.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command did not complete."
		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
		if (pkt->pkt_flags & FLAG_SENSING) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
			err = ETIMEDOUT;
		}
	} else if ((pkt->pkt_state & STATE_ARQ_DONE) &&
	    mp_uscmdp->arq_enabled) {
		/*
		 * The auto-rqsense happened, and the packet has a filled-in
		 * scsi_arq_status structure, pointed to by pkt_scbp.
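		 *
		 * The usable sense length is SENSE_LENGTH minus the
		 * request-sense packet residual, clamped to the caller's
		 * uscsi_rqlen; e.g. with uscsi_rqlen of 32, SENSE_LENGTH
		 * at its usual value of 20, and sts_rqpkt_resid of 2,
		 * 18 bytes of sense data are copied and uscsi_rqresid is
		 * set to 14.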
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: received auto-requested sense"));
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			/* get the amount of data to copy into rqbuf */
			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus =
			    *((char *)&arqstat->sts_rqpkt_status);
			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
			    rqlen != 0) {
				bcopy(&(arqstat->sts_sensedata),
				    uscmdp->uscsi_rqbuf, rqlen);
			}
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_uscsi_iodone: ARQ "
			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
			    "xfer: %d rqpkt_resid: %d\n",
			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
			    uscmdp->uscsi_rqlen, rqlen,
			    arqstat->sts_rqpkt_resid));
		}
	} else if (pkt->pkt_flags & FLAG_SENSING) {
		struct buf *rqbp;
		struct scsi_status *rqstatus;

		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
		/* a manual request sense was done - get the information */
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			int rqlen = SENSE_LENGTH - pkt->pkt_resid;

			rqbp = mp_uscmdp->rqbp;
			/* get the amount of data to copy into rqbuf */
			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
				    rqlen);
			}
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
			scsi_free_consistent_buf(rqbp);
		}
		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING"
		    " uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
	} else {
		struct scsi_status *status =
		    (struct scsi_status *)pkt->pkt_scbp;
		/*
		 * The command completed and we are not getting sense.  Check
		 * for errors and decide what to do next.
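		 *
		 * If the status byte shows CHECK CONDITION, a manual
		 * REQUEST SENSE is issued through vhci_uscsi_send_sense()
		 * and this command is finished in a later
		 * vhci_uscsi_iodone() call; otherwise the command is
		 * treated as successful and any residual is folded into
		 * the buf.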
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command appears complete: reason: %x",
		    pkt->pkt_reason));
		if (status->sts_chk) {
			/* need to manually get the request sense */
			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
				scsi_destroy_pkt(pkt);
				return;
			}
		} else {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_uscsi_iodone: appears complete"));
			err = 0;
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			if (pkt->pkt_resid) {
				bp->b_resid += pkt->pkt_resid;
			}
		}
	}

	if (err) {
		if (bp->b_resid == 0)
			bp->b_resid = bp->b_bcount;
		bioerror(bp, err);
		bp->b_flags |= B_ERROR;
	}

	scsi_destroy_pkt(pkt);
	biodone(bp);

	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
}

/*
 * Start routine for the MPAPI uscsi command
 */
int
vhci_uscsi_iostart(struct buf *bp)
{
	struct scsi_pkt *pkt;
	struct uscsi_cmd *uscmdp;
	mp_uscsi_cmd_t *mp_uscmdp;
	int stat_size, rval;
	int retry = 0;

	ASSERT(bp->b_private != NULL);

	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
	uscmdp = mp_uscmdp->uscmdp;
	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
		stat_size = SENSE_LENGTH;
	} else {
		stat_size = 1;
	}

	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
	    stat_size, 0, 0, SLEEP_FUNC, NULL);
	if (pkt == NULL) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: EINVAL"));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EINVAL);
		biodone(bp);
		return (EINVAL);
	}

	pkt->pkt_time = uscmdp->uscsi_timeout;
	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
	pkt->pkt_comp = vhci_uscsi_iodone;
	pkt->pkt_private = mp_uscmdp;
	if (uscmdp->uscsi_flags & USCSI_SILENT)
		pkt->pkt_flags |= FLAG_SILENT;
	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
		pkt->pkt_flags |= FLAG_ISOLATE;
	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
		pkt->pkt_flags |= FLAG_DIAGNOSE;
	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
	}
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
	    " stat_size: %d",
	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
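	 *
	 * A rough sketch of the setup expected from a caller (illustrative
	 * only; allocating the mp_uscsi_cmd_t and binding its ap and pip
	 * to a path are assumptions, not shown in this file):
	 *
	 *	bp->b_private = mp_uscmdp;	-- carries uscmdp, ap, pip
	 *	rval = vhci_uscsi_iostart(bp);	-- completes via biodone()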
	 */
	if (scsi_pkt_allocated_correctly(pkt))
		pkt->pkt_path_instance = 0;

	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
	    retry < vhci_uscsi_retry_count) {
		delay(drv_usectohz(vhci_uscsi_delay));
		retry++;
	}
	if (retry >= vhci_uscsi_retry_count) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
	}
	switch (rval) {
	case TRAN_ACCEPT:
		rval = 0;
		break;

	default:
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
		    rval, bp->b_bcount, bp->b_resid));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EIO);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		rval = EIO;
		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		break;
	}
	VHCI_DEBUG(4, (CE_NOTE, NULL,
	    "vhci_uscsi_iostart: exit: rval: %d", rval));
	return (rval);
}

#ifdef DEBUG

extern struct scsi_key_strings scsi_cmds[];

static char *
vhci_print_scsi_cmd(char cmd)
{
	char tmp[64];
	char *cpnt;

	cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
	/* tmp goes out of scope on return and caller sees garbage */
	if (cpnt == tmp) {
		cpnt = "Unknown Command";
	}
	return (cpnt);
}

extern uchar_t scsi_cdb_size[];

static void
vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
{
	int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
	char buf[256];

	if (level == CE_NOTE) {
		vhci_log(level, dip, "path cmd %s\n",
		    vhci_print_scsi_cmd(*cdb));
		return;
	}

	(void) sprintf(buf, "%s for cmd(%s)", title,
	    vhci_print_scsi_cmd(*cdb));
	vhci_clean_print(dip, level, buf, cdb, len);
}

static void
vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
    int len)
{
	int i;
	int c;
	char *format;
	char buf[256];
	uchar_t byte;

	(void) sprintf(buf, "%s:\n", title);
	vhci_log(level, dev, "%s", buf);
	level = CE_CONT;
	for (i = 0; i < len; ) {
		buf[0] = 0;
		for (c = 0; c < 8 && i < len; c++, i++) {
			byte = (uchar_t)data[i];
			if (byte < 0x10)
				format = "0x0%x ";
			else
				format = "0x%x ";
			(void) sprintf(&buf[(int)strlen(buf)], format, byte);
		}
		(void) sprintf(&buf[(int)strlen(buf)], "\n");

		vhci_log(level, dev, "%s\n", buf);
	}
}
#endif