/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Multiplexed I/O SCSI vHCI implementation
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/scsi/impl/services.h>
#include <sys/sunmdi.h>
#include <sys/mdi_impldefs.h>
#include <sys/scsi/adapters/scsi_vhci.h>
#include <sys/disp.h>
#include <sys/byteorder.h>

extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;

#ifdef	DEBUG
int vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
#endif

/* retry for the vhci_do_prout command when a not ready is returned */
int vhci_prout_not_ready_retry = 180;

/*
 * These values are defined to support the internal retry of
 * SCSI packets for better sense code handling.
 */
#define	VHCI_CMD_CMPLT	0
#define	VHCI_CMD_RETRY	1
#define	VHCI_CMD_ERROR	-1

#define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
#define	VHCI_SCSI_PERR		0x47
#define	VHCI_PGR_ILLEGALOP	-2
#define	VHCI_NUM_UPDATE_TASKQ	8
/* changed to 132 to accommodate HDS */
#define	VHCI_STD_INQ_SIZE	132

/*
 * Version Macros
 */
#define	VHCI_NAME_VERSION	"SCSI VHCI Driver %I%"
char		vhci_version_name[] = VHCI_NAME_VERSION;

int		vhci_first_time = 0;
clock_t		vhci_to_ticks = 0;
int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
kcondvar_t	vhci_cv;
kmutex_t	vhci_global_mutex;
void		*vhci_softstate = NULL;	/* for soft state */

/*
 * Flag to delay the retry of the reserve command
 */
int		vhci_reserve_delay = 100000;
static int	vhci_path_quiesce_timeout = 60;
static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];

/* uscsi delay for a TRAN_BUSY */
static int vhci_uscsi_delay = 100000;
static int vhci_uscsi_retry_count = 180;
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static timeout_id_t vhci_restart_timeid = 0;

/*
 * Bidirectional map of 'target-port' to port id <pid> for support of
 * iostat(1M) '-Xx' and '-Yx' output.
 */
static kmutex_t		vhci_targetmap_mutex;
static uint_t		vhci_targetmap_pid = 1;
static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */
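
/*
 * Illustrative sketch (not part of the driver) of how the pair of
 * hashes above is kept consistent under vhci_targetmap_mutex; the
 * helper name is hypothetical:
 *
 *	static uint_t
 *	targetmap_lookup_or_bind(char *tgtport)
 *	{
 *		mod_hash_val_t val;
 *		uint_t pid;
 *
 *		mutex_enter(&vhci_targetmap_mutex);
 *		if (mod_hash_find(vhci_targetmap_byport,
 *		    (mod_hash_key_t)tgtport, &val) == 0) {
 *			pid = (uint_t)(uintptr_t)val;
 *		} else {
 *			pid = vhci_targetmap_pid++;
 *			(void) mod_hash_insert(vhci_targetmap_byport,
 *			    (mod_hash_key_t)tgtport,
 *			    (mod_hash_val_t)(uintptr_t)pid);
 *			(void) mod_hash_insert(vhci_targetmap_bypid,
 *			    (mod_hash_key_t)(uintptr_t)pid,
 *			    (mod_hash_val_t)tgtport);
 *		}
 *		mutex_exit(&vhci_targetmap_mutex);
 *		return (pid);
 *	}
 */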

/*
 * functions exported by scsi_vhci struct cb_ops
 */
static int vhci_open(dev_t *, int, int, cred_t *);
static int vhci_close(dev_t, int, int, cred_t *);
static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * functions exported by scsi_vhci struct dev_ops
 */
static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * functions exported by scsi_vhci scsi_hba_tran_t transport table
 */
static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
    struct scsi_device *);
static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_target(struct scsi_address *, int level,
    uint8_t select_path);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip);
static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
    void *, void *);
static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);

/*
 * functions registered with the mpxio framework via mdi_vhci_ops_t
 */
static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
    mdi_pathinfo_state_t, uint32_t, int);
static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_failover(dev_info_t *, dev_info_t *, int);
static void vhci_client_attached(dev_info_t *);

static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
    int, caddr_t);
static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
    uint_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
    sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
    struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
    scsi_vhci_lun_t *, char *, char *);

static char *vhci_devnm_to_guid(char *);
static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
    int, int (*func)(caddr_t));
static void vhci_intr(struct scsi_pkt *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt *);
static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
static void vhci_lun_free(dev_info_t *);
static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
    uint8_t, uint8_t);
void vhci_update_pathstates(void *);

#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
static void vhci_print_cdb(dev_info_t *dip, uint_t level,
    char *title, uchar_t *cdb);
static void vhci_clean_print(dev_info_t *dev, uint_t level,
    char *title, uchar_t *data, int len);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);

/*
 * MP-API related functions
 */
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
    scsi_vhci_lun_t *, mdi_pathinfo_t *);
extern void *vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
    uint8_t, void *);
extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
    scsi_vhci_lun_t *);

#define	VHCI_DMA_MAX_XFER_CAP	INT_MAX

#define	VHCI_MAX_PGR_RETRIES	3

/*
 * Macros for the device-type mpxio options
 */
#define	LOAD_BALANCE_OPTIONS		"load-balance-options"
#define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
#define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
#define	DEVICE_TYPE_STR			"device-type"
#define	isdigit(ch)	((ch) >= '0' && (ch) <= '9')
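
/*
 * For reference, these macros correspond to scsi_vhci.conf syntax of
 * roughly the following shape (a hedged example; the vid/pid string,
 * option-list name, and region size are made up):
 *
 *	device-type-mpxio-options-list =
 *	    "device-type=ACME    MSU", "load-balance-options=lb-opts";
 *	lb-opts = "load-balance=logical-block", "region-size=18";
 */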

static struct cb_ops vhci_cb_ops = {
	vhci_open,			/* open */
	vhci_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	vhci_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops vhci_ops = {
	DEVO_REV,
	0,
	vhci_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	vhci_attach,		/* attach and detach are mandatory */
	vhci_detach,
	nodev,			/* reset */
	&vhci_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	vhci_version_name,	/* module name */
	&vhci_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static mdi_vhci_ops_t vhci_opinfo = {
	MDI_VHCI_OPS_REV,
	vhci_pathinfo_init,		/* Pathinfo node init callback */
	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
	vhci_pathinfo_state_change,	/* Pathinfo node state change */
	vhci_failover,			/* failover callback */
	vhci_client_attached		/* client attached callback */
};

/*
 * The scsi_failover table defines an ordered set of 'fops' modules supported
 * by scsi_vhci.  Currently, initialize this table from the 'ddi-forceload'
 * property specified in scsi_vhci.conf.
 */
struct scsi_failover {
	ddi_modhandle_t			sf_mod;
	struct scsi_failover_ops	*sf_sfo;
} *scsi_failover_table;
uint_t	scsi_nfailover;

int
_init(void)
{
	int	rval;

	/*
	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
	 * before registering with the transport.
	 */
	if ((rval = ddi_soft_state_init(&vhci_softstate,
	    sizeof (struct scsi_vhci), 1)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init:soft state init failed\n"));
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: scsi hba init failed\n"));
		ddi_soft_state_fini(&vhci_softstate);
		return (rval);
	}

	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
	vhci_targetmap_byport = mod_hash_create_strhash(
	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
	vhci_targetmap_bypid = mod_hash_create_idhash(
	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);

	if ((rval = mod_install(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
		if (vhci_targetmap_bypid)
			mod_hash_destroy_idhash(vhci_targetmap_bypid);
		if (vhci_targetmap_byport)
			mod_hash_destroy_strhash(vhci_targetmap_byport);
		mutex_destroy(&vhci_targetmap_mutex);
		cv_destroy(&vhci_cv);
		mutex_destroy(&vhci_global_mutex);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&vhci_softstate);
	}
	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
		return (rval);
	}

	if (vhci_targetmap_bypid)
		mod_hash_destroy_idhash(vhci_targetmap_bypid);
	if (vhci_targetmap_byport)
		mod_hash_destroy_strhash(vhci_targetmap_byport);
	mutex_destroy(&vhci_targetmap_mutex);
	cv_destroy(&vhci_cv);
	mutex_destroy(&vhci_global_mutex);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&vhci_softstate);

	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Lookup scsi_failover by "short name" of failover module.
 */
struct scsi_failover_ops *
vhci_failover_ops_by_name(char *name)
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
			return (sf->sf_sfo);
	}
	return (NULL);
}
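
/*
 * A hedged example of the 'ddi-forceload' property that
 * vhci_failover_modopen() below consumes from scsi_vhci.conf (module
 * paths are illustrative):
 *
 *	ddi-forceload =
 *	    "misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *	    "misc/scsi_vhci/scsi_vhci_f_sym",
 *	    "misc/scsi_vhci/scsi_vhci_f_tpgs";
 */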

/*
 * Load all scsi_failover_ops 'fops' modules.
 */
static void
vhci_failover_modopen(struct scsi_vhci *vhci)
{
	char	**module;
	int	i;
	struct scsi_failover	*sf;
	char	**dt;
	int	e;

	if (scsi_failover_table)
		return;

	/* Get the list of modules from scsi_vhci.conf */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf is missing 'ddi-forceload'");
		return;
	}
	if (scsi_nfailover == 0) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf has empty 'ddi-forceload'");
		ddi_prop_free(module);
		return;
	}

	/* allocate failover table based on number of modules */
	scsi_failover_table = (struct scsi_failover *)
	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
	    KM_SLEEP);

	/* loop over modules specified in scsi_vhci.conf and open each module */
	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
		if (module[i] == NULL)
			continue;

		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
		if (sf->sf_mod == NULL) {
			/*
			 * A module returns EEXIST if other software is
			 * supporting the intended function: for example
			 * the scsi_vhci_f_sum_emc module returns EEXIST
			 * from _init if EMC powerpath software is installed.
			 */
			if (e != EEXIST)
				cmn_err(CE_WARN, "scsi_vhci: unable to open "
				    "module '%s', error %d", module[i], e);
			continue;
		}
		sf->sf_sfo = ddi_modsym(sf->sf_mod,
		    "scsi_vhci_failover_ops", &e);
		if (sf->sf_sfo == NULL) {
			cmn_err(CE_WARN, "scsi_vhci: "
			    "unable to import 'scsi_failover_ops' from '%s', "
			    "error %d", module[i], e);
			(void) ddi_modclose(sf->sf_mod);
			sf->sf_mod = NULL;
			continue;
		}

		/* register vid/pid of devices supported with mpapi */
		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
			vhci_mpapi_add_dev_prod(vhci, *dt);
		sf++;
	}

	/* verify that at least the "well-known" modules were there */
	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");
	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");

	/* call sfo_init for modules that need it */
	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
			sf->sf_sfo->sfo_init();
	}

	ddi_prop_free(module);
}

/*
 * unload all loaded scsi_failover_ops modules
 */
static void
vhci_failover_modclose()
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
			continue;
		(void) ddi_modclose(sf->sf_mod);
		sf->sf_mod = NULL;
		sf->sf_sfo = NULL;
	}

	if (scsi_failover_table && scsi_nfailover)
		kmem_free(scsi_failover_table,
		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
	scsi_failover_table = NULL;
	scsi_nfailover = 0;
}
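
/*
 * The open/close/ioctl entry points below back the "devctl" minor node
 * created in vhci_attach(); management utilities reach the driver
 * through it, e.g. (illustrative path, not verified here):
 *
 *	fd = open("/devices/scsi_vhci:devctl", O_RDWR);
 */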

/* ARGSUSED */
static int
vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
		mutex_exit(&vhci->vhci_mutex);
		vhci_log(CE_NOTE, vhci->vhci_dip,
		    "!vhci%d: Already open\n", getminor(*devp));
		return (EBUSY);
	}

	vhci->vhci_state |= VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);
	return (0);
}


/* ARGSUSED */
static int
vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	vhci->vhci_state &= ~VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);

	return (0);
}

/* ARGSUSED */
static int
vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	if (IS_DEVCTL(cmd)) {
		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
	} else if (cmd == MP_CMD) {
		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
	} else {
		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
	}
}

/*
 * attach the module
 */
static int
vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			rval = DDI_FAILURE;
	int			scsi_hba_attached = 0;
	int			vhci_attached = 0;
	int			mutex_initted = 0;
	int			instance;
	struct scsi_vhci	*vhci;
	scsi_hba_tran_t		*tran;
	char			cache_name_buf[64];
	char			*data;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
		    "implemented\n"));
		return (rval);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_attach: unknown ddi command\n"));
		return (rval);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "soft state alloc failed\n"));
		return (DDI_FAILURE);
	}

	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "bad soft state\n"));
		ddi_soft_state_free(vhci_softstate, instance);
		return (DDI_FAILURE);
	}

	/* Allocate packet cache */
	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
	    "vhci%d_cache", instance);

	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	ASSERT(tran != NULL);

	vhci->vhci_tran		= tran;
	vhci->vhci_dip		= dip;
	vhci->vhci_instance	= instance;

	tran->tran_hba_private	= vhci;
	tran->tran_tgt_private	= NULL;
	tran->tran_tgt_init	= vhci_scsi_tgt_init;
	tran->tran_tgt_probe	= NULL;
	tran->tran_tgt_free	= vhci_scsi_tgt_free;

	tran->tran_start	= vhci_scsi_start;
	tran->tran_abort	= vhci_scsi_abort;
	tran->tran_reset	= vhci_scsi_reset;
	tran->tran_getcap	= vhci_scsi_getcap;
	tran->tran_setcap	= vhci_scsi_setcap;
	tran->tran_init_pkt	= vhci_scsi_init_pkt;
	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
	tran->tran_dmafree	= vhci_scsi_dmafree;
	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
	tran->tran_reset_notify	= vhci_scsi_reset_notify;

	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
	tran->tran_get_name	= vhci_scsi_get_name;
	tran->tran_bus_reset	= NULL;
	tran->tran_quiesce	= NULL;
	tran->tran_unquiesce	= NULL;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_post_event = NULL;

	tran->tran_bus_power = vhci_scsi_bus_power;

	tran->tran_bus_config = vhci_scsi_bus_config;

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
	    != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "mdi_vhci_register failed\n"));
		goto attach_fail;
	}
	vhci_attached++;

	/*
	 * Attach this instance of the hba.
	 *
	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
	 * driver, it has nothing to do with DMA.  However, when calling
	 * scsi_hba_attach_setup() we need to pass something valid in the
	 * dma attributes parameter, so we just use scsi_alloc_attr.
	 * SCSA itself seems to care only about the dma_attr_minxfer and
	 * dma_attr_burstsizes fields of the dma attributes structure, and
	 * expects those fields to be non-zero.
	 */
	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "hba attach failed\n"));
		goto attach_fail;
	}
	scsi_hba_attached++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    " ddi_create_minor_node failed\n"));
		goto attach_fail;
	}

	/*
	 * Set pm-want-child-notification property for
	 * power management of the phci and client
	 */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d failed to create pm-want-child-notification? prop",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto attach_fail;
	}

	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
	vhci->vhci_update_pathstates_taskq =
	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
	    MINCLSYSPRI, 1, 4, 0);
	ASSERT(vhci->vhci_taskq);
	ASSERT(vhci->vhci_update_pathstates_taskq);

	/*
	 * Set appropriate configuration flags based on options set in
	 * conf file.
	 */
	vhci->vhci_conf_flags = 0;
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
	    "auto-failback", &data) == DDI_SUCCESS) {
		if (strcmp(data, "enable") == 0)
			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
		ddi_prop_free(data);
	}

	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
		    "disabled through scsi_vhci.conf file.");
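
	/*
	 * The matching scsi_vhci.conf entry looks like (example value,
	 * matching the strcmp() against "enable" above):
	 *
	 *	auto-failback="enable";
	 */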

	/*
	 * Allocate an mpapi private structure
	 */
	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
	if (vhci_mpapi_init(vhci) != 0) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
		    "vhci_mpapi_init() failed"));
	}

	vhci_failover_modopen(vhci);		/* load failover modules */

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_attached)
		(void) mdi_vhci_unregister(dip, 0);

	if (scsi_hba_attached)
		(void) scsi_hba_detach(dip);

	if (vhci->vhci_tran)
		scsi_hba_tran_free(vhci->vhci_tran);

	if (mutex_initted) {
		mutex_destroy(&vhci->vhci_mutex);
	}

	ddi_soft_state_free(vhci_softstate, instance);
	return (DDI_FAILURE);
}
pm-want_child_notification?", 848 ddi_driver_name(dip), ddi_get_instance(dip)); 849 } 850 if (vhci_restart_timeid != 0) { 851 (void) untimeout(vhci_restart_timeid); 852 } 853 vhci_restart_timeid = 0; 854 855 mutex_destroy(&vhci->vhci_mutex); 856 vhci->vhci_dip = NULL; 857 vhci->vhci_tran = NULL; 858 taskq_destroy(vhci->vhci_taskq); 859 taskq_destroy(vhci->vhci_update_pathstates_taskq); 860 ddi_remove_minor_node(dip, NULL); 861 ddi_soft_state_free(vhci_softstate, instance); 862 863 vhci_failover_modclose(); /* unload failover modules */ 864 return (DDI_SUCCESS); 865 } 866 867 /* 868 * vhci_getinfo() 869 * Given the device number, return the devinfo pointer or the 870 * instance number. 871 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach. 872 */ 873 874 /*ARGSUSED*/ 875 static int 876 vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 877 { 878 struct scsi_vhci *vhcip; 879 int instance = MINOR2INST(getminor((dev_t)arg)); 880 881 switch (cmd) { 882 case DDI_INFO_DEVT2DEVINFO: 883 vhcip = ddi_get_soft_state(vhci_softstate, instance); 884 if (vhcip != NULL) 885 *result = vhcip->vhci_dip; 886 else { 887 *result = NULL; 888 return (DDI_FAILURE); 889 } 890 break; 891 892 case DDI_INFO_DEVT2INSTANCE: 893 *result = (void *)(uintptr_t)instance; 894 break; 895 896 default: 897 return (DDI_FAILURE); 898 } 899 900 return (DDI_SUCCESS); 901 } 902 903 /*ARGSUSED*/ 904 static int 905 vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 906 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 907 { 908 char *guid; 909 scsi_vhci_lun_t *vlun; 910 struct scsi_vhci *vhci; 911 clock_t from_ticks; 912 mdi_pathinfo_t *pip; 913 int rval; 914 915 ASSERT(hba_dip != NULL); 916 ASSERT(tgt_dip != NULL); 917 918 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS, 919 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 920 /* 921 * This must be the .conf node without GUID property. 922 * The node under fp already inserts a delay, so we 923 * just return from here. We rely on this delay to have 924 * all dips be posted to the ndi hotplug thread's newdev 925 * list. This is necessary for the deferred attach 926 * mechanism to work and opens() done soon after boot to 927 * succeed. 928 */ 929 VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid " 930 "property failed")); 931 return (DDI_NOT_WELL_FORMED); 932 } 933 934 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 935 /* 936 * This must be .conf node with the GUID property. We don't 937 * merge property by ndi_merge_node() here because the 938 * devi_addr_buf of .conf node is "" always according the 939 * implementation of vhci_scsi_get_name_bus_addr(). 

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
	ASSERT(vhci != NULL);

	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
	    "!tgt_init: called for %s (instance %d)\n",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));

	vlun = vhci_lun_lookup(tgt_dip);

	mutex_enter(&vhci_global_mutex);

	from_ticks = ddi_get_lbolt();
	if (vhci_to_ticks == 0) {
		vhci_to_ticks = from_ticks +
		    drv_usectohz(vhci_init_wait_timeout);
	}

#if DEBUG
	if (vlun) {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
		    "from_ticks %lx to_ticks %lx",
		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
	} else {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : vlun not found "
		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
		    vhci_to_ticks));
	}
#endif

	rval = mdi_select_path(tgt_dip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_rele_path(pip);
	}

	/*
	 * Wait for the following conditions :
	 *	1. no vlun available yet
	 *	2. no path established
	 *	3. timer did not expire
	 */
	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
	    (rval != MDI_SUCCESS)) {
		if (vlun && vlun->svl_not_supported) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
			    "vlun 0x%p lun guid %s not supported!",
			    (void *)vlun, guid));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
			vhci_first_time = 1;
		}
		if (vhci_first_time == 1) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
			    "no wait for %s. from_tick %lx, to_tick %lx",
			    guid, from_ticks, vhci_to_ticks));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}

		if (cv_timedwait(&vhci_cv,
		    &vhci_global_mutex, vhci_to_ticks) == -1) {
			/* Timed out */
#ifdef DEBUG
			if (vlun == NULL) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: no vlun for %s!", guid));
			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path count is "
				    "zero for %s!", guid));
			} else {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path not "
				    "available yet for %s!", guid));
			}
#endif /* DEBUG */
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		vlun = vhci_lun_lookup(tgt_dip);
		rval = mdi_select_path(tgt_dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if (rval == MDI_SUCCESS) {
			mdi_rele_path(pip);
		}
		from_ticks = ddi_get_lbolt();
	}
	mutex_exit(&vhci_global_mutex);

	ASSERT(vlun != NULL);
	ddi_prop_free(guid);
	hba_tran->tran_tgt_private = vlun;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
}

/*
 * a PGR register command has started; copy the info we need
 */
static int
vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
{
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	void			*addr;

	if (!vpkt->vpkt_tgt_init_bp)
		return (TRAN_BADPKT);

	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
	if (addr == NULL)
		return (TRAN_BUSY);

	mutex_enter(&vlun->svl_mutex);

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");

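	/*
	 * Copy in everything except the trailing two MHIOC_RESV_KEY_SIZE
	 * key images at the end of vhci_prout_t; those copies are
	 * maintained by scsi_vhci itself rather than taken from the
	 * caller's buffer.
	 */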
	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");

	vlun->svl_time = pkt->pkt_time;
	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
	vlun->svl_first_path = vpkt->vpkt_path;
	mutex_exit(&vlun->svl_mutex);
	return (0);
}

/*
 * Function name : vhci_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
 *					  or other fatal failure
 *					  preventing packet transportation
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to phci
 *					  (or is held in the waitQ)
 * Description	: Implements SCSA's tran_start() entry point for
 *		  packet transport
 *
 */
static int
vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int			rval = TRAN_ACCEPT;
	int			instance, held;
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	int			flags = 0;
	scsi_vhci_priv_t	*svp;
	dev_info_t		*cdip;
	client_lb_t		lbp;
	int			restore_lbp = 0;
	/* set if pkt is SCSI-II RESERVE cmd */
	int			pkt_reserve_cmd = 0;
	int			reserve_failed = 0;

	ASSERT(vhci != NULL);
	ASSERT(vpkt != NULL);
	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
	cdip = ADDR2DIP(ap);

	/*
	 * Block IOs if LUN is held or QUIESCED for IOs.
	 */
	if ((VHCI_LUN_IS_HELD(vlun)) ||
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		return (TRAN_BUSY);
	}

	/*
	 * vhci_lun needs to be quiesced before a SCSI-II RESERVE command
	 * can be issued.  This may require a cv_timedwait, which is
	 * dangerous to perform in an interrupt context.  So if this
	 * is a RESERVE command a taskq is dispatched to service it.
	 * This taskq shall again call vhci_scsi_start, but this time we
	 * can be sure it is not in an interrupt context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
			if (taskq_dispatch(vhci->vhci_taskq,
			    vhci_dispatch_scsi_start, (void *) vpkt,
			    KM_NOSLEEP)) {
				return (TRAN_ACCEPT);
			} else {
				return (TRAN_BUSY);
			}
		}

		/*
		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
		 * get serviced for a lun.
		 */
		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
		if (!held) {
			return (TRAN_BUSY);
		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
		    VLUN_QUIESCED_FLG) {
			VHCI_RELEASE_LUN(vlun);
			return (TRAN_BUSY);
		}

		/*
		 * To ensure that no IOs occur for this LUN for the duration
		 * of this pkt set the VLUN_QUIESCED_FLG.
		 * In case this routine needs to exit on error make sure that
		 * this flag is cleared.
		 */
		vlun->svl_flags |= VLUN_QUIESCED_FLG;
		pkt_reserve_cmd = 1;

		/*
		 * if this is a SCSI-II RESERVE command, set load balancing
		 * policy to be ALTERNATE PATH to ensure that all subsequent
		 * IOs are routed on the same path.  This is because if
		 * commands are routed across multiple paths then IOs on
		 * paths other than the one on which the RESERVE was executed
		 * will get a RESERVATION CONFLICT.
		 */
		lbp = mdi_get_lb_policy(cdip);
		if (lbp != LOAD_BALANCE_NONE) {
			if (vhci_quiesce_lun(vlun) != 1) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			vlun->svl_lb_policy_save = lbp;
			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
			    MDI_SUCCESS) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			restore_lbp = 1;
		}
		/*
		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
		 * To narrow this window where a reserve command may be sent
		 * down an inactive path the path states first need to be
		 * updated.  Before calling vhci_update_pathstates reset
		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
		 * for this lun.  This shall prevent an unnecessary reset
		 * from being sent out.
		 */
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		vhci_update_pathstates((void *)vlun);
	}

	instance = ddi_get_instance(vhci->vhci_dip);

	/*
	 * If the command is PRIN with action of zero, then the cmd
	 * is reading PR keys which requires filtering on completion.
	 * Data cache sync must be guaranteed.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
	    (vpkt->vpkt_org_vpkt == NULL)) {
		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
	}

	/*
	 * Do not defer bind for PKT_DMA_PARTIAL
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {

		/* This is a non pkt_dma_partial case */
		if ((rval = vhci_bind_transport(
		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
		    != TRAN_ACCEPT) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci%d %x: failed to bind transport: "
			    "vlun 0x%p pkt_reserved %x restore_lbp %x,"
			    "lbp %x", instance, rval, (void *)vlun,
			    pkt_reserve_cmd, restore_lbp, lbp));
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
			if (pkt_reserve_cmd)
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			return (rval);
		}
		VHCI_DEBUG(8, (CE_NOTE, NULL,
		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
	}
	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	ASSERT(vpkt->vpkt_path != NULL);

	/*
	 * This is the chance to adjust the pHCI's pkt and other information
	 * from the target driver's pkt.
	 */
	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
	    (void *)vpkt));
	vhci_update_pHCI_pkt(vpkt, pkt);

	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
			    "pktpath 0x%p resrv path 0x%p differ. "
			    "lb_policy %x",
			    (void *)vlun, (void *)vpkt->vpkt_path,
			    (void *)vlun->svl_resrv_pip,
			    mdi_get_lb_policy(cdip)));
			reserve_failed = 1;
		}
	}

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
	if (svp == NULL || reserve_failed) {
		if (pkt_reserve_cmd) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind returned null svp vlun 0x%p",
			    (void *)vlun));
			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
		}
pkt_cleanup:
		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
			vpkt->vpkt_hba_pkt = NULL;
			if (vpkt->vpkt_path) {
				mdi_rele_path(vpkt->vpkt_path);
				vpkt->vpkt_path = NULL;
			}
		}
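		/*
		 * Note: the low 5 bits of CDB byte 1 of a PERSISTENT
		 * RESERVE OUT command carry its service action, so the
		 * tests below match REGISTER and REGISTER AND IGNORE
		 * EXISTING KEY requests.
		 */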
lb_policy %x", 1263 (void *)vlun, (void *)vpkt->vpkt_path, 1264 (void *)vlun->svl_resrv_pip, 1265 mdi_get_lb_policy(cdip))); 1266 reserve_failed = 1; 1267 } 1268 } 1269 1270 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path); 1271 if (svp == NULL || reserve_failed) { 1272 if (pkt_reserve_cmd) { 1273 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip, 1274 "!vhci_bind returned null svp vlun 0x%p", 1275 (void *)vlun)); 1276 vlun->svl_flags &= ~VLUN_QUIESCED_FLG; 1277 if (restore_lbp) 1278 (void) mdi_set_lb_policy(cdip, lbp); 1279 } 1280 pkt_cleanup: 1281 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) { 1282 scsi_destroy_pkt(vpkt->vpkt_hba_pkt); 1283 vpkt->vpkt_hba_pkt = NULL; 1284 if (vpkt->vpkt_path) { 1285 mdi_rele_path(vpkt->vpkt_path); 1286 vpkt->vpkt_path = NULL; 1287 } 1288 } 1289 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) && 1290 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 1291 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) { 1292 sema_v(&vlun->svl_pgr_sema); 1293 } 1294 return (TRAN_BUSY); 1295 } 1296 1297 VHCI_INCR_PATH_CMDCOUNT(svp); 1298 1299 /* 1300 * Ensure that no other IOs raced ahead, while a RESERVE cmd was 1301 * QUIESCING the same lun. 1302 */ 1303 if ((!pkt_reserve_cmd) && 1304 ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) { 1305 VHCI_DECR_PATH_CMDCOUNT(svp); 1306 goto pkt_cleanup; 1307 } 1308 1309 if ((pkt->pkt_cdbp[0] == SCMD_PRIN) || 1310 (pkt->pkt_cdbp[0] == SCMD_PROUT)) { 1311 /* 1312 * currently this thread only handles running PGR 1313 * commands, so don't bother creating it unless 1314 * something interesting is going to happen (like 1315 * either a PGR out, or a PGR in with enough space 1316 * to hold the keys that are getting returned) 1317 */ 1318 mutex_enter(&vlun->svl_mutex); 1319 if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) && 1320 (pkt->pkt_cdbp[0] == SCMD_PROUT)) { 1321 vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon", 1322 1, MINCLSYSPRI, 1, 4, 0); 1323 vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG; 1324 } 1325 mutex_exit(&vlun->svl_mutex); 1326 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) && 1327 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) || 1328 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) { 1329 if (rval = vhci_pgr_register_start(vlun, pkt)) { 1330 /* an error */ 1331 sema_v(&vlun->svl_pgr_sema); 1332 return (rval); 1333 } 1334 } 1335 } 1336 1337 /* 1338 * SCSI-II RESERVE cmd is not expected in polled mode. 1339 * If this changes it needs to be handled for the polled scenario. 1340 */ 1341 flags = vpkt->vpkt_hba_pkt->pkt_flags; 1342 1343 /* 1344 * Set the path_instance *before* sending the scsi_pkt down the path 1345 * to mpxio's pHCI so that additional path abstractions at a pHCI 1346 * level (like maybe iSCSI at some point in the future) can update 1347 * the path_instance. 

/*
 * Function name : vhci_scsi_reset()
 *
 * Return Values : 0 - reset failed
 *		   1 - reset succeeded
 */

/* ARGSUSED */
static int
vhci_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = 0;

	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
		return (vhci_scsi_reset_target(ap, level, TRUE));
	} else if (level == RESET_ALL) {
		return (vhci_scsi_reset_bus(ap));
	}

	return (rval);
}

/*
 * vhci_recovery_reset:
 *	Issues reset to the device
 * Input:
 *	vlun - vhci lun pointer of the device
 *	ap - address of the device
 *	select_path:
 *		If select_path is FALSE, then the address specified in ap is
 *		the path on which reset will be issued.
 *		If select_path is TRUE, then path is obtained by calling
 *		mdi_select_path.
 *
 *	recovery_depth:
 *		Caller can specify the level of reset.
 *		VHCI_DEPTH_LUN -
 *			Issues LUN RESET if device supports lun reset.
 *		VHCI_DEPTH_TARGET -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET
 *		VHCI_DEPTH_ALL -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET.
 *			If TARGET RESET does not succeed, issues Bus Reset.
 */

static int
vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
    uint8_t select_path, uint8_t recovery_depth)
{
	int	ret = 0;

	ASSERT(ap != NULL);

	if (vlun && vlun->svl_support_lun_reset == 1) {
		ret = vhci_scsi_reset_target(ap, RESET_LUN,
		    select_path);
	}

	recovery_depth--;

	if ((ret == 0) && recovery_depth) {
		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
		    select_path);
		recovery_depth--;
	}

	if ((ret == 0) && recovery_depth) {
		(void) scsi_reset(ap, RESET_ALL);
	}

	return (ret);
}
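
/*
 * Typical (illustrative) invocation from error recovery, where tpkt is
 * the target driver's pkt for a failed command; this sketch is not a
 * verbatim call site:
 *
 *	(void) vhci_recovery_reset(vlun, &tpkt->pkt_address,
 *	    TRUE, VHCI_DEPTH_TARGET);
 */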

/*
 * Note: The scsi_address passed to this routine could be the scsi_address
 * for the virtual device or the physical device.  No assumptions should be
 * made in this routine about the ap structure, and the
 * a_hba_tran->tran_tgt_private field of ap cannot be assumed to be the
 * vhci structure.
 * Further note that the child dip would be the dip of the ssd node
 * irrespective of the scsi_address passed.
 */

static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
	dev_info_t		*vdip, *pdip, *cdip = ADDR2DIP(ap);
	mdi_pathinfo_t		*pip = NULL;
	mdi_pathinfo_t		*npip = NULL;
	int			rval = -1;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	struct scsi_vhci	*vhci = NULL;

	if (select_path != TRUE) {
		ASSERT(ap != NULL);
		if (level == RESET_LUN) {
			hba = ap->a_hba_tran;
			ASSERT(hba != NULL);
			return (hba->tran_reset(ap, RESET_LUN));
		}
		return (scsi_reset(ap, level));
	}

	ASSERT(cdip != NULL);
	vdip = ddi_get_parent(cdip);
	ASSERT(vdip != NULL);
	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "Unable to get a path, dip 0x%p", (void *)cdip));
		return (0);
	}
again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (0);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (0);
	}

	pap = &svp->svp_psd->sd_address;
	hba = pap->a_hba_tran;

	ASSERT(pap != NULL);
	ASSERT(hba != NULL);

	if (hba->tran_reset != NULL) {
		if (hba->tran_reset(pap, level) == 0) {
			pdip = mdi_pi_get_phci(pip);
			vhci_log(CE_WARN, vdip, "!(%s%d):"
			    " path (%s%d), reset %d failed",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    ddi_driver_name(pdip), ddi_get_instance(pdip),
			    level);

			/*
			 * Select the next path and issue the reset, repeat
			 * until all paths are exhausted
			 */
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_ONLINE_PATH, pip, &npip);
			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
				mdi_rele_path(pip);
				return (0);
			}
			mdi_rele_path(pip);
			pip = npip;
			goto again;
		}
		mdi_rele_path(pip);
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
		    "reset %d sent down pip:%p for cdip:%p\n", level,
		    (void *)pip, (void *)cdip));
		return (1);
	}
	mdi_rele_path(pip);
	return (0);
}


/* ARGSUSED */
static int
vhci_scsi_reset_bus(struct scsi_address *ap)
{
	return (1);
}


/*
 * called by vhci_getcap and vhci_setcap to get and set (respectively)
 * SCSI capabilities
 */
/* ARGSUSED */
static int
vhci_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	int			cidx;
	int			rval = 0;

	if (cap == (char *)0) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: invalid arg"));
		return (rval);
	}

	if (vlun == NULL) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: vlun is null"));
		return (rval);
	}

	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		return (UNDEFINED);
	}

	/*
	 * Process setcap request.
	 */
	if (doset) {
		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:
			if (val == 0) {
				rval = 0;
			} else {
				rval = 1;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			if (tgtonly == 0) {
				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
				    "scsi_vhci_setcap: "
				    "Returning error since whom = 0"));
				rval = -1;
				break;
			}
			/*
			 * Set the capability accordingly.
			 */
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_support_lun_reset = val;
			rval = val;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_sector_size = val;
			vlun->svl_setcap_done = 1;
			mutex_exit(&vlun->svl_mutex);
			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);

			/* Always return success */
			rval = 1;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_setcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			/*
			 * For X86 this capability is caught in scsi_ifgetcap().
			 * XXX Should this be getting the value from the pHCI?
			 */
			rval = (int)VHCI_DMA_MAX_XFER_CAP;
			break;

		case SCSI_CAP_INITIATOR_ID:
			rval = 0x00;
			break;

		case SCSI_CAP_ARQ:
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = 1;
			break;

		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;

		case SCSI_CAP_INTERCONNECT_TYPE:
			rval = INTERCONNECT_FABRIC;
			break;

		case SCSI_CAP_LUN_RESET:
			/*
			 * scsi_vhci will always return success for LUN reset.
			 * When request for doing LUN reset comes
			 * through scsi_reset entry point, at that time attempt
			 * will be made to do reset through all the possible
			 * paths.
			 */
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_support_lun_reset;
			mutex_exit(&vlun->svl_mutex);
			VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
			    "scsi_vhci_getcap:"
			    "Getting the Lun reset capability %d", rval));
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			rval = vlun->svl_sector_size;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_CDB_LEN:
			rval = VHCI_SCSI_CDB_SIZE;
			break;

		case SCSI_CAP_DMA_MAX_ARCH:
			/*
			 * For X86 this capability is caught in scsi_ifgetcap().
			 * XXX Should this be getting the value from the pHCI?
			 */
			rval = 0;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_getcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!get cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));
	}
	return (rval);
}
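
/*
 * For context, a target driver reaches vhci_commoncap() through the
 * SCSA capability interfaces; an illustrative query (using the
 * standard "lun-reset" capstring for SCSI_CAP_LUN_RESET):
 *
 *	if (scsi_ifgetcap(&sd->sd_address, "lun-reset", 1) == 1)
 *		lun_reset_supported = 1;
 */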

/*
 * Function name : vhci_scsi_getcap()
 *
 */
static int
vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	return (vhci_commoncap(ap, cap, 0, whom, 0));
}

static int
vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	return (vhci_commoncap(ap, cap, value, whom, 1));
}

/*
 * Function name : vhci_scsi_abort()
 */
/* ARGSUSED */
static int
vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	return (0);
}
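
/*
 * vhci_scsi_init_pkt() below is reached via scsi_init_pkt() from the
 * target driver; an illustrative caller (names hypothetical):
 *
 *	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, PKT_DMA_PARTIAL,
 *	    SLEEP_FUNC, NULL);
 */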

/*
 * Function name : vhci_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 */
/* ARGSUSED */
static struct scsi_pkt *
vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(caddr_t), caddr_t arg)
{
    struct scsi_vhci *vhci = ADDR2VHCI(ap);
    struct vhci_pkt *vpkt;
    int rval;
    int newpkt = 0;
    struct scsi_pkt *pktp;

    if (pkt == NULL) {
        if (cmdlen > VHCI_SCSI_CDB_SIZE) {
            VHCI_DEBUG(1, (CE_NOTE, NULL,
                "!init pkt: cdb size not supported\n"));
            return (NULL);
        }

        pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
            ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
            arg);

        if (pktp == NULL) {
            return (NULL);
        }

        /* Get the vhci's private structure */
        vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
        ASSERT(vpkt);

        /* Save the target driver's packet */
        vpkt->vpkt_tgt_pkt = pktp;

        /*
         * Save pkt_tgt_init_pkt fields if deferred binding
         * is needed or for other purposes.
         */
        vpkt->vpkt_tgt_init_pkt_flags = flags;
        vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
        vpkt->vpkt_state = VHCI_PKT_IDLE;
        vpkt->vpkt_tgt_init_cdblen = cmdlen;
        vpkt->vpkt_tgt_init_scblen = statuslen;
        newpkt = 1;
    } else { /* pkt not NULL */
        vpkt = pkt->pkt_ha_private;
    }

    VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
        "vpkt %p flags %x\n", (void *)vpkt, flags));

    /* Clear any stale error flags */
    if (bp) {
        bioerror(bp, 0);
    }

    vpkt->vpkt_tgt_init_bp = bp;

    if (flags & PKT_DMA_PARTIAL) {
        /*
         * Immediate binding is needed.
         * The target driver may not set this flag in the next
         * invocation, so vhci has to remember that the flag was set
         * during the first invocation of vhci_scsi_init_pkt.
         */
        vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
    }

    if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
        /*
         * Re-initialize some of the target driver packet state
         * information.
         */
        vpkt->vpkt_tgt_pkt->pkt_state = 0;
        vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
        vpkt->vpkt_tgt_pkt->pkt_reason = 0;

        /*
         * Bind a vpkt->vpkt_path for this I/O at init time.  If an
         * I/O error happens later, the target driver will clear this
         * vpkt->vpkt_path binding before re-initializing the I/O.
         */
        VHCI_DEBUG(8, (CE_NOTE, NULL,
            "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
            (void *)vpkt, newpkt));
        if (pkt && vpkt->vpkt_hba_pkt) {
            VHCI_DEBUG(4, (CE_NOTE, NULL,
                "v_s_i_p calling update_pHCI_pkt resid %ld\n",
                pkt->pkt_resid));
            vhci_update_pHCI_pkt(vpkt, pkt);
        }
        if (callback == SLEEP_FUNC) {
            rval = vhci_bind_transport(ap, vpkt, flags, callback);
        } else {
            rval = vhci_bind_transport(ap, vpkt, flags, NULL_FUNC);
        }
        VHCI_DEBUG(8, (CE_NOTE, NULL,
            "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
            (void *)vpkt, rval));
        if (bp) {
            if (rval == TRAN_FATAL_ERROR) {
                /*
                 * No paths available.  Could not bind
                 * any pHCI.  Setting EFAULT as a way
                 * to indicate no DMA is mapped.
                 */
                bioerror(bp, EFAULT);
            } else {
                /*
                 * Do not indicate any pHCI errors to
                 * the target driver otherwise.
                 */
                bioerror(bp, 0);
            }
        }
        if (rval != TRAN_ACCEPT) {
            VHCI_DEBUG(8, (CE_NOTE, NULL,
                "vhci_scsi_init_pkt: "
                "v_b_t failed 0x%p newpkt %x\n",
                (void *)vpkt, newpkt));
            if (newpkt) {
                scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
            }
            return (NULL);
        }
        ASSERT(vpkt->vpkt_hba_pkt != NULL);
        ASSERT(vpkt->vpkt_path != NULL);

        /* Update the resid for the target driver */
        vpkt->vpkt_tgt_pkt->pkt_resid = vpkt->vpkt_hba_pkt->pkt_resid;
    }

    return (vpkt->vpkt_tgt_pkt);
}

/*
 * Function name : vhci_scsi_destroy_pkt
 *
 * Return Values : none
 */
static void
vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;

    VHCI_DEBUG(8, (CE_NOTE, NULL,
        "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));

    vpkt->vpkt_tgt_init_pkt_flags = 0;
    if (vpkt->vpkt_hba_pkt) {
        scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
        vpkt->vpkt_hba_pkt = NULL;
    }
    if (vpkt->vpkt_path) {
        mdi_rele_path(vpkt->vpkt_path);
        vpkt->vpkt_path = NULL;
    }

    ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
    scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
}

/*
 * Function name : vhci_scsi_dmafree()
 *
 * Return Values : none
 */
/*ARGSUSED*/
static void
vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;

    VHCI_DEBUG(6, (CE_NOTE, NULL,
        "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));

    ASSERT(vpkt != NULL);
    if (vpkt->vpkt_hba_pkt) {
        scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
        vpkt->vpkt_hba_pkt = NULL;
    }
    if (vpkt->vpkt_path) {
        mdi_rele_path(vpkt->vpkt_path);
        vpkt->vpkt_path = NULL;
    }
}

/*
 * Function name : vhci_scsi_sync_pkt()
 *
 * Return Values : none
 */
/*ARGSUSED*/
static void
vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;

    ASSERT(vpkt != NULL);
    if (vpkt->vpkt_hba_pkt) {
        scsi_sync_pkt(vpkt->vpkt_hba_pkt);
    }
}
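
/*
 * Teardown note: vhci_scsi_dmafree() above releases only the pHCI-side
 * resources (it destroys the pHCI packet and drops the path hold),
 * which is what allows a PKT_DMA_PARTIAL transfer to be re-bound to a
 * different path on the next vhci_scsi_init_pkt() call.
 * vhci_scsi_destroy_pkt() does the same and additionally frees the
 * target-driver packet itself.
 */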

/*
 * Routine for reset notification setup: register or cancel.
 */
static int
vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
    struct scsi_vhci *vhci = ADDR2VHCI(ap);
    return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
        &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
}

static int
vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
    char *name, int len, int bus_addr)
{
    dev_info_t *cdip;
    char *guid;
    scsi_vhci_lun_t *vlun;

    ASSERT(sd != NULL);
    ASSERT(name != NULL);

    cdip = sd->sd_dev;

    ASSERT(cdip != NULL);

    if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS) {
        name[0] = '\0';
        return (1);
    }

    if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
        MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
        name[0] = '\0';
        return (1);
    }

    vlun = ADDR2VLUN(&sd->sd_address);
    if (bus_addr && vlun && vlun->svl_fops_name) {
        /* report the guid and the name of the failover module */
        (void) snprintf(name, len, "g%s %s", guid, vlun->svl_fops_name);
    } else {
        /* report the guid */
        (void) snprintf(name, len, "g%s", guid);
    }

    ddi_prop_free(guid);
    return (1);
}

static int
vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
    return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
}

static int
vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
{
    return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
}

/*
 * Return a pointer to the guid part of the devnm.
 * devnm format is "nodename@busaddr", busaddr format is "gGUID".
 */
static char *
vhci_devnm_to_guid(char *devnm)
{
    char *cp = devnm;

    if (devnm == NULL)
        return (NULL);

    while (*cp != '\0' && *cp != '@')
        cp++;
    if (*cp == '@' && *(cp + 1) == 'g')
        return (cp + 2);
    return (NULL);
}
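
/*
 * Example (hypothetical GUID, for illustration only): given the devnm
 * "ssd@g600a0b800011ed66", vhci_devnm_to_guid() skips to the '@',
 * verifies the 'g' prefix, and returns a pointer to
 * "600a0b800011ed66".  A devnm with no '@', or whose bus address does
 * not start with 'g', yields NULL.
 */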

static int
vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
    int (*func)(caddr_t))
{
    struct scsi_vhci *vhci = ADDR2VHCI(ap);
    dev_info_t *cdip = ADDR2DIP(ap);
    mdi_pathinfo_t *pip = NULL;
    mdi_pathinfo_t *npip = NULL;
    scsi_vhci_priv_t *svp = NULL;
    struct scsi_device *psd = NULL;
    struct scsi_address *address = NULL;
    struct scsi_pkt *pkt = NULL;
    int rval = -1;
    int pgr_sema_held = 0;
    int held;
    int mps_flag = MDI_SELECT_ONLINE_PATH;
    struct scsi_vhci_lun *vlun;
    time_t tnow;
    int path_instance = 0;

    vlun = ADDR2VLUN(ap);
    ASSERT(vlun != 0);

    if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
        (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
        VHCI_PROUT_REGISTER) ||
        ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
        VHCI_PROUT_R_AND_IGNORE))) {
        if (!sema_tryp(&vlun->svl_pgr_sema))
            return (TRAN_BUSY);
        pgr_sema_held = 1;
        if (vlun->svl_first_path != NULL) {
            rval = mdi_select_path(cdip, NULL,
                MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
                NULL, &pip);
            if ((rval != MDI_SUCCESS) || (pip == NULL)) {
                VHCI_DEBUG(4, (CE_NOTE, NULL,
                    "vhci_bind_transport: path select fail\n"));
            } else {
                npip = pip;
                do {
                    if (npip == vlun->svl_first_path) {
                        VHCI_DEBUG(4, (CE_NOTE, NULL,
                            "vhci_bind_transport: "
                            "valid first path 0x%p\n",
                            (void *)vlun->svl_first_path));
                        pip = vlun->svl_first_path;
                        goto bind_path;
                    }
                    pip = npip;
                    rval = mdi_select_path(cdip, NULL,
                        MDI_SELECT_ONLINE_PATH |
                        MDI_SELECT_STANDBY_PATH,
                        pip, &npip);
                    mdi_rele_path(pip);
                } while ((rval == MDI_SUCCESS) && (npip != NULL));
            }
        }

        if (vlun->svl_first_path) {
            VHCI_DEBUG(4, (CE_NOTE, NULL,
                "vhci_bind_transport: invalid first path 0x%p\n",
                (void *)vlun->svl_first_path));
            vlun->svl_first_path = NULL;
        }
    } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
        if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
            if (!sema_tryp(&vlun->svl_pgr_sema))
                return (TRAN_BUSY);
        }
        pgr_sema_held = 1;
    }

    /*
     * If the path is already bound for the PKT_PARTIAL_DMA case,
     * try to use the same path.
     */
    if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
        VHCI_DEBUG(4, (CE_NOTE, NULL,
            "vhci_bind_transport: PKT_PARTIAL_DMA "
            "vpkt 0x%p, path 0x%p\n",
            (void *)vpkt, (void *)vpkt->vpkt_path));
        pip = vpkt->vpkt_path;
        goto bind_path;
    }

    /*
     * Get path_instance.  A non-zero value with FLAG_PKT_PATH_INSTANCE
     * set indicates that mdi_select_path should be called to select a
     * specific instance.
     *
     * NB: Condition pkt_path_instance reference on proper allocation.
     */
    if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
        scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
        path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
    }

    /*
     * If a reservation is active, bind the transport directly to the
     * pip with the reservation.
     */
    if (vpkt->vpkt_hba_pkt == NULL) {
        if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
            if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
                pip = vlun->svl_resrv_pip;
                mdi_hold_path(pip);
                vlun->svl_waiting_for_activepath = 0;
                rval = MDI_SUCCESS;
                goto bind_path;
            } else {
                if (pgr_sema_held) {
                    sema_v(&vlun->svl_pgr_sema);
                }
                return (TRAN_BUSY);
            }
        }
try_again:
        rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
            path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
            (void *)(intptr_t)path_instance, &pip);
        if (rval == MDI_BUSY) {
            if (pgr_sema_held) {
                sema_v(&vlun->svl_pgr_sema);
            }
            return (TRAN_BUSY);
        } else if (rval == MDI_DEVI_ONLINING) {
            /*
             * If we are here, then we are in the midst of an
             * attach/probe of the client device.  We attempt to
             * bind to an ONLINE path if available, else it is OK
             * to bind to a STANDBY path (instead of triggering a
             * failover) because the I/O associated with
             * attach/probe (e.g. INQUIRY, block 0 read) is
             * completed by targets even on passive paths.  If no
             * ONLINE path is available, it is important to set
             * svl_waiting_for_activepath for two reasons: (1) to
             * avoid sense analysis in the "external failure
             * detection" codepath in vhci_intr(), since failure
             * to do so would result in an infinite loop (unless
             * an ONLINE path becomes available at some point),
             * and (2) to avoid an unnecessary failover (see the
             * "---Waiting For Active Path---" comment below).
             */
            VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
                "state\n", (void *)cdip));
            pip = NULL;
            rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
                mps_flag, NULL, &pip);
            if ((rval != MDI_SUCCESS) || (pip == NULL)) {
                if (vlun->svl_waiting_for_activepath == 0) {
                    vlun->svl_waiting_for_activepath = 1;
                    vlun->svl_wfa_time = ddi_get_time();
                }
                mps_flag |= MDI_SELECT_STANDBY_PATH;
                rval = mdi_select_path(cdip,
                    vpkt->vpkt_tgt_init_bp,
                    mps_flag, NULL, &pip);
                if ((rval != MDI_SUCCESS) || (pip == NULL)) {
                    if (pgr_sema_held) {
                        sema_v(&vlun->svl_pgr_sema);
                    }
                    return (TRAN_FATAL_ERROR);
                }
                goto bind_path;
            }
        } else if (rval == MDI_FAILURE) {
            if (pgr_sema_held) {
                sema_v(&vlun->svl_pgr_sema);
            }
            return (TRAN_FATAL_ERROR);
        }

        if ((pip == NULL) || (rval == MDI_NOPATH)) {
            while (vlun->svl_waiting_for_activepath) {
                /*
                 * ---Waiting For Active Path---
                 * This device was discovered across a
                 * passive path; let's wait a little bit,
                 * hopefully an active path will show up,
                 * obviating the need for a failover.
                 */
                tnow = ddi_get_time();
                if (tnow - vlun->svl_wfa_time >= 60) {
                    vlun->svl_waiting_for_activepath = 0;
                } else {
                    drv_usecwait(1000);
                    if (vlun->svl_waiting_for_activepath == 0) {
                        /*
                         * An active path has come online!
                         */
                        goto try_again;
                    }
                }
            }
            VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
            if (!held) {
                VHCI_DEBUG(4, (CE_NOTE, NULL,
                    "!Lun not held\n"));
                if (pgr_sema_held) {
                    sema_v(&vlun->svl_pgr_sema);
                }
                return (TRAN_BUSY);
            }
            /*
             * Now that the LUN is stable, do one last check to
             * make sure no other changes sneaked in (like a path
             * coming online or a failover initiated by another
             * thread).
             */
            pip = NULL;
            rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
                0, NULL, &pip);
            if (pip != NULL) {
                VHCI_RELEASE_LUN(vlun);
                vlun->svl_waiting_for_activepath = 0;
                goto bind_path;
            }

            /*
             * Check if there is an ONLINE path OR a STANDBY path
             * available.  If none is available, do not attempt
             * to do a failover, just return a fatal error at this
             * point.
             */
            npip = NULL;
            rval = mdi_select_path(cdip, NULL,
                (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
                NULL, &npip);
            if ((npip == NULL) || (rval != MDI_SUCCESS)) {
                /*
                 * No paths available, just return a FATAL error.
                 */
                VHCI_RELEASE_LUN(vlun);
                if (pgr_sema_held) {
                    sema_v(&vlun->svl_pgr_sema);
                }
                return (TRAN_FATAL_ERROR);
            }
            mdi_rele_path(npip);
            VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
                "mdi_failover\n"));
            rval = mdi_failover(vhci->vhci_dip, cdip,
                MDI_FAILOVER_ASYNC);
            if (rval == MDI_FAILURE) {
                VHCI_RELEASE_LUN(vlun);
                if (pgr_sema_held) {
                    sema_v(&vlun->svl_pgr_sema);
                }
                return (TRAN_FATAL_ERROR);
            } else if (rval == MDI_BUSY) {
                VHCI_RELEASE_LUN(vlun);
                if (pgr_sema_held) {
                    sema_v(&vlun->svl_pgr_sema);
                }
                return (TRAN_BUSY);
            } else {
                if (pgr_sema_held) {
                    sema_v(&vlun->svl_pgr_sema);
                }
                return (TRAN_BUSY);
            }
        }
        vlun->svl_waiting_for_activepath = 0;
bind_path:
        vpkt->vpkt_path = pip;
        svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
        ASSERT(svp != NULL);

        psd = svp->svp_psd;
        ASSERT(psd != NULL);
        address = &psd->sd_address;
    } else {
        pkt = vpkt->vpkt_hba_pkt;
        address = &pkt->pkt_address;
    }

    /*
     * Verify match of specified path_instance and selected
     * path_instance.
     */
    ASSERT((path_instance == 0) ||
        (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));

    /*
     * For the PKT_PARTIAL_DMA case, call the pHCI's scsi_init_pkt
     * whenever the target driver calls vhci_scsi_init_pkt.
     */
    if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
        vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
        VHCI_DEBUG(4, (CE_NOTE, NULL,
            "vhci_bind_transport: PKT_PARTIAL_DMA "
            "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
            (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
        pkt = vpkt->vpkt_hba_pkt;
        address = &pkt->pkt_address;
    }

    if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
        pkt = scsi_init_pkt(address, pkt,
            vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
            vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL);

        if (pkt == NULL) {
            VHCI_DEBUG(4, (CE_NOTE, NULL,
                "!bind transport: 0x%p 0x%p 0x%p\n",
                (void *)vhci, (void *)psd, (void *)vpkt));
            if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
                MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
                mdi_rele_path(vpkt->vpkt_path);
                vpkt->vpkt_path = NULL;
            }
            if (pgr_sema_held) {
                sema_v(&vlun->svl_pgr_sema);
            }
            /*
             * Looks like a fatal error; maybe the device
             * disappeared underneath us.  Return TRAN_BUSY to
             * give the target driver another chance to retry and
             * get another path.
             */
            return (TRAN_BUSY);
        }
    }

    pkt->pkt_private = vpkt;
    vpkt->vpkt_hba_pkt = pkt;
    return (TRAN_ACCEPT);
}
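
/*
 * Return-value summary for vhci_bind_transport() (derived from the
 * code above): TRAN_ACCEPT means a pHCI packet is bound to
 * vpkt_hba_pkt and a path hold is owned by the vpkt; TRAN_BUSY means a
 * transient condition (sema contention, busy path selection, failover
 * in progress, or a failed pHCI scsi_init_pkt) that the caller may
 * retry; TRAN_FATAL_ERROR means no usable path exists at all.  On
 * every non-ACCEPT return, the PGR semaphore, if taken here, has
 * already been released.
 */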

/*PRINTFLIKE3*/
void
vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
{
    char buf[256];
    va_list ap;

    va_start(ap, fmt);
    /* bounded formatting; vsprintf here could overrun buf */
    (void) vsnprintf(buf, sizeof (buf), fmt, ap);
    va_end(ap);

    scsi_log(dip, "scsi_vhci", level, buf);
}

/* Do a PGR OUT with the information we've saved away. */
static int
vhci_do_prout(scsi_vhci_priv_t *svp)
{
    struct scsi_pkt *new_pkt;
    struct buf *bp;
    scsi_vhci_lun_t *vlun = svp->svp_svl;
    int rval, retry, nr_retry, ua_retry;
    struct scsi_extended_sense *sns;

    bp = getrbuf(KM_SLEEP);
    bp->b_flags = B_WRITE;
    bp->b_resid = 0;
    bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
    bp->b_bcount = vlun->svl_bcount;

    VHCI_INCR_PATH_CMDCOUNT(svp);

    new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
        CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
        SLEEP_FUNC, NULL);
    if (new_pkt == NULL) {
        VHCI_DECR_PATH_CMDCOUNT(svp);
        freerbuf(bp);
        cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
        return (0);
    }
    mutex_enter(&vlun->svl_mutex);
    bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
    bp->b_bcount = vlun->svl_bcount;
    bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp, sizeof (vlun->svl_cdb));
    new_pkt->pkt_time = vlun->svl_time;
    mutex_exit(&vlun->svl_mutex);
    new_pkt->pkt_flags = FLAG_NOINTR;

    ua_retry = nr_retry = retry = 0;
again:
    rval = vhci_do_scsi_cmd(new_pkt);
    if (rval != 1) {
        if ((new_pkt->pkt_reason == CMD_CMPLT) &&
            (SCBP_C(new_pkt) == STATUS_CHECK) &&
            (new_pkt->pkt_state & STATE_ARQ_DONE)) {
            sns = &(((struct scsi_arq_status *)(uintptr_t)
                (new_pkt->pkt_scbp))->sts_sensedata);
            if ((sns->es_key == KEY_UNIT_ATTENTION) ||
                (sns->es_key == KEY_NOT_READY)) {
                int max_retry;
                struct scsi_failover_ops *fops;
                fops = vlun->svl_fops;
                rval = fops->sfo_analyze_sense(svp->svp_psd,
                    sns, vlun->svl_fops_ctpriv);
                if (rval == SCSI_SENSE_NOT_READY) {
                    max_retry = vhci_prout_not_ready_retry;
                    retry = nr_retry++;
                    delay(1 * drv_usectohz(1000000));
                } else {
                    /* check for a state change and update */
                    if (rval == SCSI_SENSE_STATE_CHANGED) {
                        int held;
                        VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
                        if (!held) {
                            rval = TRAN_BUSY;
                        } else {
                            /* check for ALUA first */
                            vhci_update_pathstates((void *)vlun);
                        }
                    }
                    retry = ua_retry++;
                    max_retry = VHCI_MAX_PGR_RETRIES;
                }
                if (retry < max_retry) {
                    VHCI_DEBUG(4, (CE_WARN, NULL,
                        "!vhci_do_prout retry 0x%x "
                        "(0x%x 0x%x 0x%x)",
                        SCBP_C(new_pkt),
                        new_pkt->pkt_cdbp[0],
                        new_pkt->pkt_cdbp[1],
                        new_pkt->pkt_cdbp[2]));
                    goto again;
                }
                rval = 0;
                VHCI_DEBUG(4, (CE_WARN, NULL,
                    "!vhci_do_prout 0x%x "
                    "(0x%x 0x%x 0x%x)",
                    SCBP_C(new_pkt),
                    new_pkt->pkt_cdbp[0],
                    new_pkt->pkt_cdbp[1],
                    new_pkt->pkt_cdbp[2]));
            } else if (sns->es_key == KEY_ILLEGAL_REQUEST)
                rval = VHCI_PGR_ILLEGALOP;
        }
    } else {
        rval = 1;
    }
    scsi_destroy_pkt(new_pkt);
    VHCI_DECR_PATH_CMDCOUNT(svp);
    freerbuf(bp);
    return (rval);
}
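
/*
 * vhci_run_cmd() below finishes a PERSISTENT RESERVE OUT register
 * issued through vhci_scsi_start(): the key was already registered on
 * the path the command went down, and this taskq routine walks every
 * other ONLINE and STANDBY path, temporarily rewriting the CDB service
 * action to REGISTER AND IGNORE EXISTING KEY, so that the same key
 * ends up registered through every initiator port.  If replication
 * fails on any path, the original command is failed back to the target
 * driver with CMD_TRAN_ERR.
 */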
static void
vhci_run_cmd(void *arg)
{
    struct scsi_pkt *pkt = (struct scsi_pkt *)arg;
    struct scsi_pkt *tpkt;
    scsi_vhci_priv_t *svp;
    mdi_pathinfo_t *pip, *npip;
    scsi_vhci_lun_t *vlun;
    dev_info_t *cdip;
    scsi_vhci_priv_t *nsvp;
    int fail = 0;
    int rval;
    struct vhci_pkt *vpkt;
    uchar_t cdb_1;
    vhci_prout_t *prout;

    vpkt = (struct vhci_pkt *)pkt->pkt_private;
    tpkt = vpkt->vpkt_tgt_pkt;
    pip = vpkt->vpkt_path;
    svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
    if (svp == NULL) {
        tpkt->pkt_reason = CMD_TRAN_ERR;
        tpkt->pkt_statistics = STAT_ABORTED;
        goto done;
    }
    vlun = svp->svp_svl;
    prout = &vlun->svl_prout;
    if (SCBP_C(pkt) != STATUS_GOOD)
        fail++;
    cdip = vlun->svl_dip;
    pip = npip = NULL;
    rval = mdi_select_path(cdip, NULL,
        MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH, NULL, &npip);
    if ((rval != MDI_SUCCESS) || (npip == NULL)) {
        VHCI_DEBUG(4, (CE_NOTE, NULL,
            "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
        tpkt->pkt_reason = CMD_TRAN_ERR;
        tpkt->pkt_statistics = STAT_ABORTED;
        goto done;
    }

    cdb_1 = vlun->svl_cdb[1];
    vlun->svl_cdb[1] &= 0xe0;
    vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;

    do {
        nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
        if (nsvp == NULL) {
            VHCI_DEBUG(4, (CE_NOTE, NULL,
                "vhci_run_cmd: no client priv! 0x%p offlined?\n",
                (void *)npip));
            goto next_path;
        }
        if (vlun->svl_first_path == npip) {
            goto next_path;
        } else {
            if (vhci_do_prout(nsvp) != 1)
                fail++;
        }
next_path:
        pip = npip;
        rval = mdi_select_path(cdip, NULL,
            MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
            pip, &npip);
        mdi_rele_path(pip);
    } while ((rval == MDI_SUCCESS) && (npip != NULL));

    vlun->svl_cdb[1] = cdb_1;

    if (fail) {
        VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
            "couldn't be replicated on all paths",
            ddi_driver_name(cdip), ddi_get_instance(cdip)));
        vhci_print_prout_keys(vlun, "vhci_run_cmd: ");

        if (SCBP_C(pkt) != STATUS_GOOD) {
            tpkt->pkt_reason = CMD_TRAN_ERR;
            tpkt->pkt_statistics = STAT_ABORTED;
        }
    } else {
        vlun->svl_pgr_active = 1;
        vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");

        bcopy((const void *)prout->service_key,
            (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
        bcopy((const void *)prout->res_key,
            (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);

        vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
    }
done:
    if (SCBP_C(pkt) == STATUS_GOOD)
        vlun->svl_first_path = NULL;

    if (svp)
        VHCI_DECR_PATH_CMDCOUNT(svp);

    if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
        scsi_destroy_pkt(pkt);
        vpkt->vpkt_hba_pkt = NULL;
        if (vpkt->vpkt_path) {
            mdi_rele_path(vpkt->vpkt_path);
            vpkt->vpkt_path = NULL;
        }
    }

    sema_v(&vlun->svl_pgr_sema);
    /*
     * The PROUT commands are not included in the automatic retry
     * mechanism, therefore, vpkt_org_vpkt should never be set here.
     */
    ASSERT(vpkt->vpkt_org_vpkt == NULL);
    if (tpkt->pkt_comp)
        (*tpkt->pkt_comp)(tpkt);
}

/*
 * Get the keys registered with this target.  Since we will have
 * registered the same key with multiple initiators, strip out
 * any duplicate keys.
 *
 * The pointers which will be used to filter the registered keys from
 * the device will be stored in filter_prin and filter_pkt.  If the
 * allocation length of the buffer was sufficient for the number of
 * parameter data bytes available to be returned by the device, then
 * the key filtering will use the keylist returned from the original
 * request.  If the allocation length of the buffer was not sufficient,
 * then the filtering will use the keylist returned from the request
 * that is resent below.
 *
 * If the device returns an additional length field that is greater
 * than the allocation length of the buffer, then allocate a new buffer
 * which can accommodate the number of parameter data bytes available
 * to be returned.  Resend the SCSI PRIN command, filter out the
 * duplicate keys, return as many of the unique keys found as were
 * originally requested, and set the additional length field equal to
 * the data bytes of unique reservation keys available to be returned.
 *
 * If the device returns an additional length field that is less than
 * or equal to the allocation length of the buffer, then all the
 * available keys registered were returned by the device.  Filter out
 * the duplicate keys, return all of the unique keys found, and set the
 * additional length field equal to the data bytes of the reservation
 * keys to be returned.
 */
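
/*
 * Worked example (illustrative numbers only): the PRIN READ KEYS
 * parameter data starts with an 8-byte header (4 bytes GENERATION,
 * 4 bytes ADDITIONAL LENGTH), so hdr_len below is 8.  If the caller
 * supplied a 24-byte buffer (room for the header plus two 8-byte keys)
 * but the device reports an ADDITIONAL LENGTH of 40 (five keys, one
 * per initiator port), then b_bcount - hdr_len (16) < prin_length (40)
 * and the command is rebuilt with a 48-byte buffer; CDB bytes 7 and 8
 * carry the new allocation length (0x00, 0x30).
 */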
static int
vhci_do_prin(struct vhci_pkt *vpkt)
{
    scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
        mdi_pi_get_vhci_private(vpkt->vpkt_path);
    vhci_prin_readkeys_t *prin;
    scsi_vhci_lun_t *vlun = svp->svp_svl;
    struct scsi_vhci *vhci =
        ADDR2VHCI(&(vpkt->vpkt_tgt_pkt->pkt_address));

    struct buf *new_bp = NULL;
    struct scsi_pkt *new_pkt = NULL;
    struct vhci_pkt *new_vpkt = NULL;
    int hdr_len = 0;
    int rval = VHCI_CMD_CMPLT;
    uint32_t prin_length = 0;
    uint32_t svl_prin_length = 0;

    prin = (vhci_prin_readkeys_t *)
        bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);

    if (prin != NULL) {
        prin_length = BE_32(prin->length);
    }

    if (prin == NULL) {
        VHCI_DEBUG(5, (CE_WARN, NULL,
            "vhci_do_prin: bp_mapin_common failed."));
        rval = VHCI_CMD_ERROR;
    } else {
        /*
         * According to SPC-3r22, sec 4.3.4.6: "If the amount of
         * information to be transferred exceeds the maximum value
         * that the ALLOCATION LENGTH field is capable of specifying,
         * the device server shall...terminate the command with CHECK
         * CONDITION status".  The ALLOCATION LENGTH field of the
         * PERSISTENT RESERVE IN command is 2 bytes.  We should never
         * get here with an ADDITIONAL LENGTH greater than 0xFFFF,
         * so if we do, then it is an error!
         */

        hdr_len = sizeof (prin->length) + sizeof (prin->generation);

        if ((prin_length + hdr_len) > 0xFFFF) {
            VHCI_DEBUG(5, (CE_NOTE, NULL,
                "vhci_do_prin: Device returned invalid "
                "length 0x%x\n", prin_length));
            rval = VHCI_CMD_ERROR;
        }
    }

    /*
     * If prin->length is greater than the byte count allocated in the
     * original buffer, then resend the request with enough buffer
     * allocated to get all of the available registered keys.
     */
    if (rval != VHCI_CMD_ERROR) {
        if ((vpkt->vpkt_tgt_init_bp->b_bcount - hdr_len) <
            prin_length) {
            if (vpkt->vpkt_org_vpkt == NULL) {
                new_pkt = vhci_create_retry_pkt(vpkt);
                if (new_pkt != NULL) {
                    new_vpkt = TGTPKT2VHCIPKT(new_pkt);

                    /*
                     * This is the buf with the buffer pointer
                     * where the prin readkeys will be
                     * returned from the device.
                     */
                    new_bp = scsi_alloc_consistent_buf(
                        &svp->svp_psd->sd_address,
                        NULL, (prin_length + hdr_len),
                        (vpkt->vpkt_tgt_init_bp->b_flags &
                        (B_READ | B_WRITE)),
                        NULL_FUNC, NULL);
                    if (new_bp != NULL) {
                        if (new_bp->b_un.b_addr != NULL) {
                            new_bp->b_bcount =
                                prin_length + hdr_len;
                            new_pkt->pkt_cdbp[7] = (uchar_t)
                                (new_bp->b_bcount >> 8);
                            new_pkt->pkt_cdbp[8] =
                                (uchar_t)new_bp->b_bcount;
                            rval = VHCI_CMD_RETRY;
                        } else {
                            rval = VHCI_CMD_ERROR;
                        }
                    } else {
                        rval = VHCI_CMD_ERROR;
                    }
                } else {
                    rval = VHCI_CMD_ERROR;
                }
            } else {
                rval = VHCI_CMD_ERROR;
            }
        }
    }

    if (rval == VHCI_CMD_RETRY) {
        new_vpkt->vpkt_tgt_init_bp = new_bp;

        /*
         * Release the old path because it does not matter which path
         * this command is sent down.  This allows the normal bind
         * transport mechanism to be used.
         */
        if (vpkt->vpkt_path != NULL) {
            mdi_rele_path(vpkt->vpkt_path);
            vpkt->vpkt_path = NULL;
        }

        /*
         * Dispatch the retry command
         */
        if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
            (void *)new_vpkt, KM_NOSLEEP) == NULL) {
            rval = VHCI_CMD_ERROR;
        } else {
            /*
             * If we return VHCI_CMD_RETRY, that means the caller
             * is going to bail and wait for the reissued command
             * to complete.  In that case, we need to decrement
             * the path command count right now.  In any other
             * case, it'll be decremented by the caller.
             */
            VHCI_DECR_PATH_CMDCOUNT(svp);
        }
    }

    if ((rval != VHCI_CMD_ERROR) && (rval != VHCI_CMD_RETRY)) {
        int new, old;
        int data_len = 0;

        data_len = prin_length / MHIOC_RESV_KEY_SIZE;
        VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
            data_len));

#ifdef DEBUG
        VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
        if (vhci_debug == 5)
            vhci_print_prin_keys(prin, data_len);
        VHCI_DEBUG(5, (CE_NOTE, NULL,
            "vhci_do_prin: MPxIO old keys:\n"));
        if (vhci_debug == 5)
            vhci_print_prin_keys(&vlun->svl_prin, data_len);
#endif

        /*
         * Filter out all duplicate keys returned from the device.
         * We know that we use a different key for every host, so we
         * can simply strip out duplicates.  Otherwise we would need
         * to do more bookkeeping to figure out which keys to strip
         * out.
         */
        new = 0;

        if (data_len > 0) {
            vlun->svl_prin.keylist[0] = prin->keylist[0];
            new++;
        }

        for (old = 1; old < data_len; old++) {
            int j;
            int match = 0;
            for (j = 0; j < new; j++) {
                if (bcmp(&prin->keylist[old],
                    &vlun->svl_prin.keylist[j],
                    sizeof (mhioc_resv_key_t)) == 0) {
                    match = 1;
                    break;
                }
            }
            if (!match) {
                vlun->svl_prin.keylist[new] = prin->keylist[old];
                new++;
            }
        }

        vlun->svl_prin.generation = prin->generation;
        svl_prin_length = new * MHIOC_RESV_KEY_SIZE;
        vlun->svl_prin.length = BE_32(svl_prin_length);

        /*
         * If we arrived at this point after issuing a retry, make
         * sure that we put everything back the way it originally was
         * so that the target driver can complete the command
         * correctly.
         */
        if (vpkt->vpkt_org_vpkt != NULL) {
            new_bp = vpkt->vpkt_tgt_init_bp;

            scsi_free_consistent_buf(new_bp);

            vpkt = vhci_sync_retry_pkt(vpkt);

            /*
             * Make sure the original buffer is mapped into kernel
             * space before we try to copy the filtered keys into
             * it.
             */
            prin = (vhci_prin_readkeys_t *)bp_mapin_common(
                vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
        }

        /*
         * Now copy the desired number of prin keys into the original
         * target buffer.
         */
        if (svl_prin_length <=
            (vpkt->vpkt_tgt_init_bp->b_bcount - hdr_len)) {
            /*
             * It is safe to return all of the available unique
             * keys.
             */
            bcopy(&vlun->svl_prin, prin, svl_prin_length + hdr_len);
        } else {
            /*
             * Not all of the available keys were requested by the
             * original command.
             */
            bcopy(&vlun->svl_prin, prin,
                vpkt->vpkt_tgt_init_bp->b_bcount);
        }
#ifdef DEBUG
        VHCI_DEBUG(5, (CE_NOTE, NULL,
            "vhci_do_prin: To Application:\n"));
        if (vhci_debug == 5)
            vhci_print_prin_keys(prin, new);
        VHCI_DEBUG(5, (CE_NOTE, NULL,
            "vhci_do_prin: MPxIO new keys:\n"));
        if (vhci_debug == 5)
            vhci_print_prin_keys(&vlun->svl_prin, new);
#endif
    }

    if (rval == VHCI_CMD_ERROR) {
        /*
         * If we arrived at this point after issuing a
         * retry, make sure that we put everything back
         * the way it originally was so that ssd can
         * complete the command correctly.
         */
        if (vpkt->vpkt_org_vpkt != NULL) {
            new_bp = vpkt->vpkt_tgt_init_bp;
            if (new_bp != NULL) {
                scsi_free_consistent_buf(new_bp);
            }

            new_vpkt = vpkt;
            vpkt = vpkt->vpkt_org_vpkt;

            vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
                new_vpkt->vpkt_tgt_pkt);
        }

        /*
         * Mark this command completion as having an error so that
         * ssd will retry the command.
         */
        vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
        vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;

        rval = VHCI_CMD_CMPLT;
    }

    /*
     * Make sure that the semaphore is only released once.
     */
    if (rval == VHCI_CMD_CMPLT) {
        sema_v(&vlun->svl_pgr_sema);
    }

    return (rval);
}
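
/*
 * vhci_intr() below is the completion handler for every pHCI packet
 * issued by this driver.  It mirrors the pHCI packet's status, resid,
 * state and reason into the target driver's packet, then routes the
 * completion: CHECK CONDITION sense data is handed to the failover
 * module's sfo_analyze_sense() to detect externally initiated
 * failovers, successful PROUT registrations are replicated across the
 * remaining paths via vhci_run_cmd(), and PRIN READ KEYS completions
 * are filtered through vhci_do_prin() before reaching the caller.
 */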
static void
vhci_intr(struct scsi_pkt *pkt)
{
    struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private;
    struct scsi_pkt *tpkt;
    scsi_vhci_priv_t *svp;
    scsi_vhci_lun_t *vlun;
    int rval, held;
    struct scsi_failover_ops *fops;
    struct scsi_extended_sense *sns;
    mdi_pathinfo_t *lpath;
    static char *timeout_err = "Command Timeout";
    static char *parity_err = "Parity Error";
    char *err_str = NULL;
    dev_info_t *vdip, *cdip, *pdip;
    char *cpath, *dpath;

    ASSERT(vpkt != NULL);
    tpkt = vpkt->vpkt_tgt_pkt;
    ASSERT(tpkt != NULL);
    svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
    ASSERT(svp != NULL);
    vlun = svp->svp_svl;
    ASSERT(vlun != NULL);
    lpath = vpkt->vpkt_path;

    /*
     * Sync up the target driver's pkt with the pkt that
     * we actually used.
     */
    *(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
    tpkt->pkt_resid = pkt->pkt_resid;
    tpkt->pkt_state = pkt->pkt_state;
    tpkt->pkt_statistics = pkt->pkt_statistics;
    tpkt->pkt_reason = pkt->pkt_reason;

    /* Return path_instance information back to the target driver. */
    if (scsi_pkt_allocated_correctly(tpkt)) {
        if (scsi_pkt_allocated_correctly(pkt)) {
            /*
             * If both packets were correctly allocated,
             * return path returned by pHCI.
             */
            tpkt->pkt_path_instance = pkt->pkt_path_instance;
        } else {
            /* Otherwise return path of pHCI we used */
            tpkt->pkt_path_instance =
                mdi_pi_get_path_instance(lpath);
        }
    }

    if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
        (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
        ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
        if ((SCBP_C(pkt) != STATUS_GOOD) ||
            (pkt->pkt_reason != CMD_CMPLT)) {
            sema_v(&vlun->svl_pgr_sema);
        }
    } else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
        if (pkt->pkt_reason != CMD_CMPLT ||
            (SCBP_C(pkt) != STATUS_GOOD)) {
            sema_v(&vlun->svl_pgr_sema);
        }
    }

    switch (pkt->pkt_reason) {
    case CMD_CMPLT:
        /*
         * cmd completed successfully, check for scsi errors
         */
        switch (*(pkt->pkt_scbp)) {
        case STATUS_CHECK:
            if (pkt->pkt_state & STATE_ARQ_DONE) {
                sns = &(((struct scsi_arq_status *)(uintptr_t)
                    (pkt->pkt_scbp))->sts_sensedata);
                fops = vlun->svl_fops;
                ASSERT(fops != NULL);
                VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
                    "Received sns key %x esc %x escq %x\n",
                    sns->es_key, sns->es_add_code,
                    sns->es_qual_code));

                if (vlun->svl_waiting_for_activepath == 1) {
                    /*
                     * If we are here, it means we are in the
                     * midst of a probe/attach through a
                     * passive path; this case is exempt from
                     * sense analysis for detection of external
                     * failover because that would
                     * unnecessarily increase attach time.
                     */
                    bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
                        vpkt->vpkt_tgt_init_scblen);
                    break;
                }
                if (sns->es_add_code == VHCI_SCSI_PERR) {
                    /*
                     * parity error
                     */
                    err_str = parity_err;
                    bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
                        vpkt->vpkt_tgt_init_scblen);
                    break;
                }
                rval = fops->sfo_analyze_sense(svp->svp_psd,
                    sns, vlun->svl_fops_ctpriv);
                if ((rval == SCSI_SENSE_NOFAILOVER) ||
                    (rval == SCSI_SENSE_UNKNOWN) ||
                    (rval == SCSI_SENSE_NOT_READY)) {
                    bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
                        vpkt->vpkt_tgt_init_scblen);
                    break;
                } else if (rval == SCSI_SENSE_STATE_CHANGED) {
                    struct scsi_vhci *vhci;
                    vhci = ADDR2VHCI(&tpkt->pkt_address);
                    VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
                    if (!held) {
                        /*
                         * Looks like some other thread
                         * has already detected this
                         * condition.
                         */
                        tpkt->pkt_state &= ~STATE_ARQ_DONE;
                        *(tpkt->pkt_scbp) = STATUS_BUSY;
                        break;
                    }
                    (void) taskq_dispatch(
                        vhci->vhci_update_pathstates_taskq,
                        vhci_update_pathstates,
                        (void *)vlun, KM_SLEEP);
                } else {
                    /*
                     * An externally initiated failover
                     * has occurred or is in progress.
                     */
                    VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
                    if (!held) {
                        /*
                         * Looks like some other thread
                         * has already detected this
                         * condition.
                         */
                        tpkt->pkt_state &= ~STATE_ARQ_DONE;
                        *(tpkt->pkt_scbp) = STATUS_BUSY;
                        break;
                    } else {
                        rval = vhci_handle_ext_fo(pkt, rval);
                        if (rval == BUSY_RETURN) {
                            tpkt->pkt_state &= ~STATE_ARQ_DONE;
                            *(tpkt->pkt_scbp) = STATUS_BUSY;
                            break;
                        }
                        bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
                            vpkt->vpkt_tgt_init_scblen);
                        break;
                    }
                }
            }
            break;

        /*
         * If this is a good SCSI-II RELEASE cmd completion then
         * restore the load balancing policy and reset
         * VLUN_RESERVE_ACTIVE_FLG.  If this is a good SCSI-II RESERVE
         * cmd completion then set VLUN_RESERVE_ACTIVE_FLG.
         */
        case STATUS_GOOD:
            if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
                (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
                (void) mdi_set_lb_policy(vlun->svl_dip,
                    vlun->svl_lb_policy_save);
                vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
                VHCI_DEBUG(1, (CE_WARN, NULL,
                    "!vhci_intr: vlun 0x%p release path 0x%p",
                    (void *)vlun, (void *)vpkt->vpkt_path));
            }

            if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
                (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
                vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
                vlun->svl_resrv_pip = vpkt->vpkt_path;
                VHCI_DEBUG(1, (CE_WARN, NULL,
                    "!vhci_intr: vlun 0x%p reserved path 0x%p",
                    (void *)vlun, (void *)vpkt->vpkt_path));
            }
            break;

        case STATUS_RESERVATION_CONFLICT:
            VHCI_DEBUG(1, (CE_WARN, NULL,
                "!vhci_intr: vlun 0x%p "
                "reserve conflict on path 0x%p",
                (void *)vlun, (void *)vpkt->vpkt_path));
            /* FALLTHROUGH */
        default:
            break;
        }

        /*
         * Update I/O completion statistics for the path
         */
        mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);

        /*
         * Command completed successfully, release the dma binding and
         * destroy the transport side of the packet.
         */
        if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
            (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
            ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
            if (SCBP_C(pkt) == STATUS_GOOD) {
                ASSERT(vlun->svl_taskq);
                svp->svp_last_pkt_reason = pkt->pkt_reason;
                (void) taskq_dispatch(vlun->svl_taskq,
                    vhci_run_cmd, pkt, KM_SLEEP);
                return;
            }
        }
        if ((SCBP_C(pkt) == STATUS_GOOD) &&
            (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
            /*
             * If the action (value in byte 1 of the cdb) is zero,
             * we're reading keys, and that's the only condition
             * where we need to be concerned with filtering keys
             * and potential retries.  Otherwise, we simply signal
             * the semaphore and move on.
             */
            if (pkt->pkt_cdbp[1] == 0) {
                /*
                 * If this is the completion of an internal
                 * retry, then we need to make sure that the
                 * pkt and tpkt pointers are readjusted so
                 * the calls to scsi_destroy_pkt and pkt_comp
                 * below work correctly.
                 */
                if (vpkt->vpkt_org_vpkt != NULL) {
                    pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
                    tpkt = vpkt->vpkt_org_vpkt->vpkt_tgt_pkt;

                    /*
                     * If this command was issued through
                     * the taskq, then we need to clear
                     * this flag for proper processing in
                     * the case of a retry from the target
                     * driver.
                     */
                    vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
                }

                /*
                 * If vhci_do_prin returns VHCI_CMD_CMPLT, then
                 * vpkt will contain the address of the
                 * original vpkt.
                 */
                if (vhci_do_prin(vpkt) == VHCI_CMD_RETRY) {
                    /*
                     * The command has been resent to get
                     * all the keys from the device.  Don't
                     * complete the command with ssd until
                     * the retry completes.
                     */
                    return;
                }
            } else {
                sema_v(&vlun->svl_pgr_sema);
            }
        }

        break;

    case CMD_TIMEOUT:
        if ((pkt->pkt_statistics &
            (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) == 0) {
            VHCI_DEBUG(1, (CE_NOTE, NULL,
                "!scsi vhci timeout invoked\n"));

            (void) vhci_recovery_reset(vlun, &pkt->pkt_address,
                FALSE, VHCI_DEPTH_ALL);
        }
        MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
        tpkt->pkt_statistics |= STAT_ABORTED;
        err_str = timeout_err;
        break;

    case CMD_TRAN_ERR:
        /*
         * This status is returned if the transport has sent the cmd
         * down the link to the target and then some error occurs.
         * In case of a SCSI-II RESERVE cmd, we don't know if the
         * reservation has been accepted by the target or not, so we
         * need to clear the reservation.
         */
        if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
            (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
            VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
                " cmd_tran_err for scsi-2 reserve cmd\n"));
            if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
                TRUE, VHCI_DEPTH_TARGET)) {
                VHCI_DEBUG(1, (CE_WARN, NULL,
                    "!vhci_intr cmd_tran_err reset failed!"));
            }
        }
        break;

    case CMD_DEV_GONE:
        VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
            "cmd_dev_gone\n"));
        tpkt->pkt_reason = CMD_CMPLT;
        tpkt->pkt_state = STATE_GOT_BUS |
            STATE_GOT_TARGET | STATE_SENT_CMD |
            STATE_GOT_STATUS;
        *(tpkt->pkt_scbp) = STATUS_BUSY;
        break;

    default:
        break;
    }

    /*
     * The SCSI-II RESERVE cmd has been serviced by the lower layers,
     * so clear the flag so the LUN is no longer QUIESCED.
     * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this
     * pkt is retried, a taskq will again be dispatched to service it.
     * Otherwise it may lead to a system hang if the retry happens
     * within interrupt context.
     */
    if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
        (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
        vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
        vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
    }

    /*
     * vpkt_org_vpkt should always be NULL here if the retry command
     * has been successfully processed.  If vpkt_org_vpkt != NULL at
     * this point, it is an error, so restore the original vpkt and
     * return an error to the target driver so it can retry the
     * command as appropriate.
     */
    if (vpkt->vpkt_org_vpkt != NULL) {
        struct vhci_pkt *new_vpkt = vpkt;
        vpkt = vpkt->vpkt_org_vpkt;

        vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
            new_vpkt->vpkt_tgt_pkt);

        /*
         * Mark this command completion as having an error so that
         * ssd will retry the command.
         */
        vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
        vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;

        pkt = vpkt->vpkt_hba_pkt;
        tpkt = vpkt->vpkt_tgt_pkt;
    }

    if ((err_str != NULL) &&
        (pkt->pkt_reason != svp->svp_last_pkt_reason)) {
        cdip = vlun->svl_dip;
        pdip = mdi_pi_get_phci(vpkt->vpkt_path);
        vdip = ddi_get_parent(cdip);
        cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
        dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
        vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s (%s%d)",
            ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
            ddi_get_instance(cdip), err_str,
            ddi_pathname(pdip, dpath), ddi_driver_name(pdip),
            ddi_get_instance(pdip));
        kmem_free(cpath, MAXPATHLEN);
        kmem_free(dpath, MAXPATHLEN);
    }
    svp->svp_last_pkt_reason = pkt->pkt_reason;
    VHCI_DECR_PATH_CMDCOUNT(svp);

    /*
     * For PARTIAL_DMA, vhci should not free the path.
     * The target driver will call into vhci_scsi_dmafree or
     * destroy_pkt to release this path.
     */
    if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
        scsi_destroy_pkt(pkt);
        vpkt->vpkt_hba_pkt = NULL;
        if (vpkt->vpkt_path) {
            mdi_rele_path(vpkt->vpkt_path);
            vpkt->vpkt_path = NULL;
        }
    }

    if (tpkt->pkt_comp) {
        (*tpkt->pkt_comp)(tpkt);
    }
}

/*
 * Two possibilities: (1) failover has completed, or (2) failover is in
 * progress.  Update our path states for the former case; for the
 * latter case, initiate a scsi_watch request to determine when the
 * failover completes.  vlun is HELD until failover completes; BUSY is
 * returned to the upper layer in both cases.
 */
static int
vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
{
    struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private;
    struct scsi_pkt *tpkt;
    scsi_vhci_priv_t *svp;
    scsi_vhci_lun_t *vlun;
    struct scsi_vhci *vhci;
    scsi_vhci_swarg_t *swarg;
    char *path;

    ASSERT(vpkt != NULL);
    tpkt = vpkt->vpkt_tgt_pkt;
    ASSERT(tpkt != NULL);
    svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
    ASSERT(svp != NULL);
    vlun = svp->svp_svl;
    ASSERT(vlun != NULL);
    ASSERT(VHCI_LUN_IS_HELD(vlun));

    vhci = ADDR2VHCI(&tpkt->pkt_address);

    if (fostat == SCSI_SENSE_INACTIVE) {
        VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
            "detected for %s; updating path states...\n",
            vlun->svl_lun_wwn));
        /*
         * Set the vlun flag to indicate to the task that the target
         * port group needs updating.
         */
        vlun->svl_flags |= VLUN_UPDATE_TPG;
        (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
            vhci_update_pathstates, (void *)vlun, KM_SLEEP);
    } else {
        path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
        vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
            "!%s (%s%d): Waiting for externally initiated failover "
            "to complete", ddi_pathname(vlun->svl_dip, path),
            ddi_driver_name(vlun->svl_dip),
            ddi_get_instance(vlun->svl_dip));
        kmem_free(path, MAXPATHLEN);
        swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
        if (swarg == NULL) {
            VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
                "request packet allocation for %s failed....\n",
                vlun->svl_lun_wwn));
            VHCI_RELEASE_LUN(vlun);
            return (PKT_RETURN);
        }
        swarg->svs_svp = svp;
        swarg->svs_tos = ddi_get_time();
        swarg->svs_pi = vpkt->vpkt_path;
        swarg->svs_release_lun = 0;
        swarg->svs_done = 0;
        /*
         * Place a hold on the path...we don't want it to
         * vanish while scsi_watch is in progress.
         */
        mdi_hold_path(vpkt->vpkt_path);
        svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
            VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
            (caddr_t)swarg);
    }
    return (BUSY_RETURN);
}
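
/*
 * EFO watch lifecycle (summary): scsi_watch polls the device every
 * VHCI_FOWATCH_INTERVAL, invoking vhci_efo_watch_cb() with the status
 * and sense data of each probe.  The callback keeps returning 0 (keep
 * polling) until either the sense analysis shows the failover has
 * settled (SCSI_SENSE_INACTIVE/NOFAILOVER, STATUS_GOOD or a
 * reservation conflict), in which case a path-state update task is
 * dispatched, or VHCI_EXTFO_TIMEOUT elapses since svs_tos, in which
 * case vhci_efo_done() is dispatched to tear the watch down.
 */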
/*
 * vhci_efo_watch_cb:
 * Callback from scsi_watch request to check the failover status.
 * Completion is either due to successful failover or timeout.
 * Upon successful completion, vhci_update_path_states is called.
 * For the timeout condition, vhci_efo_done is called.
 * Always returns 0 to scsi_watch to keep retrying until vhci_efo_done
 * terminates this request properly in a separate thread.
 */
static int
vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
    struct scsi_status *statusp = resultp->statusp;
    struct scsi_extended_sense *sensep = resultp->sensep;
    struct scsi_pkt *pkt = resultp->pkt;
    scsi_vhci_swarg_t *swarg;
    scsi_vhci_priv_t *svp;
    scsi_vhci_lun_t *vlun;
    struct scsi_vhci *vhci;
    dev_info_t *vdip;
    int rval, updt_paths;

    swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
    svp = swarg->svs_svp;
    if (swarg->svs_done) {
        /*
         * Already completed failover or timed out.
         * Waiting for vhci_efo_done to terminate this scsi_watch.
         */
        return (0);
    }

    ASSERT(svp != NULL);
    vlun = svp->svp_svl;
    ASSERT(vlun != NULL);
    ASSERT(VHCI_LUN_IS_HELD(vlun));
    vlun->svl_efo_update_path = 0;
    vdip = ddi_get_parent(vlun->svl_dip);
    vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));

    updt_paths = 0;

    if (pkt->pkt_reason != CMD_CMPLT) {
        if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
            swarg->svs_release_lun = 1;
            goto done;
        }
        return (0);
    }
    if (*((unsigned char *)statusp) == STATUS_CHECK) {
        rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
            vlun->svl_fops_ctpriv);
        switch (rval) {
        /*
         * Only update path states in case the path is definitely
         * inactive, or no failover occurred.  For all other
         * check conditions, continue pinging.  An unexpected
         * check condition shouldn't cause pinging to complete
         * prematurely.
         */
        case SCSI_SENSE_INACTIVE:
        case SCSI_SENSE_NOFAILOVER:
            updt_paths = 1;
            break;
        default:
            if ((ddi_get_time() - swarg->svs_tos)
                >= VHCI_EXTFO_TIMEOUT) {
                swarg->svs_release_lun = 1;
                goto done;
            }
            return (0);
        }
    } else if (*((unsigned char *)statusp) ==
        STATUS_RESERVATION_CONFLICT) {
        updt_paths = 1;
    } else if ((*((unsigned char *)statusp)) &
        (STATUS_BUSY | STATUS_QFULL)) {
        return (0);
    }
    if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
        (updt_paths == 1)) {
        /*
         * We got here because we had detected an
         * externally initiated failover; things
         * have settled down now, so let's
         * start up a task to update the
         * path states and target port group.
         */
        vlun->svl_efo_update_path = 1;
        swarg->svs_done = 1;
        vlun->svl_swarg = swarg;
        vlun->svl_flags |= VLUN_UPDATE_TPG;
        (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
            vhci_update_pathstates, (void *)vlun, KM_SLEEP);
        return (0);
    }
    if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
        swarg->svs_release_lun = 1;
        goto done;
    }
    return (0);
done:
    swarg->svs_done = 1;
    (void) taskq_dispatch(vhci->vhci_taskq,
        vhci_efo_done, (void *)swarg, KM_SLEEP);
    return (0);
}

/*
 * vhci_efo_done:
 * Cleanly terminates scsi_watch and frees up resources.
 * Called as a taskq function in vhci_efo_watch_cb for the EFO timeout
 * condition, or by vhci_update_path_states invoked during externally
 * initiated failover completion.
 */
static void
vhci_efo_done(void *arg)
{
    scsi_vhci_lun_t *vlun;
    scsi_vhci_swarg_t *swarg = (scsi_vhci_swarg_t *)arg;
    scsi_vhci_priv_t *svp = swarg->svs_svp;
    ASSERT(svp);

    vlun = svp->svp_svl;
    ASSERT(vlun);

    /* Wait for clean termination of scsi_watch */
    (void) scsi_watch_request_terminate(svp->svp_sw_token,
        SCSI_WATCH_TERMINATE_WAIT);
    svp->svp_sw_token = NULL;

    /*
     * Release the path and free up resources to indicate failover
     * completion.
     */
    mdi_rele_path(swarg->svs_pi);
    if (swarg->svs_release_lun) {
        VHCI_RELEASE_LUN(vlun);
    }
    kmem_free((void *)swarg, sizeof (*swarg));
}
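
/*
 * vhci_update_pathstates() below walks every ONLINE and STANDBY path
 * of the LUN, asks the failover module for each path's current state
 * via sfo_path_get_opinfo(), and reconciles the mdi path state, the
 * "path-class" property and the preferred bit with what the device
 * reports.  When a path transitions to ONLINE it is additionally
 * probed with a one-block READ to detect a SCSI-II reservation
 * conflict, which, unless the LUN is xlf-capable, is cleared with a
 * recovery reset before the reset-notify callbacks are run.
 */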

/*
 * Update the path states.
 * vlun should be HELD when this is invoked.
 * Calls vhci_efo_done to clean up resources allocated for EFO.
 */
void
vhci_update_pathstates(void *arg)
{
    mdi_pathinfo_t *pip, *npip;
    dev_info_t *dip, *pdip;
    struct scsi_failover_ops *fo;
    struct scsi_vhci_priv *svp;
    struct scsi_device *psd;
    struct scsi_path_opinfo opinfo;
    char *pclass, *tptr;
    struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
    int sps; /* mdi_select_path() status */
    char *cpath, *dpath;
    struct scsi_vhci *vhci;
    struct scsi_pkt *pkt;
    struct buf *bp;
    int reserve_conflict = 0;

    ASSERT(VHCI_LUN_IS_HELD(vlun));
    dip = vlun->svl_dip;
    pip = npip = NULL;

    vhci = ddi_get_soft_state(vhci_softstate,
        ddi_get_instance(ddi_get_parent(dip)));

    sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
        MDI_SELECT_STANDBY_PATH), NULL, &npip);
    if ((npip == NULL) || (sps != MDI_SUCCESS)) {
        goto done;
    }

    fo = vlun->svl_fops;
    do {
        pip = npip;
        svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
        psd = svp->svp_psd;
        if (fo->sfo_path_get_opinfo(psd, &opinfo,
            vlun->svl_fops_ctpriv) != 0) {
            sps = mdi_select_path(dip, NULL,
                (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
                pip, &npip);
            mdi_rele_path(pip);
            continue;
        }

        if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
            MDI_SUCCESS) {
            VHCI_DEBUG(1, (CE_NOTE, NULL,
                "!vhci_update_pathstates: prop lookup failed for "
                "path 0x%p\n", (void *)pip));
            sps = mdi_select_path(dip, NULL,
                (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
                pip, &npip);
            mdi_rele_path(pip);
            continue;
        }

        /*
         * Need to update the "path-class" property
         * value in the device tree if different
         * from the existing value.
         */
        if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
            (void) mdi_prop_update_string(pip, "path-class",
                opinfo.opinfo_path_attr);
        }

        /*
         * Only change the state if needed, i.e. don't call
         * mdi_pi_set_state to ONLINE a path if it's already
         * ONLINE.  Same for STANDBY paths.
         */
        if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
            opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
            if (!(MDI_PI_IS_ONLINE(pip))) {
                VHCI_DEBUG(1, (CE_NOTE, NULL,
                    "!vhci_update_pathstates: marking path"
                    " 0x%p as ONLINE\n", (void *)pip));
                pdip = mdi_pi_get_phci(pip);
                cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
                dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
                vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s"
                    " (%s%d): path %s (%s%d) target address %s"
                    " is now ONLINE because of"
                    " an externally initiated failover",
                    ddi_pathname(dip, cpath),
                    ddi_driver_name(dip),
                    ddi_get_instance(dip),
                    ddi_pathname(pdip, dpath),
                    ddi_driver_name(pdip),
                    ddi_get_instance(pdip),
                    mdi_pi_get_addr(pip));
                kmem_free(cpath, MAXPATHLEN);
                kmem_free(dpath, MAXPATHLEN);
                mdi_pi_set_state(pip,
                    MDI_PATHINFO_STATE_ONLINE);
                mdi_pi_set_preferred(pip,
                    opinfo.opinfo_preferred);
                tptr = kmem_alloc(
                    strlen(opinfo.opinfo_path_attr) + 1, KM_SLEEP);
                (void) strlcpy(tptr, opinfo.opinfo_path_attr,
                    (strlen(opinfo.opinfo_path_attr) + 1));
                mutex_enter(&vlun->svl_mutex);
                if (vlun->svl_active_pclass != NULL) {
                    kmem_free(vlun->svl_active_pclass,
                        strlen(vlun->svl_active_pclass) + 1);
                }
                vlun->svl_active_pclass = tptr;
                if (vlun->svl_waiting_for_activepath) {
                    vlun->svl_waiting_for_activepath = 0;
                }
                mutex_exit(&vlun->svl_mutex);
                /* Check for Reservation Conflict */
                bp = scsi_alloc_consistent_buf(
                    &svp->svp_psd->sd_address,
                    (struct buf *)NULL, DEV_BSIZE, B_READ,
                    NULL, NULL);
                if (!bp) {
                    VHCI_DEBUG(1, (CE_NOTE, NULL,
                        "vhci_update_pathstates: "
                        "!No resources (buf)\n"));
                    mdi_rele_path(pip);
                    goto done;
                }
                pkt = scsi_init_pkt(&svp->svp_psd->sd_address,
                    NULL, bp, CDB_GROUP1,
                    sizeof (struct scsi_arq_status), 0,
                    PKT_CONSISTENT, NULL, NULL);
                if (pkt) {
                    (void) scsi_setup_cdb((union scsi_cdb *)
                        (uintptr_t)pkt->pkt_cdbp,
                        SCMD_READ, 1, 1, 0);
                    pkt->pkt_time = 3 * 30; /* 90 seconds */
                    pkt->pkt_flags = FLAG_NOINTR;
                    pkt->pkt_path_instance =
                        mdi_pi_get_path_instance(pip);

                    if ((scsi_transport(pkt) == TRAN_ACCEPT) &&
                        (pkt->pkt_reason == CMD_CMPLT) &&
                        (SCBP_C(pkt) ==
                        STATUS_RESERVATION_CONFLICT)) {
                        reserve_conflict = 1;
                    }
                    scsi_destroy_pkt(pkt);
                }
                scsi_free_consistent_buf(bp);
            } else if (MDI_PI_IS_ONLINE(pip)) {
                if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
                    mdi_pi_set_preferred(pip,
                        opinfo.opinfo_preferred);
                    mutex_enter(&vlun->svl_mutex);
                    if (vlun->svl_active_pclass == NULL ||
                        strcmp(opinfo.opinfo_path_attr,
                        vlun->svl_active_pclass) != 0) {
                        mutex_exit(&vlun->svl_mutex);
                        tptr = kmem_alloc(
                            strlen(opinfo.opinfo_path_attr) + 1,
                            KM_SLEEP);
                        (void) strlcpy(tptr,
                            opinfo.opinfo_path_attr,
                            (strlen(opinfo.opinfo_path_attr) + 1));
                        mutex_enter(&vlun->svl_mutex);
                    } else {
                        /*
                         * No need to update
                         * svl_active_pclass
                         */
                        tptr = NULL;
                        mutex_exit(&vlun->svl_mutex);
                    }
                    if (tptr) {
                        if (vlun->svl_active_pclass != NULL) {
                            kmem_free(vlun->svl_active_pclass,
                                strlen(vlun->svl_active_pclass) +
                                1);
                        }
                        vlun->svl_active_pclass = tptr;
                        mutex_exit(&vlun->svl_mutex);
                    }
                }
            }
        } else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
!(MDI_PI_IS_STANDBY(pip))) { 3807 VHCI_DEBUG(1, (CE_NOTE, NULL, 3808 "!vhci_update_pathstates: marking path" 3809 " 0x%p as STANDBY\n", (void *)pip)); 3810 pdip = mdi_pi_get_phci(pip); 3811 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3812 dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3813 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s" 3814 " (%s%d): path %s (%s%d) target address %s" 3815 " is now STANDBY because of" 3816 " an externally initiated failover", 3817 ddi_pathname(dip, cpath), 3818 ddi_driver_name(dip), 3819 ddi_get_instance(dip), 3820 ddi_pathname(pdip, dpath), 3821 ddi_driver_name(pdip), 3822 ddi_get_instance(pdip), 3823 mdi_pi_get_addr(pip)); 3824 kmem_free(cpath, MAXPATHLEN); 3825 kmem_free(dpath, MAXPATHLEN); 3826 mdi_pi_set_state(pip, 3827 MDI_PATHINFO_STATE_STANDBY); 3828 mdi_pi_set_preferred(pip, 3829 opinfo.opinfo_preferred); 3830 mutex_enter(&vlun->svl_mutex); 3831 if (vlun->svl_active_pclass != NULL) { 3832 if (strcmp(vlun->svl_active_pclass, 3833 opinfo.opinfo_path_attr) == 0) { 3834 kmem_free(vlun-> 3835 svl_active_pclass, 3836 strlen(vlun-> 3837 svl_active_pclass)+1); 3838 vlun->svl_active_pclass = NULL; 3839 } 3840 } 3841 mutex_exit(&vlun->svl_mutex); 3842 } 3843 (void) mdi_prop_free(pclass); 3844 sps = mdi_select_path(dip, NULL, 3845 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), 3846 pip, &npip); 3847 mdi_rele_path(pip); 3848 3849 } while ((npip != NULL) && (sps == MDI_SUCCESS)); 3850 3851 /* 3852 * Check to see if this vlun has an active SCSI-II RESERVE. If so 3853 * clear the reservation by sending a reset, so the host doesn't 3854 * receive a reservation conflict. 3855 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd 3856 * of the reset, explicitly. 3857 */ 3858 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 3859 if (reserve_conflict && (vlun->svl_xlf_capable == 0)) { 3860 (void) vhci_recovery_reset(vlun, 3861 &svp->svp_psd->sd_address, FALSE, 3862 VHCI_DEPTH_TARGET); 3863 } 3864 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 3865 mutex_enter(&vhci->vhci_mutex); 3866 scsi_hba_reset_notify_callback(&vhci->vhci_mutex, 3867 &vhci->vhci_reset_notify_listf); 3868 mutex_exit(&vhci->vhci_mutex); 3869 } 3870 if (vlun->svl_flags & VLUN_UPDATE_TPG) { 3871 /* 3872 * Update the AccessState of related MP-API TPGs 3873 */ 3874 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun); 3875 vlun->svl_flags &= ~VLUN_UPDATE_TPG; 3876 } 3877 done: 3878 if (vlun->svl_efo_update_path) { 3879 vlun->svl_efo_update_path = 0; 3880 vhci_efo_done(vlun->svl_swarg); 3881 vlun->svl_swarg = 0; 3882 } 3883 VHCI_RELEASE_LUN(vlun); 3884 } 3885 3886 /* ARGSUSED */ 3887 static int 3888 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 3889 { 3890 scsi_hba_tran_t *hba = NULL; 3891 struct scsi_device *psd = NULL; 3892 scsi_vhci_lun_t *vlun = NULL; 3893 dev_info_t *pdip = NULL; 3894 dev_info_t *tgt_dip; 3895 struct scsi_vhci *vhci; 3896 char *guid; 3897 scsi_vhci_priv_t *svp = NULL; 3898 int rval = MDI_FAILURE; 3899 int vlun_alloced = 0; 3900 3901 ASSERT(vdip != NULL); 3902 ASSERT(pip != NULL); 3903 3904 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 3905 ASSERT(vhci != NULL); 3906 3907 pdip = mdi_pi_get_phci(pip); 3908 ASSERT(pdip != NULL); 3909 3910 hba = ddi_get_driver_private(pdip); 3911 ASSERT(hba != NULL); 3912 3913 tgt_dip = mdi_pi_get_client(pip); 3914 ASSERT(tgt_dip != NULL); 3915 3916 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS, 3917 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 3918 VHCI_DEBUG(1, (CE_WARN, NULL, 3919 
"vhci_pathinfo_init: lun guid property failed")); 3920 goto failure; 3921 } 3922 3923 vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced); 3924 ddi_prop_free(guid); 3925 3926 vlun->svl_dip = tgt_dip; 3927 3928 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP); 3929 svp->svp_svl = vlun; 3930 3931 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip); 3932 mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL); 3933 cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL); 3934 3935 psd = kmem_zalloc(sizeof (*psd), KM_SLEEP); 3936 mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL); 3937 3938 /* 3939 * Clone transport structure if requested, so 3940 * Self enumerating HBAs always need to use cloning 3941 */ 3942 3943 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 3944 scsi_hba_tran_t *clone = 3945 kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP); 3946 bcopy(hba, clone, sizeof (scsi_hba_tran_t)); 3947 hba = clone; 3948 hba->tran_sd = psd; 3949 } else { 3950 ASSERT(hba->tran_sd == NULL); 3951 } 3952 psd->sd_dev = tgt_dip; 3953 psd->sd_address.a_hba_tran = hba; 3954 psd->sd_private = (caddr_t)pip; 3955 svp->svp_psd = psd; 3956 mdi_pi_set_vhci_private(pip, (caddr_t)svp); 3957 3958 /* 3959 * call hba's target init entry point if it exists 3960 */ 3961 if (hba->tran_tgt_init != NULL) { 3962 if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip, 3963 hba, psd)) != DDI_SUCCESS) { 3964 VHCI_DEBUG(1, (CE_WARN, pdip, 3965 "!vhci_pathinfo_init: tran_tgt_init failed for " 3966 "path=0x%p rval=%x", (void *)pip, rval)); 3967 goto failure; 3968 } 3969 } 3970 3971 svp->svp_new_path = 1; 3972 3973 psd->sd_inq = NULL; 3974 3975 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n", 3976 (void *)pip)); 3977 return (MDI_SUCCESS); 3978 3979 failure: 3980 if (psd) { 3981 mutex_destroy(&psd->sd_mutex); 3982 kmem_free(psd, sizeof (*psd)); 3983 } 3984 if (svp) { 3985 mdi_pi_set_vhci_private(pip, NULL); 3986 mutex_destroy(&svp->svp_mutex); 3987 cv_destroy(&svp->svp_cv); 3988 kmem_free(svp, sizeof (*svp)); 3989 } 3990 if (hba && hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) 3991 kmem_free(hba, sizeof (scsi_hba_tran_t)); 3992 3993 if (vlun_alloced) 3994 vhci_lun_free(tgt_dip); 3995 3996 return (rval); 3997 } 3998 3999 /* ARGSUSED */ 4000 static int 4001 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 4002 { 4003 scsi_hba_tran_t *hba = NULL; 4004 struct scsi_device *psd = NULL; 4005 dev_info_t *pdip = NULL; 4006 dev_info_t *cdip = NULL; 4007 scsi_vhci_priv_t *svp = NULL; 4008 4009 ASSERT(vdip != NULL); 4010 ASSERT(pip != NULL); 4011 4012 pdip = mdi_pi_get_phci(pip); 4013 ASSERT(pdip != NULL); 4014 4015 cdip = mdi_pi_get_client(pip); 4016 ASSERT(cdip != NULL); 4017 4018 hba = ddi_get_driver_private(pdip); 4019 ASSERT(hba != NULL); 4020 4021 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED); 4022 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4023 if (svp == NULL) { 4024 /* path already freed. Nothing to do. 
*/ 4025 return (MDI_SUCCESS); 4026 } 4027 4028 psd = svp->svp_psd; 4029 ASSERT(psd != NULL); 4030 4031 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4032 hba = psd->sd_address.a_hba_tran; 4033 ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE); 4034 ASSERT(hba->tran_sd == psd); 4035 } else { 4036 ASSERT(hba->tran_sd == NULL); 4037 } 4038 4039 if (hba->tran_tgt_free != NULL) { 4040 (*hba->tran_tgt_free) (pdip, cdip, hba, psd); 4041 } 4042 mutex_destroy(&psd->sd_mutex); 4043 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) { 4044 kmem_free(hba, sizeof (*hba)); 4045 } 4046 4047 mdi_pi_set_vhci_private(pip, NULL); 4048 kmem_free((caddr_t)psd, sizeof (*psd)); 4049 4050 mutex_destroy(&svp->svp_mutex); 4051 cv_destroy(&svp->svp_cv); 4052 kmem_free((caddr_t)svp, sizeof (*svp)); 4053 4054 /* 4055 * If this is the last path to the client, 4056 * then free up the vlun as well. 4057 */ 4058 if (mdi_client_get_path_count(cdip) == 1) { 4059 vhci_lun_free(cdip); 4060 } 4061 4062 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n", 4063 (void *)pip)); 4064 return (MDI_SUCCESS); 4065 } 4066 4067 /* ARGSUSED */ 4068 static int 4069 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip, 4070 mdi_pathinfo_state_t state, uint32_t ext_state, int flags) 4071 { 4072 int rval = MDI_SUCCESS; 4073 scsi_vhci_priv_t *svp; 4074 scsi_vhci_lun_t *vlun; 4075 int held; 4076 int op = (flags & 0xf00) >> 8; 4077 struct scsi_vhci *vhci; 4078 4079 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4080 4081 if (flags & MDI_EXT_STATE_CHANGE) { 4082 /* 4083 * We do not want to issue any commands down the path in case 4084 * the sync flag is set. Lower layers might not be ready to 4085 * accept any I/O commands. 4086 */ 4087 if (op == DRIVER_DISABLE) 4088 return (MDI_SUCCESS); 4089 4090 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4091 if (svp == NULL) { 4092 return (MDI_FAILURE); 4093 } 4094 vlun = svp->svp_svl; 4095 4096 if (flags & MDI_BEFORE_STATE_CHANGE) { 4097 /* 4098 * Hold the LUN. 4099 */ 4100 VHCI_HOLD_LUN(vlun, VH_SLEEP, held); 4101 if (flags & MDI_DISABLE_OP) { 4102 /* 4103 * Issue a scsi reset if this happens to be 4104 * a reserved path.
4105 */ 4106 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 4107 /* 4108 * if a reservation is pending on 4109 * this path, don't mark the 4110 * path busy 4111 */ 4112 if (op == DRIVER_DISABLE_TRANSIENT) { 4113 VHCI_DEBUG(1, (CE_NOTE, NULL, 4114 "!vhci_pathinfo" 4115 "_state_change (pip:%p): " 4116 "reservation: fail busy\n", 4117 (void *)pip)); 4118 return (MDI_FAILURE); 4119 } 4120 if (pip == vlun->svl_resrv_pip) { 4121 if (vhci_recovery_reset( 4122 svp->svp_svl, 4123 &svp->svp_psd->sd_address, 4124 TRUE, 4125 VHCI_DEPTH_TARGET) == 0) { 4126 VHCI_DEBUG(1, 4127 (CE_NOTE, NULL, 4128 "!vhci_pathinfo" 4129 "_state_change " 4130 "(pip:%p): " 4131 "reset failed, " 4132 "give up!\n", 4133 (void *)pip)); 4134 } 4135 vlun->svl_flags &= 4136 ~VLUN_RESERVE_ACTIVE_FLG; 4137 } 4138 } 4139 } else if (flags & MDI_ENABLE_OP) { 4140 if (((vhci->vhci_conf_flags & 4141 VHCI_CONF_FLAGS_AUTO_FAILBACK) == 4142 VHCI_CONF_FLAGS_AUTO_FAILBACK) && 4143 MDI_PI_IS_USER_DISABLE(pip) && 4144 MDI_PI_IS_STANDBY(pip)) { 4145 struct scsi_failover_ops *fo; 4146 char *best_pclass, *pclass = NULL; 4147 int best_class, rv; 4148 /* 4149 * Failback if enabling a standby path 4150 * and it is the primary class or 4151 * preferred class 4152 */ 4153 best_class = mdi_pi_get_preferred(pip); 4154 if (best_class == 0) { 4155 /* 4156 * if not preferred - compare 4157 * path-class with class 4158 */ 4159 fo = vlun->svl_fops; 4160 (void) fo->sfo_pathclass_next( 4161 NULL, &best_pclass, 4162 vlun->svl_fops_ctpriv); 4163 pclass = NULL; 4164 rv = mdi_prop_lookup_string(pip, 4165 "path-class", &pclass); 4166 if (rv != MDI_SUCCESS || 4167 pclass == NULL) { 4168 vhci_log(CE_NOTE, vdip, 4169 "!path-class " 4170 "lookup " 4171 "failed. rv: %d " 4172 "class: %p", rv, 4173 (void *)pclass); 4174 } else if (strncmp(pclass, 4175 best_pclass, 4176 strlen(best_pclass)) == 0) { 4177 best_class = 1; 4178 } 4179 if (rv == MDI_SUCCESS && 4180 pclass != NULL) { 4181 rv = mdi_prop_free( 4182 pclass); 4183 if (rv != 4184 DDI_PROP_SUCCESS) { 4185 vhci_log( 4186 CE_NOTE, 4187 vdip, 4188 "!path-" 4189 "class" 4190 " free" 4191 " failed" 4192 " rv: %d" 4193 " class: " 4194 "%p", 4195 rv, 4196 (void *) 4197 pclass); 4198 } 4199 } 4200 } 4201 if (best_class == 1) { 4202 VHCI_DEBUG(1, (CE_NOTE, NULL, 4203 "preferred path: %p " 4204 "USER_DISABLE->USER_ENABLE " 4205 "transition for lun %s\n", 4206 (void *)pip, 4207 vlun->svl_lun_wwn)); 4208 (void) taskq_dispatch( 4209 vhci->vhci_taskq, 4210 vhci_initiate_auto_failback, 4211 (void *) vlun, KM_SLEEP); 4212 } 4213 } 4214 /* 4215 * if PGR is active, revalidate the key and 4216 * register on this path also, if the key is 4217 * still valid 4218 */ 4219 sema_p(&vlun->svl_pgr_sema); 4220 if (vlun->svl_pgr_active) 4221 (void) 4222 vhci_pgr_validate_and_register(svp); 4223 sema_v(&vlun->svl_pgr_sema); 4224 /* 4225 * Inform the target driver about any 4226 * reservations to be reinstated if the target 4227 * has dropped the reservation during the busy 4228 * period.
4229 */ 4230 mutex_enter(&vhci->vhci_mutex); 4231 scsi_hba_reset_notify_callback( 4232 &vhci->vhci_mutex, 4233 &vhci->vhci_reset_notify_listf); 4234 mutex_exit(&vhci->vhci_mutex); 4235 } 4236 } 4237 if (flags & MDI_AFTER_STATE_CHANGE) { 4238 if (flags & MDI_ENABLE_OP) { 4239 mutex_enter(&vhci_global_mutex); 4240 cv_broadcast(&vhci_cv); 4241 mutex_exit(&vhci_global_mutex); 4242 } 4243 if (vlun->svl_setcap_done) { 4244 (void) vhci_pHCI_cap(&svp->svp_psd->sd_address, 4245 "sector-size", vlun->svl_sector_size, 4246 1, pip); 4247 } 4248 4249 /* 4250 * Release the LUN 4251 */ 4252 VHCI_RELEASE_LUN(vlun); 4253 4254 /* 4255 * Path transition is complete. 4256 * Run the callback to tell the target driver to 4257 * retry, to prevent IO starvation. 4258 */ 4259 if (scsi_callback_id != 0) { 4260 ddi_run_callback(&scsi_callback_id); 4261 } 4262 } 4263 } else { 4264 switch (state) { 4265 case MDI_PATHINFO_STATE_ONLINE: 4266 rval = vhci_pathinfo_online(vdip, pip, flags); 4267 break; 4268 4269 case MDI_PATHINFO_STATE_OFFLINE: 4270 rval = vhci_pathinfo_offline(vdip, pip, flags); 4271 break; 4272 4273 default: 4274 break; 4275 } 4276 /* 4277 * Path transition is complete. 4278 * Run the callback to tell the target driver to 4279 * retry, to prevent IO starvation. 4280 */ 4281 if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) { 4282 ddi_run_callback(&scsi_callback_id); 4283 } 4284 return (rval); 4285 } 4286 4287 return (MDI_SUCCESS); 4288 } 4289 4290 /* 4291 * Parse the mpxio load balancing options. The datanameptr 4292 * will point to a string containing the load-balance-options value. 4293 * The load-balance-options value will be a property that 4294 * defines the load-balance algorithm and any arguments to that 4295 * algorithm. 4296 * For example: 4297 * device-type-mpxio-options-list= 4298 * "device-type=SUN SENA", "load-balance-options=logical-block-options" 4299 * "device-type=SUN SE6920", "round-robin-options"; 4300 * logical-block-options="load-balance=logical-block", "region-size=15"; 4301 * round-robin-options="load-balance=round-robin"; 4302 * 4303 * If load-balance is not defined, the load-balance algorithm will 4304 * default to the global setting. Default values will be assigned 4305 * to the arguments (region-size=18), and if an argument is 4306 * not known, it will be ignored.
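 * Editorial note: ddi_getlongprop() below hands this routine the
 * options as a packed sequence of NUL-separated strings, so the
 * logical-block example above would arrive in memory roughly as:
 *	"load-balance=logical-block\0region-size=15\0"
 * and the parsing loop advances entry by entry using strlen() + 1.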
4307 */ 4308 static void 4309 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip, 4310 caddr_t datanameptr) 4311 { 4312 char *dataptr, *next_entry; 4313 caddr_t config_list = NULL; 4314 int config_list_len = 0, list_len = 0; 4315 int region_size = -1; 4316 client_lb_t load_balance = LOAD_BALANCE_NONE; /* avoid unset read below */ 4317 4318 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr, 4319 (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) { 4320 return; 4321 } 4322 4323 list_len = config_list_len; 4324 next_entry = config_list; 4325 while (config_list_len > 0) { 4326 dataptr = next_entry; 4327 4328 if (strncmp(mdi_load_balance, dataptr, 4329 strlen(mdi_load_balance)) == 0) { 4330 /* get the load-balance scheme */ 4331 dataptr += strlen(mdi_load_balance) + 1; 4332 if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) { 4333 (void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR); 4334 load_balance = LOAD_BALANCE_RR; 4335 } else if (strcmp(dataptr, 4336 LOAD_BALANCE_PROP_LBA) == 0) { 4337 (void) mdi_set_lb_policy(cdip, 4338 LOAD_BALANCE_LBA); 4339 load_balance = LOAD_BALANCE_LBA; 4340 } else if (strcmp(dataptr, 4341 LOAD_BALANCE_PROP_NONE) == 0) { 4342 (void) mdi_set_lb_policy(cdip, 4343 LOAD_BALANCE_NONE); 4344 load_balance = LOAD_BALANCE_NONE; 4345 } 4346 } else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE, 4347 strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) { 4348 int i = 0; 4349 char *ptr; 4350 char *tmp; 4351 4352 tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1); 4353 /* check for a numeric value */ 4354 for (ptr = tmp; i < strlen(tmp); i++, ptr++) { 4355 if (!isdigit(*ptr)) { 4356 cmn_err(CE_WARN, 4357 "Illegal region size: %s." 4358 " Setting to default value: %d", 4359 tmp, 4360 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4361 region_size = 4362 LOAD_BALANCE_DEFAULT_REGION_SIZE; 4363 break; 4364 } 4365 } 4366 if (i >= strlen(tmp)) { 4367 region_size = stoi(&tmp); 4368 } 4369 (void) mdi_set_lb_region_size(cdip, region_size); 4370 } 4371 config_list_len -= (strlen(next_entry) + 1); 4372 next_entry += strlen(next_entry) + 1; 4373 } 4374 #ifdef DEBUG 4375 if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) { 4376 VHCI_DEBUG(1, (CE_NOTE, dip, 4377 "!vhci_parse_mpxio_lb_options: region-size: %d" 4378 " only valid for load-balance=logical-block\n", 4379 region_size)); 4380 } 4381 #endif 4382 if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) { 4383 VHCI_DEBUG(1, (CE_NOTE, dip, 4384 "!vhci_parse_mpxio_lb_options: No region-size" 4385 " defined for load-balance=logical-block." 4386 " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE)); 4387 (void) mdi_set_lb_region_size(cdip, 4388 LOAD_BALANCE_DEFAULT_REGION_SIZE); 4389 } 4390 if (list_len > 0) { 4391 kmem_free(config_list, list_len); 4392 } 4393 } 4394 4395 /* 4396 * Parse the device-type-mpxio-options-list looking for the key of 4397 * "load-balance-options". If found, parse the load balancing options. 4398 * See the comment of vhci_get_device_type_mpxio_options() 4399 * for the device-type-mpxio-options-list format.
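 * As an illustrative sketch, when a "device-type=..." duplet has been
 * matched, datanameptr points at data such as (hypothetical):
 *	"load-balance-options=logical-block-options\0..."
 * and the scan below stops at the next "device-type" keyword.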
4400 */ 4401 static void 4402 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip, 4403 caddr_t datanameptr, int list_len) 4404 { 4405 char *dataptr; 4406 int len; 4407 4408 /* 4409 * get the data list 4410 */ 4411 dataptr = datanameptr; 4412 len = 0; 4413 while (len < list_len && 4414 strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR)) 4415 != 0) { 4416 if (strncmp(dataptr, LOAD_BALANCE_OPTIONS, 4417 strlen(LOAD_BALANCE_OPTIONS)) == 0) { 4418 len += strlen(LOAD_BALANCE_OPTIONS) + 1; 4419 dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1; 4420 vhci_parse_mpxio_lb_options(dip, cdip, dataptr); 4421 } 4422 len += strlen(dataptr) + 1; 4423 dataptr += strlen(dataptr) + 1; 4424 } 4425 } 4426 4427 /* 4428 * Check the inquiry string returned from the device against the device-type. 4429 * Check for the existence of the device-type-mpxio-options-list and, 4430 * if found, parse the list checking for a match with the device-type 4431 * value and the inquiry string returned from the device. If a match 4432 * is found, parse the mpxio options list. The format of the 4433 * device-type-mpxio-options-list is: 4434 * device-type-mpxio-options-list= 4435 * "device-type=SUN SENA", "load-balance-options=logical-block-options" 4436 * "device-type=SUN SE6920", "round-robin-options"; 4437 * logical-block-options="load-balance=logical-block", "region-size=15"; 4438 * round-robin-options="load-balance=round-robin"; 4439 */ 4440 void 4441 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip, 4442 struct scsi_device *devp) 4443 { 4444 4445 caddr_t config_list = NULL; 4446 caddr_t vidptr, datanameptr; 4447 int vidlen, dupletlen = 0; 4448 int config_list_len = 0, len; 4449 struct scsi_inquiry *inq = devp->sd_inq; 4450 4451 /* 4452 * Look up the device-type-mpxio-options-list and walk through 4453 * the list, comparing the vendor id from the earlier inquiry command 4454 * with the vids in the list; if there is a match, look up 4455 * the mpxio-options value. 4456 */ 4457 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 4458 MPXIO_OPTIONS_LIST, 4459 (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) { 4460 4461 /* 4462 * Compare vids in each duplet - if it matches, 4463 * parse the mpxio options list.
4464 */ 4465 for (len = config_list_len, vidptr = config_list; len > 0; 4466 len -= dupletlen) { 4467 4468 dupletlen = 0; 4469 4470 if (strlen(vidptr) != 0 && 4471 strncmp(vidptr, DEVICE_TYPE_STR, 4472 strlen(DEVICE_TYPE_STR)) == 0) { 4473 /* point to next duplet */ 4474 datanameptr = vidptr + strlen(vidptr) + 1; 4475 /* add len of this duplet */ 4476 dupletlen += strlen(vidptr) + 1; 4477 /* get to device type */ 4478 vidptr += strlen(DEVICE_TYPE_STR) + 1; 4479 vidlen = strlen(vidptr); 4480 if ((vidlen != 0) && 4481 bcmp(inq->inq_vid, vidptr, vidlen) == 0) { 4482 vhci_parse_mpxio_options(dip, cdip, 4483 datanameptr, len - dupletlen); 4484 break; 4485 } 4486 /* get to next duplet */ 4487 vidptr += strlen(vidptr) + 1; 4488 } 4489 /* get to the next device-type */ 4490 while (len - dupletlen > 0 && 4491 strlen(vidptr) != 0 && 4492 strncmp(vidptr, DEVICE_TYPE_STR, 4493 strlen(DEVICE_TYPE_STR)) != 0) { 4494 dupletlen += strlen(vidptr) + 1; 4495 vidptr += strlen(vidptr) + 1; 4496 } 4497 } 4498 if (config_list_len > 0) { 4499 kmem_free(config_list, config_list_len); 4500 } 4501 } 4502 } 4503 4504 static int 4505 vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip, 4506 struct scsi_failover_ops *fo, 4507 scsi_vhci_lun_t *vlun, 4508 struct scsi_vhci *vhci) 4509 { 4510 struct scsi_path_opinfo opinfo; 4511 char *pclass, *best_pclass; 4512 4513 if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) { 4514 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: " 4515 "Failed to get operation info for path:%p\n", (void *)pip)); 4516 return (MDI_FAILURE); 4517 } 4518 /* set the xlf capable flag in the vlun for future use */ 4519 vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable; 4520 (void) mdi_prop_update_string(pip, "path-class", 4521 opinfo.opinfo_path_attr); 4522 4523 pclass = opinfo.opinfo_path_attr; 4524 if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) { 4525 mutex_enter(&vlun->svl_mutex); 4526 if (vlun->svl_active_pclass != NULL) { 4527 if (strcmp(vlun->svl_active_pclass, pclass) != 0) { 4528 mutex_exit(&vlun->svl_mutex); 4529 /* 4530 * Externally initiated failover has happened; 4531 * force the path state to be STANDBY/ONLINE, 4532 * next IO will trigger failover and thus 4533 * sync-up the pathstates. The reason we don't 4534 * sync-up immediately by invoking 4535 * vhci_update_pathstates() is that it 4536 * needs a VHCI_HOLD_LUN() and we don't 4537 * want to block here. 4538 * 4539 * Further, if the device is an ALUA device, 4540 * then failure to exactly match 'pclass' and 4541 * 'svl_active_pclass' (as is the case here) 4542 * indicates that the currently active path 4543 * is a 'non-optimized' path - which means 4544 * that 'svl_active_pclass' needs to be 4545 * replaced with the opinfo.opinfo_path_state 4546 * value. 4547 */ 4548 4549 if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) { 4550 char *tptr; 4551 4552 /* 4553 * The device is ALUA compliant. The 4554 * state needs to be changed to ONLINE 4555 * rather than STANDBY, which is what is 4556 * typically done for an asymmetric 4557 * device that is not ALUA compliant.
4558 */ 4559 mdi_pi_set_state(pip, 4560 MDI_PATHINFO_STATE_ONLINE); 4561 tptr = kmem_alloc(strlen 4562 (opinfo.opinfo_path_attr)+1, 4563 KM_SLEEP); 4564 (void) strlcpy(tptr, 4565 opinfo.opinfo_path_attr, 4566 (strlen(opinfo.opinfo_path_attr) 4567 +1)); 4568 mutex_enter(&vlun->svl_mutex); 4569 kmem_free(vlun->svl_active_pclass, 4570 strlen(vlun->svl_active_pclass)+1); 4571 vlun->svl_active_pclass = tptr; 4572 mutex_exit(&vlun->svl_mutex); 4573 } else { 4574 /* 4575 * Non ALUA device case. 4576 */ 4577 mdi_pi_set_state(pip, 4578 MDI_PATHINFO_STATE_STANDBY); 4579 } 4580 vlun->svl_fo_support = opinfo.opinfo_mode; 4581 mdi_pi_set_preferred(pip, 4582 opinfo.opinfo_preferred); 4583 return (MDI_SUCCESS); 4584 } 4585 } else { 4586 char *tptr; 4587 4588 /* 4589 * Let's release the mutex before we try to 4590 * allocate, since the allocation may 4591 * sleep. 4592 */ 4593 mutex_exit(&vlun->svl_mutex); 4594 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP); 4595 (void) strlcpy(tptr, pclass, (strlen(pclass)+1)); 4596 mutex_enter(&vlun->svl_mutex); 4597 vlun->svl_active_pclass = tptr; 4598 } 4599 mutex_exit(&vlun->svl_mutex); 4600 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE); 4601 vlun->svl_waiting_for_activepath = 0; 4602 } else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) { 4603 mutex_enter(&vlun->svl_mutex); 4604 if (vlun->svl_active_pclass == NULL) { 4605 char *tptr; 4606 4607 mutex_exit(&vlun->svl_mutex); 4608 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP); 4609 (void) strlcpy(tptr, pclass, (strlen(pclass)+1)); 4610 mutex_enter(&vlun->svl_mutex); 4611 vlun->svl_active_pclass = tptr; 4612 } 4613 mutex_exit(&vlun->svl_mutex); 4614 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE); 4615 vlun->svl_waiting_for_activepath = 0; 4616 } else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) { 4617 mutex_enter(&vlun->svl_mutex); 4618 if (vlun->svl_active_pclass != NULL) { 4619 if (strcmp(vlun->svl_active_pclass, pclass) == 0) { 4620 mutex_exit(&vlun->svl_mutex); 4621 /* 4622 * externally initiated failover has happened; 4623 * force state to ONLINE (see comment above) 4624 */ 4625 mdi_pi_set_state(pip, 4626 MDI_PATHINFO_STATE_ONLINE); 4627 vlun->svl_fo_support = opinfo.opinfo_mode; 4628 mdi_pi_set_preferred(pip, 4629 opinfo.opinfo_preferred); 4630 return (MDI_SUCCESS); 4631 } 4632 } 4633 mutex_exit(&vlun->svl_mutex); 4634 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY); 4635 4636 /* 4637 * Initiate auto-failback, if enabled, for the path if the 4638 * path-state is transitioning from OFFLINE->STANDBY and the 4639 * pathclass is the preferred pathclass for this storage. 4640 * NOTE: In the case where opinfo_path_state is SCSI_PATH_ACTIVE 4641 * (above), where the pi state is set to STANDBY, we don't 4642 * initiate auto-failback, as the next IO will take care of 4643 * this. See comment above.
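 * A condensed sketch (names from this file) of the failback
 * condition evaluated below:
 *	auto-failback enabled &&
 *	    (pclass == best_pclass || path is preferred) &&
 *	    old pi state is OFFLINE or INIT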
4644 */ 4645 (void) fo->sfo_pathclass_next(NULL, &best_pclass, 4646 vlun->svl_fops_ctpriv); 4647 if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) == 4648 VHCI_CONF_FLAGS_AUTO_FAILBACK) && 4649 ((strcmp(pclass, best_pclass) == 0) || 4650 mdi_pi_get_preferred(pip) == 1) && 4651 ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)|| 4652 (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) { 4653 VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p" 4654 " OFFLINE->STANDBY transition for lun %s\n", 4655 best_pclass, (void *)pip, vlun->svl_lun_wwn)); 4656 (void) taskq_dispatch(vhci->vhci_taskq, 4657 vhci_initiate_auto_failback, (void *) vlun, 4658 KM_SLEEP); 4659 } 4660 } 4661 vlun->svl_fo_support = opinfo.opinfo_mode; 4662 mdi_pi_set_preferred(pip, opinfo.opinfo_preferred); 4663 4664 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x," 4665 " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n", 4666 opinfo.opinfo_rev, opinfo.opinfo_path_state, 4667 opinfo.opinfo_preferred, opinfo.opinfo_mode)); 4668 4669 return (MDI_SUCCESS); 4670 } 4671 4672 /* 4673 * Form the kstat name and call mdi_pi_kstat_create() 4674 */ 4675 void 4676 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip) 4677 { 4678 dev_info_t *tgt_dip; 4679 dev_info_t *pdip; 4680 char *guid; 4681 char *target_port, *target_port_dup; 4682 char ks_name[KSTAT_STRLEN]; 4683 uint_t pid; 4684 int by_id; 4685 mod_hash_val_t hv; 4686 4687 4688 /* return if we have already allocated kstats */ 4689 if (mdi_pi_kstat_exists(pip)) 4690 return; 4691 4692 /* 4693 * We need instance numbers to create a kstat name; return if we don't 4694 * have instance numbers assigned yet. 4695 */ 4696 tgt_dip = mdi_pi_get_client(pip); 4697 pdip = mdi_pi_get_phci(pip); 4698 if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1)) 4699 return; 4700 4701 /* 4702 * A path oriented kstat has a ks_name of the form: 4703 * 4704 * <client-driver><instance>.t<pid>.<pHCI-driver><instance> 4705 * 4706 * We maintain a bidirectional 'target-port' to <pid> map, 4707 * called targetmap. All pathinfo nodes with the same 4708 * 'target-port' map to the same <pid>. The iostat(1M) code, 4709 * when parsing a path oriented kstat name, uses the <pid> as 4710 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order 4711 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats, 4712 * this ioctl needs to translate a <pid> to a 'target-port' 4713 * even after all pathinfo nodes associated with the 4714 * 'target-port' have been destroyed. This is needed to support 4715 * consistent first-iteration activity-since-boot iostat(1M) 4716 * output. Because of this requirement, the mapping can't be 4717 * based on pathinfo information in a devinfo snapshot. 4718 */ 4719 4720 /* determine 'target-port' */ 4721 if (mdi_prop_lookup_string(pip, 4722 "target-port", &target_port) == MDI_SUCCESS) { 4723 target_port_dup = i_ddi_strdup(target_port, KM_SLEEP); 4724 (void) mdi_prop_free(target_port); 4725 by_id = 1; 4726 } else { 4727 /* 4728 * If the pHCI did not set up 'target-port' on this 4729 * pathinfo node, assume that our client is the only 4730 * one with paths to the device by using the guid 4731 * value as the 'target-port'. Since no other client 4732 * will have the same guid, no other client will use 4733 * the same <pid>. NOTE: a client with an instance 4734 * number always has a guid.
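 * For illustration only (driver names, instances and <pid> are
 * hypothetical here), a resulting path-oriented ks_name built
 * below might look like:
 *	ssd1.t4.fp0
 * i.e. <client-driver><instance>.t<pid>.<pHCI-driver><instance>.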
4735 */ 4736 (void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, 4737 PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid); 4738 target_port_dup = i_ddi_strdup(guid, KM_SLEEP); 4739 ddi_prop_free(guid); 4740 4741 /* 4742 * For this type of mapping we don't want the 4743 * <id> -> 'target-port' mapping to be made. This 4744 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl 4745 * to fail, and the iostat(1M) long '-n' output will 4746 * still use the <pid>. We do this because we just 4747 * made up the 'target-port' using the guid, and we 4748 * don't want to expose that fact in iostat output. 4749 */ 4750 by_id = 0; 4751 } 4752 4753 /* find/establish <pid> given 'target-port' */ 4754 mutex_enter(&vhci_targetmap_mutex); 4755 if (mod_hash_find(vhci_targetmap_byport, 4756 (mod_hash_key_t)target_port_dup, &hv) == 0) { 4757 pid = (int)(intptr_t)hv; /* mapping exists */ 4758 } else { 4759 pid = vhci_targetmap_pid++; /* new mapping */ 4760 4761 (void) mod_hash_insert(vhci_targetmap_byport, 4762 (mod_hash_key_t)target_port_dup, 4763 (mod_hash_val_t)(intptr_t)pid); 4764 if (by_id) { 4765 (void) mod_hash_insert(vhci_targetmap_bypid, 4766 (mod_hash_key_t)(uintptr_t)pid, 4767 (mod_hash_val_t)(uintptr_t)target_port_dup); 4768 } 4769 target_port_dup = NULL; /* owned by hash */ 4770 } 4771 mutex_exit(&vhci_targetmap_mutex); 4772 4773 /* form kstat name */ 4774 (void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d", 4775 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip), 4776 pid, ddi_driver_name(pdip), ddi_get_instance(pdip)); 4777 4778 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p " 4779 "kstat %s: pid %x <-> port %s\n", (void *)pip, 4780 ks_name, pid, target_port_dup)); 4781 if (target_port_dup) 4782 kmem_free(target_port_dup, strlen(target_port_dup) + 1); 4783 4784 /* call mdi to create kstats with the name we built */ 4785 (void) mdi_pi_kstat_create(pip, ks_name); 4786 } 4787 4788 /* ARGSUSED */ 4789 static int 4790 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 4791 { 4792 scsi_hba_tran_t *hba = NULL; 4793 struct scsi_device *psd = NULL; 4794 scsi_vhci_lun_t *vlun = NULL; 4795 dev_info_t *pdip = NULL; 4796 dev_info_t *tgt_dip; 4797 struct scsi_vhci *vhci; 4798 char *guid; 4799 struct scsi_failover *sf; 4800 struct scsi_failover_ops *sfo; 4801 char *override; 4802 scsi_vhci_priv_t *svp = NULL; 4803 struct buf *bp; 4804 struct scsi_address *ap; 4805 struct scsi_pkt *pkt; 4806 int rval = MDI_FAILURE; 4807 uint_t inq_size = VHCI_STD_INQ_SIZE; 4808 mpapi_item_list_t *list_ptr; 4809 mpapi_lu_data_t *ld; 4810 4811 ASSERT(vdip != NULL); 4812 ASSERT(pip != NULL); 4813 4814 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip)); 4815 ASSERT(vhci != NULL); 4816 4817 pdip = mdi_pi_get_phci(pip); 4818 hba = ddi_get_driver_private(pdip); 4819 ASSERT(hba != NULL); 4820 4821 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 4822 ASSERT(svp != NULL); 4823 4824 tgt_dip = mdi_pi_get_client(pip); 4825 ASSERT(tgt_dip != NULL); 4826 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS, 4827 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) { 4828 VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid " 4829 "property failed")); 4830 goto failure; 4831 } 4832 4833 vlun = vhci_lun_lookup(tgt_dip); 4834 ASSERT(vlun != NULL); 4835 4836 ddi_prop_free(guid); 4837 4838 vlun->svl_dip = mdi_pi_get_client(pip); 4839 ASSERT(vlun->svl_dip != NULL); 4840 4841 psd = svp->svp_psd; 4842 ASSERT(psd != NULL); 4843 4844 /* 4845 * For INQUIRY response buffer size, we use 
VHCI_STD_INQ_SIZE (132 bytes) 4846 * instead of SUN_INQSIZE (48 bytes), which is used in the sd layer. This is 4847 * because we could get the vendor-specific parameters (present from the 97th 4848 * byte onwards) which are required to process vendor-specific data 4849 * based on the array type. 4850 * This INQUIRY buffer is freed in vhci_pathinfo_offline but NEVER 4851 * in a different layer like sd/phci transport. In other words, vhci 4852 * maintains its own copy of scsi_device and scsi_inquiry data on a 4853 * per-path basis. 4854 */ 4855 if (psd->sd_inq == NULL) { 4856 psd->sd_inq = (struct scsi_inquiry *) 4857 kmem_zalloc(inq_size, KM_SLEEP); 4858 } 4859 4860 tgt_dip = psd->sd_dev; 4861 ASSERT(tgt_dip != NULL); 4862 4863 /* 4864 * do inquiry to pass into the probe routine; this 4865 * will avoid each probe routine doing a scsi inquiry 4866 */ 4867 bp = getrbuf(KM_SLEEP); 4868 bp->b_un.b_addr = (caddr_t)psd->sd_inq; 4869 bp->b_flags = B_READ; 4870 bp->b_bcount = inq_size; 4871 bp->b_resid = 0; 4872 4873 ap = &psd->sd_address; 4874 pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP0, 4875 sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL); 4876 if (pkt == NULL) { 4877 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: " 4878 "Inquiry init_pkt failed :%p\n", (void *)pip)); 4879 rval = MDI_FAILURE; 4880 goto failure; 4881 } 4882 pkt->pkt_cdbp[0] = SCMD_INQUIRY; 4883 pkt->pkt_cdbp[4] = (uchar_t)inq_size; 4884 pkt->pkt_time = 60; 4885 4886 rval = vhci_do_scsi_cmd(pkt); 4887 scsi_destroy_pkt(pkt); 4888 freerbuf(bp); 4889 if (rval == 0) { 4890 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: " 4891 "Failover Inquiry failed path:%p rval:%x\n", 4892 (void *)pip, rval)); 4893 rval = MDI_FAILURE; 4894 goto failure; 4895 } 4896 4897 /* 4898 * Determine if the device is supported under scsi_vhci, and select 4899 * the failover module. 4900 * 4901 * See if there is a scsi_vhci.conf file override for this device's 4902 * VID/PID. The following values can be returned: 4903 * 4904 * NULL If NULL is returned then there is no scsi_vhci.conf 4905 * override. For NULL, we determine the failover_ops for 4906 * this device by checking the sfo_device_probe entry 4907 * point for each 'fops' module, in order. 4908 * 4909 * NOTE: Correct operation may depend on module ordering 4910 * of 'specific' (failover modules that are completely 4911 * VID/PID table based) to 'generic' (failover modules 4912 * that are based on T10 standards like TPGS). Currently, 4913 * the value of 'ddi-forceload' in scsi_vhci.conf is used 4914 * to establish the module list and probe order. 4915 * 4916 * "NONE" If value "NONE" is returned then there is a 4917 * scsi_vhci.conf VID/PID override to indicate the device 4918 * should not be supported under scsi_vhci (even if there 4919 * is an 'fops' module supporting the device). 4920 * 4921 * "<other>" If another value is returned then that value is the 4922 * name of the 'fops' module that should be used.
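 * A hedged illustration of such a scsi_vhci.conf override entry (the
 * VID/PID strings here are hypothetical; "f_sym" is one of the stock
 * failover module names):
 *	scsi-vhci-failover-override =
 *		"ACME    MirrorBox", "f_sym",
 *		"ACME    LegacyBox", "NONE";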
4923 */ 4924 sfo = NULL; /* "NONE" */ 4925 override = scsi_get_device_type_string( 4926 "scsi-vhci-failover-override", vdip, psd); 4927 4928 if (override == NULL) { 4929 /* NULL: default: select based on sfo_device_probe results */ 4930 for (sf = scsi_failover_table; sf->sf_mod; sf++) { 4931 if ((sf->sf_sfo == NULL) || 4932 sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq, 4933 &vlun->svl_fops_ctpriv) == SFO_DEVICE_PROBE_PHCI) 4934 continue; 4935 4936 /* found failover module, supported under scsi_vhci */ 4937 sfo = sf->sf_sfo; 4938 vlun->svl_fops_name = 4939 i_ddi_strdup(sfo->sfo_name, KM_SLEEP); 4940 break; 4941 } 4942 } else if (strcasecmp(override, "NONE")) { 4943 /* !"NONE": select based on driver.conf specified name */ 4944 for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) { 4945 if ((sf->sf_sfo == NULL) || 4946 (sf->sf_sfo->sfo_name == NULL) || 4947 strcmp(override, sf->sf_sfo->sfo_name)) 4948 continue; 4949 4950 /* found failover module, supported under scsi_vhci */ 4951 sfo = sf->sf_sfo; 4952 vlun->svl_fops_name = kmem_alloc(strlen("conf ") + 4953 strlen(sfo->sfo_name) + 1, KM_SLEEP); 4954 (void) sprintf(vlun->svl_fops_name, "conf %s", 4955 sfo->sfo_name); 4956 break; 4957 } 4958 } 4959 if (override) 4960 kmem_free(override, strlen(override) + 1); 4961 4962 if (sfo == NULL) { 4963 /* no failover module - device not supported */ 4964 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, 4965 "!vhci_pathinfo_online: dev (path 0x%p) not " 4966 "supported\n", (void *)pip)); 4967 vlun->svl_not_supported = 1; 4968 rval = MDI_NOT_SUPPORTED; 4969 goto done; 4970 } 4971 4972 /* failover supported for device - save failover_ops in vlun */ 4973 vlun->svl_fops = sfo; 4974 4975 /* 4976 * Obtain the device-type based mpxio options as specified in 4977 * the scsi_vhci.conf file. 4978 * 4979 * NOTE: currently, the end result is a call to 4980 * mdi_set_lb_region_size(). 4981 */ 4982 vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd); 4983 4984 /* 4985 * The device probe or options in the conf file may have set/changed 4986 * the lb policy; save the current value. 4987 */ 4988 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip); 4989 4990 /* 4991 * if PGR is active, revalidate the key and register on this path 4992 * also, if the key is still valid 4993 */ 4994 sema_p(&vlun->svl_pgr_sema); 4995 if (vlun->svl_pgr_active) { 4996 rval = vhci_pgr_validate_and_register(svp); 4997 if (rval != 1) { 4998 rval = MDI_FAILURE; 4999 sema_v(&vlun->svl_pgr_sema); 5000 goto failure; 5001 } 5002 } 5003 sema_v(&vlun->svl_pgr_sema); 5004 5005 if (svp->svp_new_path) { 5006 /* 5007 * Last chance to perform any cleanup operations on this 5008 * new path before making this path completely online. 5009 */ 5010 svp->svp_new_path = 0; 5011 5012 /* 5013 * If scsi_vhci knows the lun is already RESERVE'd, 5014 * then skip the issue of RELEASE on the new path. 5015 */ 5016 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) { 5017 /* 5018 * Issue SCSI-2 RELEASE only for the first time on 5019 * a new path, just in case the host rebooted and 5020 * a reservation is still pending on this path. 5021 * IBM Shark storage does not clear RESERVE upon 5022 * host reboot.
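 * (For reference: SCMD_RELEASE used below is the SCSI-2 RELEASE(6)
 * opcode, 0x17; the command moves no data, which is why the packet
 * is allocated with a NULL buf pointer.)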
5023 */ 5024 ap = &psd->sd_address; 5025 pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0, 5026 sizeof (struct scsi_arq_status), 0, 0, 5027 SLEEP_FUNC, NULL); 5028 if (pkt == NULL) { 5029 VHCI_DEBUG(1, (CE_NOTE, NULL, 5030 "!vhci_pathinfo_online: " 5031 "Release init_pkt failed :%p\n", 5032 (void *)pip)); 5033 rval = MDI_FAILURE; 5034 goto failure; 5035 } 5036 pkt->pkt_cdbp[0] = SCMD_RELEASE; 5037 pkt->pkt_time = 60; 5038 5039 VHCI_DEBUG(1, (CE_NOTE, NULL, 5040 "!vhci_path_online: path:%p " 5041 "Issued SCSI-2 RELEASE\n", (void *)pip)); 5042 5043 /* Ignore the return value */ 5044 (void) vhci_do_scsi_cmd(pkt); 5045 scsi_destroy_pkt(pkt); 5046 } 5047 } 5048 5049 rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci); 5050 if (rval == MDI_FAILURE) { 5051 goto failure; 5052 } 5053 5054 /* Initialize MP-API data */ 5055 vhci_update_mpapi_data(vhci, vlun, pip); 5056 5057 /* 5058 * MP-API also needs the Inquiry data to be maintained in the 5059 * mp_vendor_prop_t structure, so find the lun and update its 5060 * structure with this data. 5061 */ 5062 list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL, 5063 MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun); 5064 ld = (mpapi_lu_data_t *)list_ptr->item->idata; 5065 if (ld != NULL) { 5066 bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8); 5067 bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16); 5068 bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4); 5069 } else { 5070 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: " 5071 "mpapi_lu_data_t is NULL")); 5072 } 5073 5074 /* create kstats for path */ 5075 vhci_kstat_create_pathinfo(pip); 5076 5077 done: 5078 mutex_enter(&vhci_global_mutex); 5079 cv_broadcast(&vhci_cv); 5080 mutex_exit(&vhci_global_mutex); 5081 5082 if (vlun->svl_setcap_done) { 5083 (void) vhci_pHCI_cap(ap, "sector-size", 5084 vlun->svl_sector_size, 1, pip); 5085 } 5086 5087 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n", 5088 (void *)pip)); 5089 5090 failure: 5091 if ((rval != MDI_SUCCESS) && psd->sd_inq) { 5092 kmem_free((caddr_t)psd->sd_inq, inq_size); 5093 psd->sd_inq = (struct scsi_inquiry *)NULL; 5094 } 5095 return (rval); 5096 } 5097 5098 /* 5099 * path offline handler. Release all bindings that will not be 5100 * released by the normal packet transport/completion code path. 5101 * Since we don't (presently) keep any bindings alive outside of 5102 * the in-transport packets (which will be released on completion) 5103 * there is not much to do here. 
5104 */ 5105 /* ARGSUSED */ 5106 static int 5107 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags) 5108 { 5109 scsi_hba_tran_t *hba = NULL; 5110 struct scsi_device *psd = NULL; 5111 dev_info_t *pdip = NULL; 5112 dev_info_t *cdip = NULL; 5113 scsi_vhci_priv_t *svp = NULL; 5114 uint_t inq_size = VHCI_STD_INQ_SIZE; 5115 5116 ASSERT(vdip != NULL); 5117 ASSERT(pip != NULL); 5118 5119 pdip = mdi_pi_get_phci(pip); 5120 ASSERT(pdip != NULL); 5121 if (pdip == NULL) { 5122 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5123 "phci dip", (void *)pip)); 5124 return (MDI_FAILURE); 5125 } 5126 5127 cdip = mdi_pi_get_client(pip); 5128 ASSERT(cdip != NULL); 5129 if (cdip == NULL) { 5130 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL " 5131 "client dip", (void *)pip)); 5132 return (MDI_FAILURE); 5133 } 5134 5135 hba = ddi_get_driver_private(pdip); 5136 ASSERT(hba != NULL); 5137 5138 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip); 5139 if (svp == NULL) { 5140 /* 5141 * mdi_pathinfo node in INIT state can have vHCI private 5142 * information set to null 5143 */ 5144 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5145 "svp is NULL for pip 0x%p\n", (void *)pip)); 5146 return (MDI_SUCCESS); 5147 } 5148 5149 psd = svp->svp_psd; 5150 ASSERT(psd != NULL); 5151 5152 mutex_enter(&svp->svp_mutex); 5153 5154 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5155 "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip)); 5156 while (svp->svp_cmds != 0) { 5157 if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex, 5158 ddi_get_lbolt() + 5159 drv_usectohz(vhci_path_quiesce_timeout * 1000000)) == -1) { 5160 /* 5161 * The timeout was reached without the condition 5162 * being signaled. 5163 */ 5164 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5165 "Timeout reached on path 0x%p without the cond\n", 5166 (void *)pip)); 5167 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: " 5168 "%d cmds still pending on path: 0x%p\n", 5169 svp->svp_cmds, (void *)pip)); 5170 break; 5171 } 5172 } 5173 mutex_exit(&svp->svp_mutex); 5174 5175 /* 5176 * Check to see if this vlun has an active SCSI-II RESERVE and whether 5177 * this is the pip for the path that has been reserved. 5178 * If so, clear the reservation by sending a reset, so the host will 5179 * not get a reservation conflict. Reset the VLUN_RESERVE_ACTIVE_FLG 5180 * flag for this lun. A reset notify is also sent to the target driver 5181 * just in case the POR check condition is cleared by some other layer 5182 * in the stack.
5183 */ 5184 if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) { 5185 if (pip == svp->svp_svl->svl_resrv_pip) { 5186 if (vhci_recovery_reset(svp->svp_svl, 5187 &svp->svp_psd->sd_address, TRUE, 5188 VHCI_DEPTH_TARGET) == 0) { 5189 VHCI_DEBUG(1, (CE_NOTE, NULL, 5190 "!vhci_pathinfo_offline (pip:%p):" 5191 "reset failed, retrying\n", (void *)pip)); 5192 delay(1*drv_usectohz(1000000)); 5193 if (vhci_recovery_reset(svp->svp_svl, 5194 &svp->svp_psd->sd_address, TRUE, 5195 VHCI_DEPTH_TARGET) == 0) { 5196 VHCI_DEBUG(1, (CE_NOTE, NULL, 5197 "!vhci_pathinfo_offline " 5198 "(pip:%p): reset failed, " 5199 "giving up!\n", (void *)pip)); 5200 } 5201 } 5202 svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG; 5203 } 5204 } 5205 5206 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE); 5207 if (psd->sd_inq) { 5208 kmem_free((caddr_t)psd->sd_inq, inq_size); 5209 psd->sd_inq = (struct scsi_inquiry *)NULL; 5210 } 5211 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED); 5212 5213 VHCI_DEBUG(1, (CE_NOTE, NULL, 5214 "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip)); 5215 return (MDI_SUCCESS); 5216 } 5217 5218 5219 /* 5220 * routine for SCSI VHCI IOCTL implementation. 5221 */ 5222 /* ARGSUSED */ 5223 static int 5224 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 5225 { 5226 struct scsi_vhci *vhci; 5227 dev_info_t *vdip; 5228 mdi_pathinfo_t *pip; 5229 int instance, held; 5230 int retval = 0; 5231 caddr_t phci_path = NULL, client_path = NULL; 5232 caddr_t paddr = NULL; 5233 sv_iocdata_t ioc; 5234 sv_iocdata_t *pioc = &ioc; 5235 sv_switch_to_cntlr_iocdata_t iocsc; 5236 sv_switch_to_cntlr_iocdata_t *piocsc = &iocsc; 5237 caddr_t s; 5238 scsi_vhci_lun_t *vlun; 5239 struct scsi_failover_ops *fo; 5240 char *pclass; 5241 5242 /* Check for validity of vhci structure */ 5243 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev))); 5244 if (vhci == NULL) { 5245 return (ENXIO); 5246 } 5247 5248 mutex_enter(&vhci->vhci_mutex); 5249 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) { 5250 mutex_exit(&vhci->vhci_mutex); 5251 return (ENXIO); 5252 } 5253 mutex_exit(&vhci->vhci_mutex); 5254 5255 /* Get the vhci dip */ 5256 vdip = vhci->vhci_dip; 5257 ASSERT(vdip != NULL); 5258 instance = ddi_get_instance(vdip); 5259 5260 /* Allocate memory for getting parameters from userland */ 5261 phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5262 client_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5263 paddr = kmem_zalloc(MAXNAMELEN, KM_SLEEP); 5264 5265 /* 5266 * Set a local variable indicating the ioctl name. Used for 5267 * printing debug strings. 
5268 */ 5269 switch (cmd) { 5270 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5271 s = "GET_CLIENT_MULTIPATH_INFO"; 5272 break; 5273 5274 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5275 s = "GET_PHCI_MULTIPATH_INFO"; 5276 break; 5277 5278 case SCSI_VHCI_GET_CLIENT_NAME: 5279 s = "GET_CLIENT_NAME"; 5280 break; 5281 5282 case SCSI_VHCI_PATH_ONLINE: 5283 s = "PATH_ONLINE"; 5284 break; 5285 5286 case SCSI_VHCI_PATH_OFFLINE: 5287 s = "PATH_OFFLINE"; 5288 break; 5289 5290 case SCSI_VHCI_PATH_STANDBY: 5291 s = "PATH_STANDBY"; 5292 break; 5293 5294 case SCSI_VHCI_PATH_TEST: 5295 s = "PATH_TEST"; 5296 break; 5297 5298 case SCSI_VHCI_SWITCH_TO_CNTLR: 5299 s = "SWITCH_TO_CNTLR"; 5300 break; 5301 case SCSI_VHCI_PATH_DISABLE: 5302 s = "PATH_DISABLE"; 5303 break; 5304 case SCSI_VHCI_PATH_ENABLE: 5305 s = "PATH_ENABLE"; 5306 break; 5307 5308 case SCSI_VHCI_GET_TARGET_LONGNAME: 5309 s = "GET_TARGET_LONGNAME"; 5310 break; 5311 5312 #ifdef DEBUG 5313 case SCSI_VHCI_CONFIGURE_PHCI: 5314 s = "CONFIGURE_PHCI"; 5315 break; 5316 5317 case SCSI_VHCI_UNCONFIGURE_PHCI: 5318 s = "UNCONFIGURE_PHCI"; 5319 break; 5320 #endif 5321 5322 default: 5323 s = "Unknown"; 5324 vhci_log(CE_NOTE, vdip, 5325 "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd); 5326 retval = ENOTSUP; 5327 break; 5328 } 5329 if (retval != 0) { 5330 goto end; 5331 } 5332 5333 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s)); 5334 5335 /* 5336 * Get IOCTL parameters from userland 5337 */ 5338 switch (cmd) { 5339 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5340 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5341 case SCSI_VHCI_GET_CLIENT_NAME: 5342 case SCSI_VHCI_PATH_ONLINE: 5343 case SCSI_VHCI_PATH_OFFLINE: 5344 case SCSI_VHCI_PATH_STANDBY: 5345 case SCSI_VHCI_PATH_TEST: 5346 case SCSI_VHCI_PATH_DISABLE: 5347 case SCSI_VHCI_PATH_ENABLE: 5348 case SCSI_VHCI_GET_TARGET_LONGNAME: 5349 #ifdef DEBUG 5350 case SCSI_VHCI_CONFIGURE_PHCI: 5351 case SCSI_VHCI_UNCONFIGURE_PHCI: 5352 #endif 5353 retval = vhci_get_iocdata((const void *)data, pioc, mode, s); 5354 break; 5355 5356 case SCSI_VHCI_SWITCH_TO_CNTLR: 5357 retval = vhci_get_iocswitchdata((const void *)data, piocsc, 5358 mode, s); 5359 break; 5360 } 5361 if (retval != 0) { 5362 goto end; 5363 } 5364 5365 5366 /* 5367 * Process the IOCTL 5368 */ 5369 switch (cmd) { 5370 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO: 5371 { 5372 uint_t num_paths; /* Num paths to client dev */ 5373 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5374 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5375 dev_info_t *cdip; /* Client device dip */ 5376 5377 if (pioc->ret_elem == NULL) { 5378 retval = EINVAL; 5379 break; 5380 } 5381 5382 /* Get client device path from user land */ 5383 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) { 5384 retval = EFAULT; 5385 break; 5386 } 5387 5388 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5389 "client <%s>", s, client_path)); 5390 5391 /* Get number of paths to this client device */ 5392 if ((cdip = mdi_client_path2devinfo(vdip, client_path)) 5393 == NULL) { 5394 retval = ENXIO; 5395 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5396 "client dip doesn't exist. 
invalid path <%s>", 5397 s, client_path)); 5398 break; 5399 } 5400 num_paths = mdi_client_get_path_count(cdip); 5401 5402 if (ddi_copyout(&num_paths, pioc->ret_elem, 5403 sizeof (num_paths), mode)) { 5404 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5405 "num_paths copyout failed", s)); 5406 retval = EFAULT; 5407 break; 5408 } 5409 5410 /* If user just wanted num_paths, then return */ 5411 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5412 num_paths == 0) { 5413 break; 5414 } 5415 5416 /* Set num_paths to value as much as can be sent to userland */ 5417 if (num_paths > pioc->buf_elem) { 5418 num_paths = pioc->buf_elem; 5419 } 5420 5421 /* Allocate memory and get userland pointers */ 5422 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5423 pioc, mode, s) != 0) { 5424 retval = EFAULT; 5425 break; 5426 } 5427 ASSERT(upibuf != NULL); 5428 ASSERT(kpibuf != NULL); 5429 5430 /* 5431 * Get the path information and send it to userland. 5432 */ 5433 if (vhci_get_client_path_list(cdip, kpibuf, num_paths) 5434 != MDI_SUCCESS) { 5435 retval = ENXIO; 5436 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5437 break; 5438 } 5439 5440 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5441 pioc, mode, s)) { 5442 retval = EFAULT; 5443 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5444 break; 5445 } 5446 5447 /* Free the memory allocated for path information */ 5448 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5449 break; 5450 } 5451 5452 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO: 5453 { 5454 uint_t num_paths; /* Num paths to client dev */ 5455 sv_path_info_t *upibuf = NULL; /* To keep userland values */ 5456 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */ 5457 dev_info_t *pdip; /* PHCI device dip */ 5458 5459 if (pioc->ret_elem == NULL) { 5460 retval = EINVAL; 5461 break; 5462 } 5463 5464 /* Get PHCI device path from user land */ 5465 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) { 5466 retval = EFAULT; 5467 break; 5468 } 5469 5470 VHCI_DEBUG(6, (CE_WARN, vdip, 5471 "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path)); 5472 5473 /* Get number of devices associated with this PHCI device */ 5474 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5475 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5476 "phci dip doesn't exist. invalid path <%s>", 5477 s, phci_path)); 5478 retval = ENXIO; 5479 break; 5480 } 5481 5482 num_paths = mdi_phci_get_path_count(pdip); 5483 5484 if (ddi_copyout(&num_paths, pioc->ret_elem, 5485 sizeof (num_paths), mode)) { 5486 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5487 "num_paths copyout failed", s)); 5488 retval = EFAULT; 5489 break; 5490 } 5491 5492 /* If user just wanted num_paths, then return */ 5493 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL || 5494 num_paths == 0) { 5495 break; 5496 } 5497 5498 /* Set num_paths to value as much as can be sent to userland */ 5499 if (num_paths > pioc->buf_elem) { 5500 num_paths = pioc->buf_elem; 5501 } 5502 5503 /* Allocate memory and get userland pointers */ 5504 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths, 5505 pioc, mode, s) != 0) { 5506 retval = EFAULT; 5507 break; 5508 } 5509 ASSERT(upibuf != NULL); 5510 ASSERT(kpibuf != NULL); 5511 5512 /* 5513 * Get the path information and send it to userland. 
5514 */ 5515 if (vhci_get_phci_path_list(pdip, kpibuf, num_paths) 5516 != MDI_SUCCESS) { 5517 retval = ENXIO; 5518 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5519 break; 5520 } 5521 5522 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths, 5523 pioc, mode, s)) { 5524 retval = EFAULT; 5525 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5526 break; 5527 } 5528 5529 /* Free the memory allocated for path information */ 5530 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths); 5531 break; 5532 } 5533 5534 case SCSI_VHCI_GET_CLIENT_NAME: 5535 { 5536 dev_info_t *cdip, *pdip; 5537 5538 /* Get PHCI path and device address from user land */ 5539 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5540 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5541 retval = EFAULT; 5542 break; 5543 } 5544 5545 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5546 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5547 5548 /* Get the PHCI dip */ 5549 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5550 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5551 "phci dip doesn't exist. invalid path <%s>", 5552 s, phci_path)); 5553 retval = ENXIO; 5554 break; 5555 } 5556 5557 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5558 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5559 "pathinfo doesn't exist. invalid device addr", s)); 5560 retval = ENXIO; 5561 break; 5562 } 5563 5564 /* Get the client device pathname and send to userland */ 5565 cdip = mdi_pi_get_client(pip); 5566 vhci_ioc_devi_to_path(cdip, client_path); 5567 5568 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5569 "client <%s>", s, client_path)); 5570 5571 if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) { 5572 retval = EFAULT; 5573 break; 5574 } 5575 break; 5576 } 5577 5578 case SCSI_VHCI_PATH_ONLINE: 5579 case SCSI_VHCI_PATH_OFFLINE: 5580 case SCSI_VHCI_PATH_STANDBY: 5581 case SCSI_VHCI_PATH_TEST: 5582 { 5583 dev_info_t *pdip; /* PHCI dip */ 5584 5585 /* Get PHCI path and device address from user land */ 5586 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) || 5587 vhci_ioc_get_paddr(pioc, paddr, mode, s)) { 5588 retval = EFAULT; 5589 break; 5590 } 5591 5592 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5593 "phci <%s>, paddr <%s>", s, phci_path, paddr)); 5594 5595 /* Get the PHCI dip */ 5596 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) { 5597 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5598 "phci dip doesn't exist. invalid path <%s>", 5599 s, phci_path)); 5600 retval = ENXIO; 5601 break; 5602 } 5603 5604 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) { 5605 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5606 "pathinfo doesn't exist. 
invalid device addr", s)); 5607 retval = ENXIO; 5608 break; 5609 } 5610 5611 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5612 "Calling MDI function to change device state", s)); 5613 5614 switch (cmd) { 5615 case SCSI_VHCI_PATH_ONLINE: 5616 retval = mdi_pi_online(pip, 0); 5617 break; 5618 5619 case SCSI_VHCI_PATH_OFFLINE: 5620 retval = mdi_pi_offline(pip, 0); 5621 break; 5622 5623 case SCSI_VHCI_PATH_STANDBY: 5624 retval = mdi_pi_standby(pip, 0); 5625 break; 5626 5627 case SCSI_VHCI_PATH_TEST: 5628 break; 5629 } 5630 break; 5631 } 5632 5633 case SCSI_VHCI_SWITCH_TO_CNTLR: 5634 { 5635 dev_info_t *cdip; 5636 struct scsi_device *devp; 5637 5638 /* Get the client device pathname */ 5639 if (ddi_copyin(piocsc->client, client_path, 5640 MAXPATHLEN, mode)) { 5641 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5642 "client_path copyin failed", s)); 5643 retval = EFAULT; 5644 break; 5645 } 5646 5647 /* Get the path class to which the user wants to switch */ 5648 if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) { 5649 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> " 5650 "controller_class copyin failed", s)); 5651 retval = EFAULT; 5652 break; 5653 } 5654 5655 /* Perform validity checks */ 5656 if ((cdip = mdi_client_path2devinfo(vdip, 5657 client_path)) == NULL) { 5658 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5659 "client dip doesn't exist. invalid path <%s>", 5660 s, client_path)); 5661 retval = ENXIO; 5662 break; 5663 } 5664 5665 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func " 5666 "to switch controller")); 5667 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> " 5668 "class <%s>", client_path, paddr)); 5669 5670 if (strcmp(paddr, PCLASS_PRIMARY) && 5671 strcmp(paddr, PCLASS_SECONDARY)) { 5672 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5673 "invalid path class <%s>", s, paddr)); 5674 retval = ENXIO; 5675 break; 5676 } 5677 5678 devp = ddi_get_driver_private(cdip); 5679 if (devp == NULL) { 5680 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> " 5681 "invalid scsi device <%s>", s, client_path)); 5682 retval = ENXIO; 5683 break; 5684 } 5685 vlun = ADDR2VLUN(&devp->sd_address); 5686 ASSERT(vlun); 5687 5688 /* 5689 * Check to see if the device has only one pclass, PRIMARY. 5690 * If so, this device doesn't support failover. It is assumed 5691 * that a device with one pclass has it as PRIMARY, as that's 5692 * the case today. If this is not true and in the future other 5693 * symmetric devices are supported with another pclass, this 5694 * IOCTL will have to be overhauled anyway, as the only 5695 * arguments it currently accepts are PRIMARY and SECONDARY.
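 * A hedged reading of the check below: sfo_pathclass_next(
 * PCLASS_PRIMARY, ...) returning nonzero means PRIMARY has no
 * successor class, i.e. the device is symmetric with nothing to
 * switch to, so the ioctl fails with ENOTSUP.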
		fo = vlun->svl_fops;
		if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass,
		    vlun->svl_fops_ctpriv)) {
			retval = ENOTSUP;
			break;
		}

		VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
		mutex_enter(&vlun->svl_mutex);
		if (vlun->svl_active_pclass != NULL) {
			if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
				mutex_exit(&vlun->svl_mutex);
				retval = EALREADY;
				VHCI_RELEASE_LUN(vlun);
				break;
			}
		}
		mutex_exit(&vlun->svl_mutex);
		/* Call mdi function to cause a switch over */
		retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
		if (retval == MDI_SUCCESS) {
			retval = 0;
		} else if (retval == MDI_BUSY) {
			retval = EBUSY;
		} else {
			retval = EIO;
		}
		VHCI_RELEASE_LUN(vlun);
		break;
	}

	case SCSI_VHCI_PATH_ENABLE:
	case SCSI_VHCI_PATH_DISABLE:
	{
		dev_info_t *cdip, *pdip;

		/*
		 * Get client device path from user land
		 */
		if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
			retval = EFAULT;
			break;
		}

		/*
		 * Get Phci device path from user land
		 */
		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
			retval = EFAULT;
			break;
		}

		/*
		 * Get the devinfo for the Phci.
		 */
		if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "phci dip doesn't exist. invalid path <%s>",
			    s, phci_path));
			retval = ENXIO;
			break;
		}

		/*
		 * If the client path is set to /scsi_vhci then we need
		 * to do the operation on all the clients so set cdip to NULL.
		 * Else, try to get the client dip.
		 */
		if (strcmp(client_path, "/scsi_vhci") == 0) {
			cdip = NULL;
		} else {
			if ((cdip = mdi_client_path2devinfo(vdip,
			    client_path)) == NULL) {
				retval = ENXIO;
				VHCI_DEBUG(1, (CE_WARN, NULL,
				    "!vhci_ioctl: ioctl <%s> client dip "
				    "doesn't exist. invalid path <%s>",
				    s, client_path));
				break;
			}
		}

		if (cmd == SCSI_VHCI_PATH_ENABLE)
			retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
		else
			retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);

		break;
	}

	case SCSI_VHCI_GET_TARGET_LONGNAME:
	{
		uint_t		pid = pioc->buf_elem;
		char		*target_port;
		mod_hash_val_t	hv;

		/* targetmap lookup of 'target-port' by <pid> */
		if (mod_hash_find(vhci_targetmap_bypid,
		    (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
			/*
			 * NOTE: failure to find the mapping is OK for guid
			 * based 'target-port' values.
			 */
			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "targetport mapping doesn't exist: pid %d",
			    s, pid));
			retval = ENXIO;
			break;
		}

		/* copyout 'target-port' result */
		target_port = (char *)hv;
		if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
			VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "targetport copyout failed: len: %d",
			    s, (int)strlen(target_port)));
			retval = EFAULT;
		}
		break;
	}

#ifdef DEBUG
	case SCSI_VHCI_CONFIGURE_PHCI:
	{
		dev_info_t *pdip;

		/* Get PHCI path and device address from user land */
		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
			retval = EFAULT;
			break;
		}

		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
		    "phci <%s>", s, phci_path));

		/* Get the PHCI dip */
		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "phci dip doesn't exist. invalid path <%s>",
			    s, phci_path));
			retval = ENXIO;
			break;
		}

		if (ndi_devi_config(pdip,
		    NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) {
			retval = EIO;
		}

		ddi_release_devi(pdip);
		break;
	}

	case SCSI_VHCI_UNCONFIGURE_PHCI:
	{
		dev_info_t *pdip;

		/* Get PHCI path and device address from user land */
		if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
			retval = EFAULT;
			break;
		}

		VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
		    "phci <%s>", s, phci_path));

		/* Get the PHCI dip */
		if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
			VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
			    "phci dip doesn't exist. invalid path <%s>",
			    s, phci_path));
			retval = ENXIO;
			break;
		}

		if (ndi_devi_unconfig(pdip,
		    NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
			retval = EBUSY;
		}

		ddi_release_devi(pdip);
		break;
	}
#endif
	}

end:
	/* Free the memory allocated above */
	if (phci_path != NULL) {
		kmem_free(phci_path, MAXPATHLEN);
	}
	if (client_path != NULL) {
		kmem_free(client_path, MAXPATHLEN);
	}
	if (paddr != NULL) {
		kmem_free(paddr, MAXNAMELEN);
	}
	return (retval);
}

/*
 * devctl IOCTL support for client device DR
 */
/* ARGSUSED */
int
vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	dev_info_t *self;
	dev_info_t *child;
	scsi_hba_tran_t *hba;
	struct devctl_iocdata *dcp;
	struct scsi_vhci *vhci;
	int rv = 0;
	int retval = 0;
	scsi_vhci_priv_t *svp;
	mdi_pathinfo_t *pip;

	if ((vhci = ddi_get_soft_state(vhci_softstate,
	    MINOR2INST(getminor(dev)))) == NULL)
		return (ENXIO);

	/*
	 * check if :devctl minor device has been opened
	 */
	mutex_enter(&vhci->vhci_mutex);
	if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
		mutex_exit(&vhci->vhci_mutex);
		return (ENXIO);
	}
	mutex_exit(&vhci->vhci_mutex);

	self = vhci->vhci_dip;
	hba = ddi_get_driver_private(self);
	if (hba == NULL)
		return (ENXIO);

	/*
	 * We can use the generic implementation for these ioctls
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {

	case DEVCTL_DEVICE_RESET:
		/*
		 * lookup and hold child device
		 */
		if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
		    ndi_dc_getaddr(dcp))) == NULL) {
			rv = ENXIO;
			break;
		}
		retval = mdi_select_path(child, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if ((retval != MDI_SUCCESS) || (pip == NULL)) {
			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:"
			    "Unable to get a path, dip 0x%p", (void *)child));
			rv = ENXIO;
			break;
		}
		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
		if (vhci_recovery_reset(svp->svp_svl,
		    &svp->svp_psd->sd_address, TRUE,
		    VHCI_DEPTH_TARGET) == 0) {
			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "!vhci_ioctl(pip:%p): "
			    "reset failed\n", (void *)pip));
			rv = ENXIO;
		}
		mdi_rele_path(pip);
		break;

	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
#ifdef DEBUG
	case DEVCTL_BUS_CONFIGURE:
	case DEVCTL_BUS_UNCONFIGURE:
#endif
		rv = ENOTSUP;
		break;

	default:
		rv = ENOTTY;
	} /* end of outer switch */

	ndi_dc_freehdl(dcp);
	return (rv);
}
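/*
 * Illustrative note (not in the original source): a userland consumer
 * would normally reach the DEVCTL_DEVICE_RESET case above through
 * libdevice(3LIB) rather than by issuing the ioctl directly.  A minimal
 * sketch, assuming a valid vHCI client minor path (the device path shown
 * is a hypothetical placeholder):
 *
 *	devctl_hdl_t hdl;
 *
 *	hdl = devctl_device_acquire("/devices/scsi_vhci/disk@g...:c,raw", 0);
 *	if (hdl != NULL) {
 *		(void) devctl_device_reset(hdl);
 *		devctl_release(hdl);
 *	}
 */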
/*
 * Routine to get the PHCI pathname from ioctl structures in userland
 */
/* ARGSUSED */
static int
vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
    int mode, caddr_t s)
{
	int retval = 0;

	if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
		    "phci_path copyin failed", s));
		retval = EFAULT;
	}
	return (retval);
}


/*
 * Routine to get the Client device pathname from ioctl structures in userland
 */
/* ARGSUSED */
static int
vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
    int mode, caddr_t s)
{
	int retval = 0;

	if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
		    "ioctl <%s> client_path copyin failed", s));
		retval = EFAULT;
	}
	return (retval);
}


/*
 * Routine to get the physical device address from ioctl structures in userland
 */
/* ARGSUSED */
static int
vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
{
	int retval = 0;

	if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
		    "ioctl <%s> device addr copyin failed", s));
		retval = EFAULT;
	}
	return (retval);
}


/*
 * Routine to send the client device pathname to userland.
 */
/* ARGSUSED */
static int
vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
    int mode, caddr_t s)
{
	int retval = 0;

	if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
		    "ioctl <%s> client_path copyout failed", s));
		retval = EFAULT;
	}
	return (retval);
}


/*
 * Routine to translate a dev_info pointer (dip) to a device pathname.
 */
static void
vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
{
	(void) ddi_pathname(dip, path);
}
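/*
 * Usage note (illustrative, not in the original source): the copyin/
 * copyout helpers above all assume the caller supplies kernel buffers of
 * the fixed sizes used throughout this ioctl code, e.g.:
 *
 *	char *phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
 *
 *	if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) == 0) {
 *		... use phci_path ...
 *	}
 *	kmem_free(phci_path, MAXPATHLEN);
 *
 * Likewise, ddi_pathname(9F) requires a buffer of at least MAXPATHLEN
 * bytes, which vhci_ioc_devi_to_path() relies on its caller to provide.
 */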
/*
 * vhci_get_phci_path_list:
 *	get information about devices associated with a
 *	given PHCI device.
 *
 * Return Values:
 *	path information elements
 */
int
vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
    uint_t num_elems)
{
	uint_t			count, done;
	mdi_pathinfo_t		*pip;
	sv_path_info_t		*ret_pip;
	int			status;
	size_t			prop_size;
	int			circular;

	/*
	 * Get the PHCI structure and retrieve the path information
	 * from the GUID hash table.
	 */
	ret_pip = pibuf;
	count = 0;

	ndi_devi_enter(pdip, &circular);

	done = (count >= num_elems);
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip && !done) {
		mdi_pi_lock(pip);
		(void) ddi_pathname(mdi_pi_get_phci(pip),
		    ret_pip->device.ret_phci);
		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
		    &ret_pip->ret_ext_state);

		status = mdi_prop_size(pip, &prop_size);
		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
		}

#ifdef DEBUG
		if (status != MDI_SUCCESS) {
			VHCI_DEBUG(2, (CE_WARN, NULL,
			    "!vhci_get_phci_path_list: "
			    "phci <%s>, prop size failure 0x%x",
			    ret_pip->device.ret_phci, status));
		}
#endif /* DEBUG */

		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
			status = mdi_prop_pack(pip,
			    &ret_pip->ret_prop.buf,
			    ret_pip->ret_prop.buf_size);

#ifdef DEBUG
			if (status != MDI_SUCCESS) {
				VHCI_DEBUG(2, (CE_WARN, NULL,
				    "!vhci_get_phci_path_list: "
				    "phci <%s>, prop pack failure 0x%x",
				    ret_pip->device.ret_phci, status));
			}
#endif /* DEBUG */
		}

		mdi_pi_unlock(pip);
		pip = mdi_get_next_client_path(pdip, pip);
		ret_pip++;
		count++;
		done = (count >= num_elems);
	}

	ndi_devi_exit(pdip, circular);

	return (MDI_SUCCESS);
}


/*
 * vhci_get_client_path_list:
 *	get information about various paths associated with a
 *	given client device.
 *
 * Return Values:
 *	path information elements
 */
int
vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
    uint_t num_elems)
{
	uint_t			count, done;
	mdi_pathinfo_t		*pip;
	sv_path_info_t		*ret_pip;
	int			status;
	size_t			prop_size;
	int			circular;

	ret_pip = pibuf;
	count = 0;

	ndi_devi_enter(cdip, &circular);

	done = (count >= num_elems);
	pip = mdi_get_next_phci_path(cdip, NULL);
	while (pip && !done) {
		mdi_pi_lock(pip);
		(void) ddi_pathname(mdi_pi_get_phci(pip),
		    ret_pip->device.ret_phci);
		(void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
		(void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
		    &ret_pip->ret_ext_state);

		status = mdi_prop_size(pip, &prop_size);
		if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
			*ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
		}

#ifdef DEBUG
		if (status != MDI_SUCCESS) {
			VHCI_DEBUG(2, (CE_WARN, NULL,
			    "!vhci_get_client_path_list: "
			    "phci <%s>, prop size failure 0x%x",
			    ret_pip->device.ret_phci, status));
		}
#endif /* DEBUG */

		if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
		    prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
			status = mdi_prop_pack(pip,
			    &ret_pip->ret_prop.buf,
			    ret_pip->ret_prop.buf_size);

#ifdef DEBUG
			if (status != MDI_SUCCESS) {
				VHCI_DEBUG(2, (CE_WARN, NULL,
				    "!vhci_get_client_path_list: "
				    "phci <%s>, prop pack failure 0x%x",
				    ret_pip->device.ret_phci, status));
			}
#endif /* DEBUG */
		}

		mdi_pi_unlock(pip);
		pip = mdi_get_next_phci_path(cdip, pip);
		ret_pip++;
		count++;
		done = (count >= num_elems);
	}

	ndi_devi_exit(cdip, circular);

	return (MDI_SUCCESS);
}
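/*
 * Implementation note (illustrative, not in the original source): both
 * walkers above share the same traversal idiom -- hold the devinfo node
 * with ndi_devi_enter(9F), then step the pathinfo list while holding each
 * node's pathinfo lock.  A minimal sketch of the skeleton, where
 * do_one_path() stands in for the per-path work:
 *
 *	ndi_devi_enter(dip, &circular);
 *	for (pip = mdi_get_next_phci_path(dip, NULL); pip != NULL;
 *	    pip = mdi_get_next_phci_path(dip, pip)) {
 *		mdi_pi_lock(pip);
 *		do_one_path(pip);
 *		mdi_pi_unlock(pip);
 *	}
 *	ndi_devi_exit(dip, circular);
 */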
/*
 * Routine to get the ioctl argument structure from userland.
 */
/* ARGSUSED */
static int
vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
{
	int	retval = 0;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32:
	{
		sv_iocdata32_t	ioc32;

		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
			retval = EFAULT;
			break;
		}
		pioc->client = (caddr_t)(uintptr_t)ioc32.client;
		pioc->phci = (caddr_t)(uintptr_t)ioc32.phci;
		pioc->addr = (caddr_t)(uintptr_t)ioc32.addr;
		pioc->buf_elem = (uint_t)ioc32.buf_elem;
		pioc->ret_buf = (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
		pioc->ret_elem = (uint_t *)(uintptr_t)ioc32.ret_elem;
		break;
	}

	case DDI_MODEL_NONE:
		if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
			retval = EFAULT;
			break;
		}
		break;
	}
#else /* _MULTI_DATAMODEL */
	if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
		retval = EFAULT;
	}
#endif /* _MULTI_DATAMODEL */

#ifdef DEBUG
	if (retval) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
		    "iocdata copyin failed", s));
	}
#endif

	return (retval);
}


/*
 * Routine to get the ioctl argument for ioctl causing controller switchover.
 */
/* ARGSUSED */
static int
vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
    int mode, caddr_t s)
{
	int	retval = 0;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32:
	{
		sv_switch_to_cntlr_iocdata32_t	ioc32;

		if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
			retval = EFAULT;
			break;
		}
		piocsc->client = (caddr_t)(uintptr_t)ioc32.client;
		piocsc->class = (caddr_t)(uintptr_t)ioc32.class;
		break;
	}

	case DDI_MODEL_NONE:
		if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
			retval = EFAULT;
		}
		break;
	}
#else /* _MULTI_DATAMODEL */
	if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
		retval = EFAULT;
	}
#endif /* _MULTI_DATAMODEL */

#ifdef DEBUG
	if (retval) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
		    "switch_to_cntlr_iocdata copyin failed", s));
	}
#endif

	return (retval);
}
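/*
 * Illustrative note (not in the original source): the two copyin routines
 * above follow the standard DDI data-model conversion pattern for ioctls
 * that may arrive from 32-bit callers on a 64-bit kernel.  Any new ioctl
 * argument structure containing pointers would need the same treatment;
 * a minimal sketch for a hypothetical struct foo with one user pointer:
 *
 *	struct foo32 { caddr32_t ptr; };	(hypothetical ILP32 layout)
 *
 *	switch (ddi_model_convert_from(mode & FMODELS)) {
 *	case DDI_MODEL_ILP32: {
 *		struct foo32 f32;
 *
 *		if (ddi_copyin(data, &f32, sizeof (f32), mode))
 *			return (EFAULT);
 *		pfoo->ptr = (caddr_t)(uintptr_t)f32.ptr;
 *		break;
 *	}
 *	case DDI_MODEL_NONE:
 *		if (ddi_copyin(data, pfoo, sizeof (*pfoo), mode))
 *			return (EFAULT);
 *		break;
 *	}
 */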
/*
 * Routine to allocate memory for the path information structures.
 * It allocates two chunks of memory: one keeps the userland
 * pointers/values for path information and path properties; the second
 * holds the kernel-allocated buffers for path properties.  These path
 * properties are finally copied to userland.
 */
/* ARGSUSED */
static int
vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
    uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
{
	sv_path_info_t	*pi;
	uint_t		bufsize;
	int		retval = 0;
	int		index;

	/* Allocate memory */
	*upibuf = (sv_path_info_t *)
	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
	ASSERT(*upibuf != NULL);
	*kpibuf = (sv_path_info_t *)
	    kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
	ASSERT(*kpibuf != NULL);

	/*
	 * Get the path info structure from the user space.
	 * We are interested in the following fields:
	 *	- user size of buffer for per path properties.
	 *	- user address of buffer for path info properties.
	 *	- user pointer for returning actual buffer size
	 * Keep these fields in the 'upibuf' structures.
	 * Allocate buffer for per path info properties in kernel
	 * structure ('kpibuf').
	 * Size of these buffers will be equal to the size of buffers
	 * in the user space.
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32:
	{
		sv_path_info32_t	*src;
		sv_path_info32_t	pi32;

		src = (sv_path_info32_t *)pioc->ret_buf;
		pi = (sv_path_info_t *)*upibuf;
		for (index = 0; index < num_paths; index++, src++, pi++) {
			if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
				retval = EFAULT;
				break;
			}

			pi->ret_prop.buf_size =
			    (uint_t)pi32.ret_prop.buf_size;
			pi->ret_prop.ret_buf_size =
			    (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
			pi->ret_prop.buf =
			    (caddr_t)(uintptr_t)pi32.ret_prop.buf;
		}
		break;
	}

	case DDI_MODEL_NONE:
		if (ddi_copyin(pioc->ret_buf, *upibuf,
		    sizeof (sv_path_info_t) * num_paths, mode)) {
			retval = EFAULT;
		}
		break;
	}
#else /* _MULTI_DATAMODEL */
	if (ddi_copyin(pioc->ret_buf, *upibuf,
	    sizeof (sv_path_info_t) * num_paths, mode)) {
		retval = EFAULT;
	}
#endif /* _MULTI_DATAMODEL */

	if (retval != 0) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
		    "ioctl <%s> normal: path_info copyin failed", s));
		kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
		kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
		*upibuf = NULL;
		*kpibuf = NULL;
		return (retval);
	}

	/*
	 * Allocate memory for per path properties.
	 */
	for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
		bufsize = (*upibuf)[index].ret_prop.buf_size;

		if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
			pi->ret_prop.buf_size = bufsize;
			pi->ret_prop.buf = (caddr_t)
			    kmem_zalloc(bufsize, KM_SLEEP);
			ASSERT(pi->ret_prop.buf != NULL);
		} else {
			pi->ret_prop.buf_size = 0;
			pi->ret_prop.buf = NULL;
		}

		if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
			pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
			    sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
			ASSERT(pi->ret_prop.ret_buf_size != NULL);
		} else {
			pi->ret_prop.ret_buf_size = NULL;
		}
	}

	return (0);
}
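/*
 * Ownership note (descriptive, not in the original source): on success
 * vhci_ioc_alloc_pathinfo() hands back two arrays that the caller must
 * release exactly once via vhci_ioc_free_pathinfo() below; on copyin
 * failure the arrays have already been freed and both pointers are reset
 * to NULL.  The ioctl code earlier in this file follows this pattern:
 *
 *	if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
 *	    pioc, mode, s) != 0) {
 *		retval = EFAULT;
 *		break;			(nothing further to free)
 *	}
 *	... fill in and copy out ...
 *	vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
 */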
/*
 * Routine to free memory for the path information structures.
 * This is the memory which was allocated earlier.
 */
/* ARGSUSED */
static void
vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
    uint_t num_paths)
{
	sv_path_info_t	*pi;
	int		index;

	/* Free memory for per path properties */
	for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
		if (pi->ret_prop.ret_buf_size != NULL) {
			kmem_free(pi->ret_prop.ret_buf_size,
			    sizeof (*pi->ret_prop.ret_buf_size));
		}

		if (pi->ret_prop.buf != NULL) {
			kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
		}
	}

	/* Free memory for path info structures */
	kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
	kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
}


/*
 * Routine to copy path information and path properties to userland.
 */
/* ARGSUSED */
static int
vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
    uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
{
	int			retval = 0, index;
	sv_path_info_t		*upi_ptr;
	sv_path_info32_t	*upi32_ptr;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32:
		goto copy_32bit;

	case DDI_MODEL_NONE:
		goto copy_normal;
	}
#else /* _MULTI_DATAMODEL */

	goto copy_normal;

#endif /* _MULTI_DATAMODEL */

copy_normal:

	/*
	 * Copy path information and path properties to user land.
	 * Pointer fields inside the path property structure were
	 * saved in the 'upibuf' structure earlier.
	 */
	upi_ptr = pioc->ret_buf;
	for (index = 0; index < num_paths; index++) {
		if (ddi_copyout(kpibuf[index].device.ret_ct,
		    upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
			retval = EFAULT;
			break;
		}

		if (ddi_copyout(kpibuf[index].ret_addr,
		    upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
			retval = EFAULT;
			break;
		}

		if (ddi_copyout(&kpibuf[index].ret_state,
		    &upi_ptr[index].ret_state,
		    sizeof (kpibuf[index].ret_state), mode)) {
			retval = EFAULT;
			break;
		}

		if (ddi_copyout(&kpibuf[index].ret_ext_state,
		    &upi_ptr[index].ret_ext_state,
		    sizeof (kpibuf[index].ret_ext_state), mode)) {
			retval = EFAULT;
			break;
		}

		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
		    upibuf[index].ret_prop.ret_buf_size,
		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
			retval = EFAULT;
			break;
		}

		if ((kpibuf[index].ret_prop.buf != NULL) &&
		    ddi_copyout(kpibuf[index].ret_prop.buf,
		    upibuf[index].ret_prop.buf,
		    upibuf[index].ret_prop.buf_size, mode)) {
			retval = EFAULT;
			break;
		}
	}

#ifdef DEBUG
	if (retval) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
		    "normal: path_info copyout failed", s));
	}
#endif

	return (retval);

copy_32bit:
	/*
	 * Copy path information and path properties to user land.
	 * Pointer fields inside the path property structure were
	 * saved in the 'upibuf' structure earlier.
	 */
	upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
	for (index = 0; index < num_paths; index++) {
		if (ddi_copyout(kpibuf[index].device.ret_ct,
		    upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
			retval = EFAULT;
			break;
		}

		if (ddi_copyout(kpibuf[index].ret_addr,
		    upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
			retval = EFAULT;
			break;
		}

		if (ddi_copyout(&kpibuf[index].ret_state,
		    &upi32_ptr[index].ret_state,
		    sizeof (kpibuf[index].ret_state), mode)) {
			retval = EFAULT;
			break;
		}

		if (ddi_copyout(&kpibuf[index].ret_ext_state,
		    &upi32_ptr[index].ret_ext_state,
		    sizeof (kpibuf[index].ret_ext_state), mode)) {
			retval = EFAULT;
			break;
		}
		if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
		    ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
		    upibuf[index].ret_prop.ret_buf_size,
		    sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
			retval = EFAULT;
			break;
		}

		if ((kpibuf[index].ret_prop.buf != NULL) &&
		    ddi_copyout(kpibuf[index].ret_prop.buf,
		    upibuf[index].ret_prop.buf,
		    upibuf[index].ret_prop.buf_size, mode)) {
			retval = EFAULT;
			break;
		}
	}

#ifdef DEBUG
	if (retval) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
		    "32bit: path_info copyout failed", s));
	}
#endif

	return (retval);
}


/*
 * vhci_failover()
 * This routine expects VHCI_HOLD_LUN before being invoked.  It can be invoked
 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC.  For asynchronous failovers
 * this routine shall VHCI_RELEASE_LUN on exiting.  For synchronous failovers
 * it is the caller's responsibility to release the lun.
 */

/* ARGSUSED */
static int
vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
{
	char			*guid;
	scsi_vhci_lun_t		*vlun = NULL;
	struct scsi_vhci	*vhci;
	mdi_pathinfo_t		*pip, *npip;
	char			*s_pclass, *pclass1, *pclass2, *pclass;
	char			active_pclass_copy[255], *active_pclass_ptr;
	char			*ptr1, *ptr2;
	mdi_pathinfo_state_t	pi_state;
	uint32_t		pi_ext_state;
	scsi_vhci_priv_t	*svp;
	struct scsi_device	*sd;
	struct scsi_failover_ops	*sfo;
	int			sps;	/* mdi_select_path() status */
	int			activation_done = 0;
	int			rval, retval = MDI_FAILURE;
	int			reserve_pending, check_condition, UA_condition;
	struct scsi_pkt		*pkt;
	struct buf		*bp;

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	sd = ddi_get_driver_private(cdip);
	vlun = ADDR2VLUN(&sd->sd_address);
	ASSERT(vlun != 0);
	ASSERT(VHCI_LUN_IS_HELD(vlun));
	guid = vlun->svl_lun_wwn;
	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
	vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
	    "(GUID %s)", ddi_node_name(cdip), guid);

	/*
	 * Maintain a local copy of vlun->svl_active_pclass for the rest of
	 * the processing.  Accessing the field directly in the loop below
	 * breaks the loop logic when the field gets updated by other threads
	 * (updating path status, etc.) and causes the 'paths are not
	 * currently available' condition to be declared prematurely.
	 */
	mutex_enter(&vlun->svl_mutex);
	if (vlun->svl_active_pclass != NULL) {
		(void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
		    sizeof (active_pclass_copy));
		active_pclass_ptr = &active_pclass_copy[0];
		mutex_exit(&vlun->svl_mutex);
		if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
		    active_pclass_ptr) != 0) {
			retval = MDI_FAILURE;
		}
	} else {
		/*
		 * This can happen only when the available path to the
		 * device discovered is a STANDBY path.
		 */
		mutex_exit(&vlun->svl_mutex);
		active_pclass_copy[0] = '\0';
		active_pclass_ptr = NULL;
	}

	sfo = vlun->svl_fops;
	ASSERT(sfo != NULL);
	pclass1 = s_pclass = active_pclass_ptr;
	VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
	    (s_pclass == NULL ? "<none>" : s_pclass)));

next_pathclass:

	rval = sfo->sfo_pathclass_next(pclass1, &pclass2,
	    vlun->svl_fops_ctpriv);
	if (rval == ENOENT) {
		if (s_pclass == NULL) {
			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
			    "failed, no more pathclasses\n", guid));
			goto done;
		} else {
			(void) sfo->sfo_pathclass_next(NULL, &pclass2,
			    vlun->svl_fops_ctpriv);
		}
	} else if (rval == EINVAL) {
		vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
		    "device %s (GUID %s): Invalid path-class %s",
		    ddi_node_name(cdip), guid,
		    ((pclass1 == NULL) ? "<none>" : pclass1));
		goto done;
	}
	if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
		/*
		 * paths are not currently available
		 */
		vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
		    " for device %s (GUID %s)",
		    ddi_node_name(cdip), guid);
		goto done;
	}
	pip = npip = NULL;
	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
	    "%s as failover destination\n", guid, pclass2));
	sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
		    "STANDBY paths found (status:%x)!\n", guid, sps));
		pclass1 = pclass2;
		goto next_pathclass;
	}
	do {
		pclass = NULL;
		if ((mdi_prop_lookup_string(npip, "path-class",
		    &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
		    pclass) != 0)) {
			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "!vhci_failover(5.5)(%s): skipping path "
			    "%p(%s)...\n", guid, (void *)npip, pclass));
			pip = npip;
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_STANDBY_PATH, pip, &npip);
			mdi_rele_path(pip);
			(void) mdi_prop_free(pclass);
			continue;
		}
		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);

		/*
		 * Issue READ at a non-zero block on this STANDBY path.
		 * Purple returns
		 * 1. RESERVATION_CONFLICT if a reservation is pending
		 * 2. POR check condition if a reset happened.
		 * 3. failover Check Conditions if one is already in progress.
		 */
		reserve_pending = 0;
		check_condition = 0;
		UA_condition = 0;

		bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
		    (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL);
		if (!bp) {
			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "vhci_failover !No resources (buf)\n"));
			mdi_rele_path(npip);
			goto done;
		}
		pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
		    CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
		    PKT_CONSISTENT, NULL, NULL);
		if (pkt) {
			(void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
			    pkt->pkt_cdbp, SCMD_READ, 1, 1, 0);
			pkt->pkt_flags = FLAG_NOINTR;
check_path_again:
			pkt->pkt_path_instance = mdi_pi_get_path_instance(npip);
			pkt->pkt_time = 3*30;

			if (scsi_transport(pkt) == TRAN_ACCEPT) {
				switch (pkt->pkt_reason) {
				case CMD_CMPLT:
					switch (SCBP_C(pkt)) {
					case STATUS_GOOD:
						/* Already failed over */
						activation_done = 1;
						break;
					case STATUS_RESERVATION_CONFLICT:
						reserve_pending = 1;
						break;
					case STATUS_CHECK:
						check_condition = 1;
						break;
					}
				}
			}
			if (check_condition &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				struct scsi_extended_sense *sns =
				    &(((struct scsi_arq_status *)(uintptr_t)
				    (pkt->pkt_scbp))->sts_sensedata);
				if (sns->es_key == KEY_UNIT_ATTENTION &&
				    sns->es_add_code == 0x29) {
					/* Already failed over */
					VHCI_DEBUG(1, (CE_NOTE, NULL,
					    "!vhci_failover(7)(%s): "
					    "path 0x%p POR UA condition\n",
					    guid, (void *)npip));
					if (UA_condition == 0) {
						UA_condition = 1;
						goto check_path_again;
					}
				} else {
					activation_done = 0;
					VHCI_DEBUG(1, (CE_NOTE, NULL,
					    "!vhci_failover(%s): path 0x%p "
					    "unhandled chkcond %x %x %x\n",
					    guid, (void *)npip, sns->es_key,
					    sns->es_add_code,
					    sns->es_qual_code));
				}
			}
			scsi_destroy_pkt(pkt);
		}
		scsi_free_consistent_buf(bp);

		if (activation_done) {
			mdi_rele_path(npip);
			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
			    "path 0x%p already failedover\n", guid,
			    (void *)npip));
			break;
		}
		if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
			(void) vhci_recovery_reset(vlun,
			    &svp->svp_psd->sd_address,
			    FALSE, VHCI_DEPTH_ALL);
		}
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
		    "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
		    (void *)svp->svp_psd));
		if (sfo->sfo_path_activate(svp->svp_psd, pclass2,
		    vlun->svl_fops_ctpriv) == 0) {
			activation_done = 1;
			mdi_rele_path(npip);
			VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
			    "path 0x%p successfully activated\n", guid,
			    (void *)npip));
			break;
		}
		pip = npip;
		sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
		    pip, &npip);
		mdi_rele_path(pip);
	} while ((npip != NULL) && (sps == MDI_SUCCESS));
	if (activation_done == 0) {
		pclass1 = pclass2;
		goto next_pathclass;
	}

	/*
	 * if we are here, we have succeeded in activating path npip of
	 * pathclass pclass2; let us validate all paths of pclass2 by
	 * "ping"-ing each one and mark the good ones ONLINE
	 * Also, set the state of the paths belonging to the previously
	 * active pathclass to STANDBY
	 */
	pip = npip = NULL;
	sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
	    NULL, &npip);
	if (npip == NULL || sps != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
		    "device %s (GUID %s): paths may be busy\n",
		    ddi_node_name(cdip), guid));
		goto done;
	}
	do {
		(void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
		if (mdi_prop_lookup_string(npip, "path-class", &pclass)
		    != MDI_SUCCESS) {
			pip = npip;
			sps = mdi_select_path(cdip, NULL,
			    (MDI_SELECT_ONLINE_PATH |
			    MDI_SELECT_STANDBY_PATH |
			    MDI_SELECT_USER_DISABLE_PATH),
			    pip, &npip);
			mdi_rele_path(pip);
			continue;
		}
		if (strcmp(pclass, pclass2) == 0) {
			if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
				svp = (scsi_vhci_priv_t *)
				    mdi_pi_get_vhci_private(npip);
				VHCI_DEBUG(1, (CE_NOTE, NULL,
				    "!vhci_failover(8)(%s): "
				    "pinging path 0x%p\n",
				    guid, (void *)npip));
				if (sfo->sfo_path_ping(svp->svp_psd,
				    vlun->svl_fops_ctpriv) == 1) {
					mdi_pi_set_state(npip,
					    MDI_PATHINFO_STATE_ONLINE);
					VHCI_DEBUG(1, (CE_NOTE, NULL,
					    "!vhci_failover(9)(%s): "
					    "path 0x%p ping successful, "
					    "marked online\n", guid,
					    (void *)npip));
					MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
				}
			}
		} else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
		    == 0)) {
			if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
				mdi_pi_set_state(npip,
				    MDI_PATHINFO_STATE_STANDBY);
				VHCI_DEBUG(1, (CE_NOTE, NULL,
				    "!vhci_failover(10)(%s): path 0x%p marked "
				    "STANDBY\n", guid, (void *)npip));
				MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
			}
		}
		(void) mdi_prop_free(pclass);
		pip = npip;
		sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
		    MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH),
		    pip, &npip);
		mdi_rele_path(pip);
	} while ((npip != NULL) && (sps == MDI_SUCCESS));

	/*
	 * Update the AccessState of related MP-API TPGs
	 */
	(void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);

	vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
	    "for device %s (GUID %s): failed over from %s to %s",
	    ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
	    s_pclass), pclass2);
	ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP);
	(void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1));
	mutex_enter(&vlun->svl_mutex);
	ptr2 = vlun->svl_active_pclass;
	vlun->svl_active_pclass = ptr1;
	mutex_exit(&vlun->svl_mutex);
	if (ptr2) {
		kmem_free(ptr2, strlen(ptr2)+1);
	}
	mutex_enter(&vhci->vhci_mutex);
	scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
	    &vhci->vhci_reset_notify_listf);
	/* All reservations are cleared upon these resets. */
	vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
	mutex_exit(&vhci->vhci_mutex);
	VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
	    "pathclass for %s is now %s\n", guid, pclass2));
	retval = MDI_SUCCESS;

done:
	if (flags == MDI_FAILOVER_ASYNC) {
		VHCI_RELEASE_LUN(vlun);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
		    "releasing lun, as failover was ASYNC\n"));
	} else {
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
		    "NOT releasing lun, as failover was SYNC\n"));
	}
	return (retval);
}
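/*
 * Summary (descriptive comment, not in the original source): the failover
 * sequence above is, in order:
 *
 *   1. quiesce outstanding I/O on the currently active pathclass
 *	(vhci_quiesce_paths);
 *   2. walk candidate pathclasses via sfo_pathclass_next;
 *   3. probe a STANDBY path with a small READ to detect pending
 *	reservations, power-on resets, and failovers already in progress;
 *   4. activate the chosen path (sfo_path_activate);
 *   5. ping and ONLINE the remaining paths of the new class, and move the
 *	old class's paths to STANDBY;
 *   6. record the new class in svl_active_pclass and notify reset
 *	listeners and MP-API TPG state.
 */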
/*
 * vhci_client_attached is called after the successful attach of a
 * client devinfo node.
 */
static void
vhci_client_attached(dev_info_t *cdip)
{
	mdi_pathinfo_t	*pip;
	int		circular;

	/*
	 * At this point the client has attached and its instance number is
	 * valid, so we can set up kstats.  We need to do this here because it
	 * is possible for paths to go online prior to client attach, in which
	 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
	 * was a noop.
	 */
	ndi_devi_enter(cdip, &circular);
	for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
	    pip = mdi_get_next_phci_path(cdip, pip))
		vhci_kstat_create_pathinfo(pip);
	ndi_devi_exit(cdip, circular);
}

/*
 * quiesce all of the online paths
 */
static int
vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
    char *guid, char *active_pclass_ptr)
{
	scsi_vhci_priv_t	*svp;
	char			*s_pclass = NULL;
	mdi_pathinfo_t		*npip, *pip;
	int			sps;

	/* quiesce currently active paths */
	s_pclass = NULL;
	pip = npip = NULL;
	sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
	if ((npip == NULL) || (sps != MDI_SUCCESS)) {
		return (1);
	}
	do {
		if (mdi_prop_lookup_string(npip, "path-class",
		    &s_pclass) != MDI_SUCCESS) {
			mdi_rele_path(npip);
			vhci_log(CE_NOTE, vdip, "!Failover operation failed "
			    "for device %s (GUID %s) due to an internal "
			    "error", ddi_node_name(cdip), guid);
			return (1);
		}
		if (strcmp(s_pclass, active_pclass_ptr) == 0) {
			/*
			 * quiesce path.  Free s_pclass since
			 * we don't need it anymore
			 */
			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "!vhci_failover(2)(%s): failing over "
			    "from %s; quiescing path %p\n",
			    guid, s_pclass, (void *)npip));
			(void) mdi_prop_free(s_pclass);
			svp = (scsi_vhci_priv_t *)
			    mdi_pi_get_vhci_private(npip);
			if (svp == NULL) {
				VHCI_DEBUG(1, (CE_NOTE, NULL,
				    "!vhci_failover(2.5)(%s): no "
				    "client priv! %p offlined?\n",
				    guid, (void *)npip));
				pip = npip;
				sps = mdi_select_path(cdip, NULL,
				    MDI_SELECT_ONLINE_PATH, pip, &npip);
				mdi_rele_path(pip);
				continue;
			}
			if (scsi_abort(&svp->svp_psd->sd_address, NULL)
			    == 0) {
				(void) vhci_recovery_reset(vlun,
				    &svp->svp_psd->sd_address, FALSE,
				    VHCI_DEPTH_TARGET);
			}
			mutex_enter(&svp->svp_mutex);
			if (svp->svp_cmds == 0) {
				VHCI_DEBUG(1, (CE_NOTE, NULL,
				    "!vhci_failover(3)(%s):"
				    "quiesced path %p\n", guid, (void *)npip));
			} else {
				while (svp->svp_cmds != 0) {
					cv_wait(&svp->svp_cv, &svp->svp_mutex);
					VHCI_DEBUG(1, (CE_NOTE, NULL,
					    "!vhci_failover(3.cv)(%s):"
					    "quiesced path %p\n", guid,
					    (void *)npip));
				}
			}
			mutex_exit(&svp->svp_mutex);
		} else {
			/*
			 * make sure we free up the memory
			 */
			(void) mdi_prop_free(s_pclass);
		}
		pip = npip;
		sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
		    pip, &npip);
		mdi_rele_path(pip);
	} while ((npip != NULL) && (sps == MDI_SUCCESS));
	return (0);
}

static struct scsi_vhci_lun *
vhci_lun_lookup(dev_info_t *tgt_dip)
{
	return ((struct scsi_vhci_lun *)
	    mdi_client_get_vhci_private(tgt_dip));
}

static struct scsi_vhci_lun *
vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
{
	struct scsi_vhci_lun *svl;

	if (svl = vhci_lun_lookup(tgt_dip)) {
		return (svl);
	}

	svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
	svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP);
	(void) strcpy(svl->svl_lun_wwn, guid);
	mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
	sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
	svl->svl_waiting_for_activepath = 1;
	svl->svl_sector_size = 1;
	mdi_client_set_vhci_private(tgt_dip, svl);
	*didalloc = 1;
	VHCI_DEBUG(1, (CE_NOTE, NULL,
	    "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
	    guid, (void *)svl));
	return (svl);
}
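/*
 * Usage sketch (illustrative, not in the original source): callers use
 * the lookup-or-allocate idiom so that error paths can tell whether they
 * created the vlun and therefore own its cleanup:
 *
 *	int didalloc = 0;
 *	struct scsi_vhci_lun *svl;
 *
 *	svl = vhci_lun_lookup_alloc(tgt_dip, guid, &didalloc);
 *	... on a later failure ...
 *	if (didalloc)
 *		vhci_lun_free(tgt_dip);
 */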
static void
vhci_lun_free(dev_info_t *tgt_dip)
{
	struct scsi_vhci_lun *dvlp;
	char *guid;
	struct scsi_device *sd;

	/*
	 * The scsi_device was set to driver private during child node
	 * initialization in the scsi_hba_bus_ctl().
	 */
	sd = (struct scsi_device *)ddi_get_driver_private(tgt_dip);

	dvlp = (struct scsi_vhci_lun *)
	    mdi_client_get_vhci_private(tgt_dip);
	ASSERT(dvlp != NULL);

	mdi_client_set_vhci_private(tgt_dip, NULL);

	guid = dvlp->svl_lun_wwn;
	ASSERT(guid != NULL);
	VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));

	mutex_enter(&dvlp->svl_mutex);
	if (dvlp->svl_active_pclass != NULL) {
		kmem_free(dvlp->svl_active_pclass,
		    strlen(dvlp->svl_active_pclass)+1);
	}
	dvlp->svl_active_pclass = NULL;
	mutex_exit(&dvlp->svl_mutex);

	if (dvlp->svl_lun_wwn != NULL) {
		kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1);
	}
	dvlp->svl_lun_wwn = NULL;

	if (dvlp->svl_fops_name) {
		kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1);
	}
	dvlp->svl_fops_name = NULL;

	if (dvlp->svl_fops_ctpriv != NULL) {
		dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
	}

	if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
		taskq_destroy(dvlp->svl_taskq);

	mutex_destroy(&dvlp->svl_mutex);
	cv_destroy(&dvlp->svl_cv);
	sema_destroy(&dvlp->svl_pgr_sema);
	kmem_free(dvlp, sizeof (*dvlp));
	/*
	 * vhci_lun_free may be called before the tgt_dip
	 * initialization so check if the sd is NULL.
	 */
	if (sd != NULL)
		sd->sd_address.a_hba_tran->tran_tgt_private = NULL;
}


int
vhci_do_scsi_cmd(struct scsi_pkt *pkt)
{
	int	err = 0;
	int	retry_cnt = 0;
	struct scsi_extended_sense	*sns;

#ifdef DEBUG
	if (vhci_debug > 5) {
		vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip,
		    CE_WARN, "Vhci command", pkt->pkt_cdbp);
	}
#endif

retry:
	err = scsi_poll(pkt);
	if (err) {
		if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
			if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
				VHCI_DEBUG(1, (CE_NOTE, NULL,
				    "!v_s_do_s_c: RELEASE conflict\n"));
				return (0);
			}
		}
		if (retry_cnt++ < 3) {
			VHCI_DEBUG(1, (CE_WARN, NULL,
			    "!v_s_do_s_c:retry packet 0x%p "
			    "status 0x%x reason %s",
			    (void *)pkt, SCBP_C(pkt),
			    scsi_rname(pkt->pkt_reason)));
			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (SCBP_C(pkt) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				sns = &(((struct scsi_arq_status *)(uintptr_t)
				    (pkt->pkt_scbp))->sts_sensedata);
				VHCI_DEBUG(1, (CE_WARN, NULL,
				    "!v_s_do_s_c:retry "
				    "packet 0x%p sense data %s", (void *)pkt,
				    scsi_sname(sns->es_key)));
			}
			goto retry;
		}
		VHCI_DEBUG(1, (CE_WARN, NULL,
		    "!v_s_do_s_c: failed transport 0x%p 0x%x",
		    (void *)pkt, SCBP_C(pkt)));
		return (0);
	}

	switch (pkt->pkt_reason) {
	case CMD_TIMEOUT:
		VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
		    "out (pkt 0x%p)", (void *)pkt));
		return (0);
	case CMD_CMPLT:
		switch (SCBP_C(pkt)) {
		case STATUS_GOOD:
			break;
		case STATUS_CHECK:
			if (pkt->pkt_state & STATE_ARQ_DONE) {
				sns = &(((struct scsi_arq_status *)(uintptr_t)
				    (pkt->pkt_scbp))->sts_sensedata);
				if ((sns->es_key == KEY_UNIT_ATTENTION) ||
				    (sns->es_key == KEY_NOT_READY)) {
					/*
					 * clear unit attn.
					 */
					VHCI_DEBUG(1, (CE_WARN, NULL,
					    "!v_s_do_s_c: retry "
					    "packet 0x%p sense data %s",
					    (void *)pkt,
					    scsi_sname(sns->es_key)));
					goto retry;
				}
				VHCI_DEBUG(4, (CE_WARN, NULL,
				    "!ARQ while transporting "
				    "(pkt 0x%p)", (void *)pkt));
				return (0);
			}
			return (0);
		default:
			VHCI_DEBUG(1, (CE_WARN, NULL,
			    "!Bad status returned "
			    "(pkt 0x%p, status %x)",
			    (void *)pkt, SCBP_C(pkt)));
			return (0);
		}
		break;
	case CMD_INCOMPLETE:
	case CMD_RESET:
	case CMD_ABORTED:
	case CMD_TRAN_ERR:
		if (retry_cnt++ < 1) {
			VHCI_DEBUG(1, (CE_WARN, NULL,
			    "!v_s_do_s_c: retry packet 0x%p %s",
			    (void *)pkt, scsi_rname(pkt->pkt_reason)));
			goto retry;
		}
		/* FALLTHROUGH */
	default:
		VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
		    "complete successfully (pkt 0x%p,"
		    "reason %x)", (void *)pkt, pkt->pkt_reason));
		return (0);
	}
	return (1);
}

static int
vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
{
	mdi_pathinfo_t		*pip, *spip;
	dev_info_t		*cdip;
	struct scsi_vhci_priv	*svp;
	mdi_pathinfo_state_t	pstate;
	uint32_t		p_ext_state;
	int			circular;

	cdip = vlun->svl_dip;
	pip = spip = NULL;
	ndi_devi_enter(cdip, &circular);
	pip = mdi_get_next_phci_path(cdip, NULL);
	while (pip != NULL) {
		(void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
		if (pstate != MDI_PATHINFO_STATE_ONLINE) {
			spip = pip;
			pip = mdi_get_next_phci_path(cdip, spip);
			continue;
		}
		mdi_hold_path(pip);
		ndi_devi_exit(cdip, circular);
		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
		mutex_enter(&svp->svp_mutex);
		while (svp->svp_cmds != 0) {
			if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex,
			    ddi_get_lbolt() + drv_usectohz
			    (vhci_path_quiesce_timeout * 1000000)) == -1) {
				mutex_exit(&svp->svp_mutex);
				mdi_rele_path(pip);
				VHCI_DEBUG(1, (CE_WARN, NULL,
				    "Quiesce of lun is not successful "
				    "vlun: 0x%p.", (void *)vlun));
				return (0);
			}
		}
		mutex_exit(&svp->svp_mutex);
		ndi_devi_enter(cdip, &circular);
		spip = pip;
		pip = mdi_get_next_phci_path(cdip, spip);
		mdi_rele_path(spip);
	}
	ndi_devi_exit(cdip, circular);
	return (1);
}
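/*
 * Timing note (descriptive, not in the original source): the quiesce loop
 * above relies on cv_timedwait(9F) returning -1 when the timeout expires
 * without a signal, so a path that cannot drain its outstanding commands
 * within vhci_path_quiesce_timeout seconds fails the quiesce rather than
 * blocking forever.
 */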
static int
vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
{
	scsi_vhci_lun_t		*vlun;
	vhci_prout_t		*prout;
	int			rval, success;
	mdi_pathinfo_t		*pip, *npip;
	scsi_vhci_priv_t	*osvp;
	dev_info_t		*cdip;
	uchar_t			cdb_1;
	uchar_t			temp_res_key[MHIOC_RESV_KEY_SIZE];

	/*
	 * see if there are any other paths available; if none,
	 * then there is nothing to do.
	 */
	cdip = svp->svp_svl->svl_dip;
	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "%s%d: vhci_pgr_validate_and_register: first path\n",
		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
		return (1);
	}

	vlun = svp->svp_svl;
	prout = &vlun->svl_prout;
	ASSERT(vlun->svl_pgr_active != 0);

	/*
	 * When the path was busy/offlined, some other host might have
	 * cleared this key.  Validate the key on some other path first.
	 * If it fails, return failure.
	 */
	npip = pip;
	pip = NULL;
	success = 0;

	/* Save the res key */
	bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE);

	/*
	 * Sometimes the CDB from the application can be a
	 * Register_And_Ignore.  Instead of validation, this cdb would result
	 * in a force registration.  Convert it to a normal cdb for
	 * validation.  After that, be sure to restore the cdb.
	 */
	cdb_1 = vlun->svl_cdb[1];
	vlun->svl_cdb[1] &= 0xe0;

	do {
		osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
		if (osvp == NULL) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: no "
			    "client priv! 0x%p offlined?\n",
			    (void *)npip));
			goto next_path_1;
		}

		if (osvp == svp) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: same svp 0x%p"
			    " npip 0x%p vlun 0x%p\n",
			    (void *)svp, (void *)npip, (void *)vlun));
			goto next_path_1;
		}

		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: First validate on"
		    " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
		    " cdb1 %x\n", (void *)osvp, (void *)vlun,
		    (void *)curthread, vlun->svl_cdb[1]));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");

		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);

		VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
		    (void *)vlun));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");

		rval = vhci_do_prout(osvp);
		if (rval == 1) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "%s%d: vhci_pgr_validate_and_register: key"
			    " validated thread 0x%p\n", ddi_driver_name(cdip),
			    ddi_get_instance(cdip), (void *)curthread));
			pip = npip;
			success = 1;
			break;
		} else {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: First validation"
			    " on osvp 0x%p failed %x\n", (void *)osvp, rval));
			vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
		}

		/*
		 * Try other paths
		 */
next_path_1:
		pip = npip;
		rval = mdi_select_path(cdip, NULL,
		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
		    pip, &npip);
		mdi_rele_path(pip);
	} while ((rval == MDI_SUCCESS) && (npip != NULL));


	/* Be sure to restore original cdb */
	vlun->svl_cdb[1] = cdb_1;

	/* Restore the res_key */
	bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);

	/*
	 * If the key could not be registered on any path for the first time,
	 * return success, as online should still continue.
	 */
	if (success == 0) {
		return (1);
	}

	ASSERT(pip != NULL);

	/*
	 * Force register on new path
	 */
	cdb_1 = vlun->svl_cdb[1];	/* store the cdb */

	vlun->svl_cdb[1] &= 0xe0;
	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;

	vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");

	bcopy(prout->active_service_key, prout->service_key,
	    MHIOC_RESV_KEY_SIZE);
	bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);

	vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: ");

	rval = vhci_do_prout(svp);
	vlun->svl_cdb[1] = cdb_1;	/* restore the cdb */
	if (rval != 1) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: register on new"
		    " path 0x%p svp 0x%p failed %x\n",
		    (void *)pip, (void *)svp, rval));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
		mdi_rele_path(pip);
		return (0);
	}

	if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: zero service key\n"));
		mdi_rele_path(pip);
		return (rval);
	}

	/*
	 * While the key was force registered, some other host might have
	 * cleared the key.  Re-validate the key on another pre-existing path
	 * before declaring success.
	 */
	npip = pip;
	pip = NULL;

	/*
	 * Sometimes the CDB from the application can be a Register and
	 * Ignore.  Instead of validation, it would result in a force
	 * registration.  Convert it to a normal cdb for validation.
	 * After that, be sure to restore the cdb.
	 */
	cdb_1 = vlun->svl_cdb[1];
	vlun->svl_cdb[1] &= 0xe0;
	success = 0;

	do {
		osvp = (scsi_vhci_priv_t *)
		    mdi_pi_get_vhci_private(npip);
		if (osvp == NULL) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: no "
			    "client priv! 0x%p offlined?\n",
			    (void *)npip));
			goto next_path_2;
		}

		if (osvp == svp) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: same osvp 0x%p"
			    " npip 0x%p vlun 0x%p\n",
			    (void *)svp, (void *)npip, (void *)vlun));
			goto next_path_2;
		}

		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: Re-validation on"
		    " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
		    (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);

		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");

		rval = vhci_do_prout(osvp);
		if (rval == 1) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "%s%d: vhci_pgr_validate_and_register: key"
			    " validated thread 0x%p\n", ddi_driver_name(cdip),
			    ddi_get_instance(cdip), (void *)curthread));
			pip = npip;
			success = 1;
			break;
		} else {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: Re-validation on"
			    " osvp 0x%p failed %x\n", (void *)osvp, rval));
			vhci_print_prout_keys(vlun,
			    "v_pgr_val_reg: reval failed: ");
		}

		/*
		 * Try other paths
		 */
next_path_2:
		pip = npip;
		rval = mdi_select_path(cdip, NULL,
		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
		    pip, &npip);
		mdi_rele_path(pip);
	} while ((rval == MDI_SUCCESS) && (npip != NULL));

	/* Be sure to restore original cdb */
	vlun->svl_cdb[1] = cdb_1;

	if (success == 1) {
		/* Successfully validated registration */
		mdi_rele_path(pip);
		return (1);
	}

	VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));

	/*
	 * key invalid, back out by registering key value of 0
	 */
	VHCI_DEBUG(4, (CE_NOTE, NULL,
	    "vhci_pgr_validate_and_register: backout on"
	    " svp 0x%p being done\n", (void *)svp));
	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");

	bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
	bzero(prout->service_key, MHIOC_RESV_KEY_SIZE);

	vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bzero: ");

	/*
	 * Get a new path
	 */
	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
		return (0);
	}

	if ((rval = vhci_do_prout(svp)) != 1) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: backout on"
		    " svp 0x%p failed\n", (void *)svp));
		vhci_print_prout_keys(vlun, "backout failed");

		VHCI_DEBUG(4, (CE_WARN, NULL,
		    "%s%d: vhci_pgr_validate_and_register: key"
		    " validation and backout failed", ddi_driver_name(cdip),
		    ddi_get_instance(cdip)));
		if (rval == VHCI_PGR_ILLEGALOP) {
			VHCI_DEBUG(4, (CE_WARN, NULL,
			    "%s%d: vhci_pgr_validate_and_register: key"
			    " already cleared", ddi_driver_name(cdip),
			    ddi_get_instance(cdip)));
			rval = 1;
		} else
			rval = 0;
	} else {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "%s%d: vhci_pgr_validate_and_register: key"
		    " validation failed, key backed out\n",
		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
	}
	mdi_rele_path(pip);

	return (rval);
}
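/*
 * Summary (descriptive comment, not in the original source): the routine
 * above implements a three-phase register: (1) validate the existing key
 * on some alternate path, since another host may have cleared it while
 * this path was busy or offline; (2) force-register (Register_And_Ignore)
 * on the new path; (3) re-validate on a pre-existing path before declaring
 * success.  If the re-validation fails, the key is backed out by
 * registering a service key of zero.
 */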
	if ((rval = vhci_do_prout(svp)) != 1) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: backout on"
		    " svp 0x%p failed\n", (void *)svp));
		vhci_print_prout_keys(vlun, "backout failed");

		VHCI_DEBUG(4, (CE_WARN, NULL,
		    "%s%d: vhci_pgr_validate_and_register: key"
		    " validation and backout failed", ddi_driver_name(cdip),
		    ddi_get_instance(cdip)));
		if (rval == VHCI_PGR_ILLEGALOP) {
			VHCI_DEBUG(4, (CE_WARN, NULL,
			    "%s%d: vhci_pgr_validate_and_register: key"
			    " already cleared", ddi_driver_name(cdip),
			    ddi_get_instance(cdip)));
			rval = 1;
		} else
			rval = 0;
	} else {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "%s%d: vhci_pgr_validate_and_register: key"
		    " validation failed, key backed out\n",
		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
	}
	mdi_rele_path(pip);

	return (rval);
}

/*
 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures
 * that vhci_scsi_start is not called in interrupt context.
 * Since the upper layer has already been given TRAN_ACCEPT when the command
 * was dispatched, we must complete the command ourselves if something goes
 * wrong here.
 */
static void
vhci_dispatch_scsi_start(void *arg)
{
	struct vhci_pkt *vpkt = (struct vhci_pkt *)arg;
	struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
	int rval = TRAN_BUSY;

	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
	    " scsi-2 reserve for 0x%p\n",
	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));

	/*
	 * To prevent the taskq from being called recursively we set the
	 * VHCI_PKT_THRU_TASKQ bit in vpkt_state.
	 */
	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;

	/*
	 * Wait for the transport to become ready to send packets; if it
	 * times out, it returns something other than TRAN_BUSY. The
	 * vhci_reserve_delay may need tuning for other transports and is
	 * therefore a global. Using delay(9F) is safe here because this
	 * routine runs from taskq dispatch, not interrupt context.
	 */
	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
		delay(drv_usectohz(vhci_reserve_delay));
	}

	switch (rval) {
	case TRAN_ACCEPT:
		return;

	default:
		/*
		 * This pkt shall be retried, and to ensure another taskq
		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
		 * flag.
		 */
		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;

		/* Ensure that the pkt is retried without a reset */
		tpkt->pkt_reason = CMD_ABORTED;
		tpkt->pkt_statistics |= STAT_ABORTED;
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
		    "TRAN_rval %d returned for dip 0x%p", rval,
		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
		break;
	}

	/*
	 * vpkt_org_vpkt should always be NULL here if the retry command
	 * has been successfully dispatched. If vpkt_org_vpkt != NULL at
	 * this point, it is an error, so restore the original vpkt and
	 * return an error to the target driver so it can retry the
	 * command as appropriate.
	 */
	if (vpkt->vpkt_org_vpkt != NULL) {
		struct vhci_pkt *new_vpkt = vpkt;
		scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
		    mdi_pi_get_vhci_private(vpkt->vpkt_path);

		vpkt = vpkt->vpkt_org_vpkt;

		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;

		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
		    new_vpkt->vpkt_tgt_pkt);

		tpkt = vpkt->vpkt_tgt_pkt;
	}

	if (tpkt->pkt_comp) {
		tpkt->pkt_comp(tpkt);
	}
}
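/*
 * taskq callback that attempts to fail a LUN back to its preferred path
 * class; presumably dispatched once auto-failback is enabled and a path
 * in the preferred class becomes available again.
 */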
static void
vhci_initiate_auto_failback(void *arg)
{
	struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
	dev_info_t *vdip, *cdip;
	int held;

	cdip = vlun->svl_dip;
	vdip = ddi_get_parent(cdip);

	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);

	/*
	 * Perform a final check that the active path class is indeed not
	 * the preferred path class. In the time since the auto failback
	 * was dispatched, an external failover could have been detected
	 * (some other host could have noticed the condition and triggered
	 * the failback already). In that case, going ahead with the
	 * failover would defeat the purpose of auto failback.
	 */
	mutex_enter(&vlun->svl_mutex);
	if (vlun->svl_active_pclass != NULL) {
		char *best_pclass;
		struct scsi_failover_ops *fo;

		fo = vlun->svl_fops;

		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
		    vlun->svl_fops_ctpriv);
		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
			mutex_exit(&vlun->svl_mutex);
			VHCI_RELEASE_LUN(vlun);
			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
			    "auto failback for %s as %s pathclass already "
			    "active.\n", vlun->svl_lun_wwn, best_pclass));
			return;
		}
	}
	mutex_exit(&vlun->svl_mutex);
	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
	    == MDI_SUCCESS) {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "succeeded for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	} else {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "failed for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	}
	VHCI_RELEASE_LUN(vlun);
}

#ifdef DEBUG
static void
vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
{
	vhci_clean_print(NULL, 5, "Current PGR Keys",
	    (uchar_t *)prin, numkeys * 8);
}
#endif

static void
vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
{
	int i;
	vhci_prout_t *prout;
	char buf1[4*MHIOC_RESV_KEY_SIZE + 1];
	char buf2[4*MHIOC_RESV_KEY_SIZE + 1];
	char buf3[4*MHIOC_RESV_KEY_SIZE + 1];
	char buf4[4*MHIOC_RESV_KEY_SIZE + 1];

	prout = &vlun->svl_prout;
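	/*
	 * Each key byte is rendered as "[xx]", i.e. four characters, which
	 * is why the buffers above are sized 4 * MHIOC_RESV_KEY_SIZE + 1.
	 */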
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf2[4*i], "[%02x]", prout->service_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf4[4*i], "[%02x]",
		    prout->active_service_key[i]);

	/* Print everything in one call; separate calls could interleave. */
	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
	    "res_key:            %s\n"
	    "service_key:        %s\n"
	    "active_res_key:     %s\n"
	    "active_service_key: %s\n",
	    msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
}

/*
 * Called from vhci_scsi_start to update the pHCI pkt with the target pkt.
 */
static void
vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
{
	ASSERT(vpkt->vpkt_hba_pkt);

	vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
	vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;

	if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
	    MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
		/*
		 * A polled command was requested or the pHCI is in the
		 * suspended state.
		 */
		vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
		vpkt->vpkt_hba_pkt->pkt_comp = NULL;
	} else {
		vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
	}
	vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
	bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
	    vpkt->vpkt_tgt_init_cdblen);
	vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;

	/* Re-initialize the following pHCI packet state information */
	vpkt->vpkt_hba_pkt->pkt_state = 0;
	vpkt->vpkt_hba_pkt->pkt_statistics = 0;
	vpkt->vpkt_hba_pkt->pkt_reason = 0;
}
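/*
 * Bus power entry point: delegate to the MPxIO framework and map the
 * MDI return value onto the DDI codes expected by the DDI caller.
 */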
static int
vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
    void *arg, void *result)
{
	int ret = DDI_SUCCESS;

	/*
	 * Generic processing in MPxIO framework
	 */
	ret = mdi_bus_power(parent, impl_arg, op, arg, result);

	switch (ret) {
	case MDI_SUCCESS:
		ret = DDI_SUCCESS;
		break;
	case MDI_FAILURE:
		ret = DDI_FAILURE;
		break;
	default:
		break;
	}

	return (ret);
}

static int
vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip)
{
	dev_info_t *cdip;
	mdi_pathinfo_t *npip = NULL;
	scsi_vhci_priv_t *svp = NULL;
	struct scsi_address *pap = NULL;
	scsi_hba_tran_t *hba = NULL;
	int sps;
	int mps_flag;
	int rval = 0;

	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
	if (pip) {
		/*
		 * If the call is from vhci_pathinfo_state_change,
		 * then this path was busy and is becoming ready to accept IO.
		 */
		ASSERT(ap != NULL);
		hba = ap->a_hba_tran;
		ASSERT(hba != NULL);
		rval = scsi_ifsetcap(ap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		return (rval);
	}

	/*
	 * Set capability on all the pHCIs.
	 * If any path is busy, then the capability would be set by
	 * vhci_pathinfo_state_change.
	 */

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL,
		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
		    (void *)cdip));
		return (0);
	}

again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (rval);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (rval);
	}

	pap = &svp->svp_psd->sd_address;
	ASSERT(pap != NULL);
	hba = pap->a_hba_tran;
	ASSERT(hba != NULL);

	if (hba->tran_setcap != NULL) {
		rval = scsi_ifsetcap(pap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		/*
		 * Select next path and issue the setcap, repeat
		 * until all paths are exhausted
		 */
		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
			mdi_rele_path(pip);
			return (1);
		}
		mdi_rele_path(pip);
		pip = npip;
		goto again;
	}
	mdi_rele_path(pip);
	return (rval);
}
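/*
 * Bus config entry point. For BUS_CONFIG_ONE and BUS_UNCONFIG_ONE the
 * device name embeds the LUN's GUID; extract it so the MDI framework can
 * locate the corresponding client node directly.
 */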
static int
vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **child)
{
	char *guid;

	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
		guid = vhci_devnm_to_guid((char *)arg);
	else
		guid = NULL;

	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
	    == MDI_SUCCESS)
		return (NDI_SUCCESS);
	else
		return (NDI_FAILURE);
}

/*
 * Take the original vhci_pkt and create a duplicate of it for resending,
 * as though it originated in ssd.
 */
static struct scsi_pkt *
vhci_create_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt *new_vpkt = NULL;
	struct scsi_pkt *pkt = NULL;

	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	/*
	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
	 */
	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
	    vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
	if (pkt != NULL) {
		new_vpkt = TGTPKT2VHCIPKT(pkt);

		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;

		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;

		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);

		/*
		 * Save a pointer to the original vhci_pkt
		 */
		new_vpkt->vpkt_org_vpkt = vpkt;
	}

	return (pkt);
}

/*
 * Copy the successful completion information from the hba packet into
 * the original target pkt from the upper layer. Returns the original
 * vpkt and destroys the new vpkt from the internal retry.
 */
static struct vhci_pkt *
vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt *ret_vpkt = NULL;
	struct scsi_pkt *tpkt = NULL;
	struct scsi_pkt *hba_pkt = NULL;
	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	ASSERT(vpkt->vpkt_org_vpkt != NULL);
	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
	    "completed successfully!\n"));

	ret_vpkt = vpkt->vpkt_org_vpkt;
	tpkt = ret_vpkt->vpkt_tgt_pkt;
	hba_pkt = vpkt->vpkt_hba_pkt;

	/*
	 * Copy the good status into the target driver's packet
	 */
	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
	tpkt->pkt_resid = hba_pkt->pkt_resid;
	tpkt->pkt_state = hba_pkt->pkt_state;
	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
	tpkt->pkt_reason = hba_pkt->pkt_reason;

	/*
	 * Destroy the internally created vpkt for the retry
	 */
	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
	    vpkt->vpkt_tgt_pkt);

	return (ret_vpkt);
}

/* Restart the request sense request after a transient TRAN_BUSY. */
static void
vhci_uscsi_restart_sense(void *arg)
{
	struct buf *rqbp;
	struct buf *bp;
	struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
	mp_uscsi_cmd_t *mp_uscmdp;

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));

	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
		/* If it fails we need to wake up the original command. */
		mp_uscmdp = rqpkt->pkt_private;
		ASSERT(mp_uscmdp != NULL);
		bp = mp_uscmdp->cmdbp;
		rqbp = mp_uscmdp->rqbp;
		ASSERT(bp != NULL && rqbp != NULL);
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		bp->b_resid = bp->b_bcount;
		bioerror(bp, EIO);
		biodone(bp);
	}
}

/*
 * Auto-rqsense is not enabled, so we have to retrieve the request sense
 * manually.
 */
static int
vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
{
	struct buf *rqbp, *cmdbp;
	struct scsi_pkt *rqpkt;
	int rval = 0;

	cmdbp = mp_uscmdp->cmdbp;
	ASSERT(cmdbp != NULL);

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
	/* Set up the packet information and cdb. */
	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
		return (-1);
	}

	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
		scsi_free_consistent_buf(rqbp);
		return (-1);
	}

	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
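	/*
	 * FLAG_SENSING tags this pkt so that vhci_uscsi_iodone() treats its
	 * completion as the manual REQUEST SENSE for the original command
	 * rather than as a fresh command.
	 */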
	mp_uscmdp->rqbp = rqbp;
	rqbp->b_private = mp_uscmdp;
	rqpkt->pkt_flags |= FLAG_SENSING;
	rqpkt->pkt_time = 60;
	rqpkt->pkt_comp = vhci_uscsi_iodone;
	rqpkt->pkt_private = mp_uscmdp;

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
	 */
	if (scsi_pkt_allocated_correctly(rqpkt))
		rqpkt->pkt_path_instance = 0;

	/* Send the request sense command. */
	switch (scsi_transport(rqpkt)) {
	case TRAN_ACCEPT:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport accepted."));
		break;
	case TRAN_BUSY:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport busy, setting timeout."));
		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
		    (drv_usectohz(5 * 1000000)));
		break;
	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport failed"));
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		rval = -1;
	}

	return (rval);
}

/*
 * Done routine for the mpapi uscsi command. This behaves as though
 * FLAG_DIAGNOSE is set, meaning there are no retries except for a manual
 * request sense.
 */
void
vhci_uscsi_iodone(struct scsi_pkt *pkt)
{
	struct buf *bp;
	mp_uscsi_cmd_t *mp_uscmdp;
	struct uscsi_cmd *uscmdp;
	struct scsi_arq_status *arqstat;
	int err;

	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
	uscmdp = mp_uscmdp->uscmdp;
	bp = mp_uscmdp->cmdbp;
	ASSERT(bp != NULL);
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
	/* Save the status and the residual into the uscsi_cmd struct. */
	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
	uscmdp->uscsi_resid = bp->b_resid;

	/* Return right away on a fully successful command. */
	if (pkt->pkt_reason == CMD_CMPLT &&
	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
	    pkt->pkt_resid == 0) {
		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		return;
	}
	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
	    "pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
	    pkt->pkt_reason, pkt->pkt_resid,
	    pkt->pkt_state, bp->b_bcount, bp->b_resid));

	err = EIO;

	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
	if (pkt->pkt_reason != CMD_CMPLT) {
		/*
		 * The command did not complete.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command did not complete."
		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
		if (pkt->pkt_flags & FLAG_SENSING) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
			err = ETIMEDOUT;
		}
	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
		/*
		 * The auto-rqsense happened, and the packet has a filled-in
		 * scsi_arq_status structure, pointed to by pkt_scbp.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: received auto-requested sense"));
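		/*
		 * Copy no more sense data than the device actually returned
		 * (SENSE_LENGTH minus the sense pkt's residual) and no more
		 * than the caller's buffer can hold; uscsi_rqresid reports
		 * the shortfall.
		 */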
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			/* get the amount of data to copy into rqbuf */
			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus =
			    *((char *)&arqstat->sts_rqpkt_status);
			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
			    rqlen != 0) {
				bcopy(&(arqstat->sts_sensedata),
				    uscmdp->uscsi_rqbuf, rqlen);
			}
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_uscsi_iodone: ARQ "
			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
			    "xfer: %d rqpkt_resid: %d\n",
			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
			    uscmdp->uscsi_rqlen, rqlen,
			    arqstat->sts_rqpkt_resid));
		}
	} else if (pkt->pkt_flags & FLAG_SENSING) {
		struct buf *rqbp;
		struct scsi_status *rqstatus;

		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
		/* A manual request sense was done - get the information. */
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			int rqlen = SENSE_LENGTH - pkt->pkt_resid;

			rqbp = mp_uscmdp->rqbp;
			/* get the amount of data to copy into rqbuf */
			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
				    rqlen);
			}
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
			scsi_free_consistent_buf(rqbp);
		}
		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: "
		    "FLAG_SENSING uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
	} else {
		struct scsi_status *status =
		    (struct scsi_status *)pkt->pkt_scbp;
		/*
		 * The command completed and we're not getting sense. Check
		 * for errors and decide what to do next.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command appears complete: reason: %x",
		    pkt->pkt_reason));
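		/*
		 * On a CHECK CONDITION with auto-sense unavailable, kick off
		 * a manual REQUEST SENSE. If that command is accepted, its
		 * completion (also routed to vhci_uscsi_iodone) finishes the
		 * original request, so the current pkt can be freed here.
		 */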
		if (status->sts_chk) {
			/* need to manually get the request sense */
			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
				scsi_destroy_pkt(pkt);
				return;
			}
		} else {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_uscsi_iodone: appears complete"));
			err = 0;
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			if (pkt->pkt_resid) {
				bp->b_resid += pkt->pkt_resid;
			}
		}
	}

	if (err) {
		if (bp->b_resid == 0)
			bp->b_resid = bp->b_bcount;
		bioerror(bp, err);
		bp->b_flags |= B_ERROR;
	}

	scsi_destroy_pkt(pkt);
	biodone(bp);

	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
}

/*
 * start routine for the mpapi uscsi command
 */
int
vhci_uscsi_iostart(struct buf *bp)
{
	struct scsi_pkt *pkt;
	struct uscsi_cmd *uscmdp;
	mp_uscsi_cmd_t *mp_uscmdp;
	int stat_size, rval;
	int retry = 0;

	ASSERT(bp->b_private != NULL);

	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
	uscmdp = mp_uscmdp->uscmdp;
	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
		stat_size = SENSE_LENGTH;
	} else {
		stat_size = 1;
	}

	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
	    stat_size, 0, 0, SLEEP_FUNC, NULL);
	if (pkt == NULL) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: EINVAL"));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EINVAL);
		biodone(bp);
		return (EINVAL);
	}

	pkt->pkt_time = uscmdp->uscsi_timeout;
	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
	pkt->pkt_comp = vhci_uscsi_iodone;
	pkt->pkt_private = mp_uscmdp;
	if (uscmdp->uscsi_flags & USCSI_SILENT)
		pkt->pkt_flags |= FLAG_SILENT;
	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
		pkt->pkt_flags |= FLAG_ISOLATE;
	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
		pkt->pkt_flags |= FLAG_DIAGNOSE;
	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
	}
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
	    " stat_size: %d",
	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
	 */
	if (scsi_pkt_allocated_correctly(pkt))
		pkt->pkt_path_instance = 0;
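	/*
	 * Retry TRAN_BUSY with a delay(9F) back-off, up to
	 * vhci_uscsi_retry_count attempts. delay(9F) sleeps, so this
	 * routine must not be entered from interrupt context.
	 */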
	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
	    retry < vhci_uscsi_retry_count) {
		delay(drv_usectohz(vhci_uscsi_delay));
		retry++;
	}
	if (retry >= vhci_uscsi_retry_count) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
	}
	switch (rval) {
	case TRAN_ACCEPT:
		rval = 0;
		break;

	default:
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
		    rval, bp->b_bcount, bp->b_resid));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EIO);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		rval = EIO;
		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		break;
	}
	VHCI_DEBUG(4, (CE_NOTE, NULL,
	    "vhci_uscsi_iostart: exit: rval: %d", rval));
	return (rval);
}

#ifdef DEBUG

extern struct scsi_key_strings scsi_cmds[];

static char *
vhci_print_scsi_cmd(char cmd)
{
	char tmp[64];
	char *cpnt;

	cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
	/*
	 * scsi_cmd_name() returns tmp for an unknown command; tmp goes out
	 * of scope on return and the caller would see garbage, so
	 * substitute a static string instead.
	 */
	if (cpnt == tmp) {
		cpnt = "Unknown Command";
	}
	return (cpnt);
}

extern uchar_t scsi_cdb_size[];

static void
vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
{
	int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
	char buf[256];

	if (level == CE_NOTE) {
		vhci_log(level, dip, "path cmd %s\n",
		    vhci_print_scsi_cmd(*cdb));
		return;
	}

	(void) sprintf(buf, "%s for cmd(%s)", title, vhci_print_scsi_cmd(*cdb));
	vhci_clean_print(dip, level, buf, cdb, len);
}

static void
vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
    int len)
{
	int i;
	int c;
	char *format;
	char buf[256];
	uchar_t byte;

	(void) sprintf(buf, "%s:\n", title);
	vhci_log(level, dev, "%s", buf);
	level = CE_CONT;
	/* Dump the data as hex, eight bytes per line. */
	for (i = 0; i < len; ) {
		buf[0] = 0;
		for (c = 0; c < 8 && i < len; c++, i++) {
			byte = (uchar_t)data[i];
			if (byte < 0x10)
				format = "0x0%x ";
			else
				format = "0x%x ";
			(void) sprintf(&buf[(int)strlen(buf)], format, byte);
		}
		(void) sprintf(&buf[(int)strlen(buf)], "\n");

		vhci_log(level, dev, "%s\n", buf);
	}
}
#endif