/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target
 * devices (large disks).
 */

#ifdef DEBUG
#define EMUL64DEBUG
#endif

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/dklabel.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

int emul64_usetaskq = 1;    /* set to zero for debugging */
int emul64debug = 0;
#ifdef EMUL64DEBUG
static int emul64_cdb_debug = 0;
#include <sys/debug.h>
#endif

/*
 * cb_ops function prototypes
 */
static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp);

/*
 * dev_ops function prototypes
 */
static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd);
static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_pkt_comp(void *);
static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset(struct scsi_address *ap, int level);
static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void emul64_scsi_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);

/*
 * internal functions
 */
static void emul64_i_initcap(struct emul64 *emul64);

static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...);
static int emul64_get_tgtrange(struct emul64 *,
    intptr_t,
    emul64_tgt_t **,
    emul64_tgt_range_t *);
static int emul64_write_off(struct emul64 *,
    emul64_tgt_t *,
    emul64_tgt_range_t *);
static int emul64_write_on(struct emul64 *,
    emul64_tgt_t *,
    emul64_tgt_range_t *);
static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *);
static void emul64_nowrite_free(emul64_nowrite_t *);
static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
    diskaddr_t start_block,
    size_t blkcnt,
    emul64_rng_overlap_t *overlapp,
    emul64_nowrite_t ***prevp);

extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);

#ifdef EMUL64DEBUG
static void emul64_debug_dump_cdb(struct scsi_address *ap,
    struct scsi_pkt *pkt);
#endif


#ifdef _DDICT
static int ddi_in_panic(void);
static int ddi_in_panic() { return (0); }
#ifndef SCSI_CAP_RESET_NOTIFICATION
#define SCSI_CAP_RESET_NOTIFICATION    14
#endif
#ifndef SCSI_RESET_NOTIFY
#define SCSI_RESET_NOTIFY              0x01
#endif
#ifndef SCSI_RESET_CANCEL
#define SCSI_RESET_CANCEL              0x02
#endif
#endif

/*
 * Tunables:
 *
 * emul64_max_task
 *     The taskq facility is used to queue up SCSI start requests on a
 *     per controller basis.  If the maximum number of queued tasks is
 *     hit, taskq_ent_alloc() delays for a second, which adversely
 *     impacts our performance.  This value establishes the maximum
 *     number of task queue entries when taskq_create is called.
 *
 * emul64_task_nthreads
 *     Specifies the number of threads that should be used to process a
 *     controller's task queue.  Our init function sets this to the number
 *     of CPUs on the system, but this can be overridden in emul64.conf.
 */
int emul64_max_task = 16;
int emul64_task_nthreads = 1;
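
/*
 * Illustrative note (not from the original source): the comment above says
 * these tunables can be overridden in emul64.conf.  A hypothetical override
 * might look like
 *
 *     emul64_task_nthreads=4;
 *
 * The exact property names honored are determined by the driver's property
 * lookup code (see emul64_bsd_get_props()), so treat the line above as a
 * sketch rather than documented syntax.
 */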

/*
 * Local static data
 */
static void *emul64_state = NULL;

/*
 * Character/block operations.
 */
static struct cb_ops emul64_cbops = {
    scsi_hba_open,              /* cb_open */
    scsi_hba_close,             /* cb_close */
    nodev,                      /* cb_strategy */
    nodev,                      /* cb_print */
    nodev,                      /* cb_dump */
    nodev,                      /* cb_read */
    nodev,                      /* cb_write */
    emul64_ioctl,               /* cb_ioctl */
    nodev,                      /* cb_devmap */
    nodev,                      /* cb_mmap */
    nodev,                      /* cb_segmap */
    nochpoll,                   /* cb_chpoll */
    ddi_prop_op,                /* cb_prop_op */
    NULL,                       /* cb_str */
    D_MP | D_64BIT | D_HOTPLUG, /* cb_flag */
    CB_REV,                     /* cb_rev */
    nodev,                      /* cb_aread */
    nodev                       /* cb_awrite */
};

/*
 * autoconfiguration routines.
 */

static struct dev_ops emul64_ops = {
    DEVO_REV,                   /* rev */
    0,                          /* refcnt */
    emul64_info,                /* getinfo */
    nulldev,                    /* identify */
    nulldev,                    /* probe */
    emul64_attach,              /* attach */
    emul64_detach,              /* detach */
    nodev,                      /* reset */
    &emul64_cbops,              /* char/block ops */
    NULL,                       /* bus ops */
    NULL,                       /* power */
    ddi_quiesce_not_needed,     /* quiesce */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
    &mod_driverops,                 /* module type - driver */
    "emul64 SCSI Host Bus Adapter", /* module name */
    &emul64_ops,                    /* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1,   /* ml_rev - must be MODREV_1 */
    &modldrv,   /* ml_linkage */
    NULL        /* end of driver linkage */
};

int
_init(void)
{
    int ret;

    ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
        EMUL64_INITIAL_SOFT_SPACE);
    if (ret != 0)
        return (ret);

    if ((ret = scsi_hba_init(&modlinkage)) != 0) {
        ddi_soft_state_fini(&emul64_state);
        return (ret);
    }

    /* Set the number of task threads to the number of CPUs */
    if (boot_max_ncpus == -1) {
        emul64_task_nthreads = max_ncpus;
    } else {
        emul64_task_nthreads = boot_max_ncpus;
    }

    emul64_bsd_init();

    ret = mod_install(&modlinkage);
    if (ret != 0) {
        emul64_bsd_fini();
        scsi_hba_fini(&modlinkage);
        ddi_soft_state_fini(&emul64_state);
    }

    return (ret);
}

int
_fini(void)
{
    int ret;

    if ((ret = mod_remove(&modlinkage)) != 0)
        return (ret);

    emul64_bsd_fini();

    scsi_hba_fini(&modlinkage);

    ddi_soft_state_fini(&emul64_state);

    return (ret);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number return the devinfo pointer
 * from the scsi_device structure.
 */
/*ARGSUSED*/
static int
emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
    struct emul64 *foo;
    int instance = getminor((dev_t)arg);

    switch (cmd) {
    case DDI_INFO_DEVT2DEVINFO:
        foo = ddi_get_soft_state(emul64_state, instance);
        if (foo != NULL)
            *result = (void *)foo->emul64_dip;
        else {
            *result = NULL;
            return (DDI_FAILURE);
        }
        break;

    case DDI_INFO_DEVT2INSTANCE:
        *result = (void *)(uintptr_t)instance;
        break;

    default:
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}

/*
 * Attach an instance of an emul64 host adapter.  Allocate data structures,
 * initialize the emul64 and we're on the air.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    int mutex_initted = 0;
    struct emul64 *emul64;
    int instance;
    scsi_hba_tran_t *tran = NULL;
    ddi_dma_attr_t tmp_dma_attr;

    emul64_bsd_get_props(dip);

    bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
    instance = ddi_get_instance(dip);

    switch (cmd) {
    case DDI_ATTACH:
        break;

    case DDI_RESUME:
        tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
        if (!tran) {
            return (DDI_FAILURE);
        }
        emul64 = TRAN2EMUL64(tran);

        return (DDI_SUCCESS);

    default:
        emul64_i_log(NULL, CE_WARN,
            "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
        return (DDI_FAILURE);
    }

    /*
     * Allocate emul64 data structure.
     */
    if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
        emul64_i_log(NULL, CE_WARN,
            "emul64%d: Failed to alloc soft state",
            instance);
        return (DDI_FAILURE);
    }

    emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
    if (emul64 == (struct emul64 *)NULL) {
        emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
            instance);
        ddi_soft_state_free(emul64_state, instance);
        return (DDI_FAILURE);
    }


    /*
     * Allocate a transport structure
     */
    tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
    if (tran == NULL) {
        cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
        goto fail;
    }

    emul64->emul64_tran = tran;
    emul64->emul64_dip = dip;

    tran->tran_hba_private = emul64;
    tran->tran_tgt_private = NULL;
    tran->tran_tgt_init = emul64_tran_tgt_init;
    tran->tran_tgt_probe = scsi_hba_probe;
    tran->tran_tgt_free = NULL;

    tran->tran_start = emul64_scsi_start;
    tran->tran_abort = emul64_scsi_abort;
    tran->tran_reset = emul64_scsi_reset;
    tran->tran_getcap = emul64_scsi_getcap;
    tran->tran_setcap = emul64_scsi_setcap;
    tran->tran_init_pkt = emul64_scsi_init_pkt;
    tran->tran_destroy_pkt = emul64_scsi_destroy_pkt;
    tran->tran_dmafree = emul64_scsi_dmafree;
    tran->tran_sync_pkt = emul64_scsi_sync_pkt;
    tran->tran_reset_notify = emul64_scsi_reset_notify;

    tmp_dma_attr.dma_attr_minxfer = 0x1;
    tmp_dma_attr.dma_attr_burstsizes = 0x7f;

    /*
     * Attach this instance of the hba
     */
    if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
        0) != DDI_SUCCESS) {
        cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
        goto fail;
    }

    emul64->emul64_initiator_id = 2;

    /*
     * Look up the scsi-options property
     */
    emul64->emul64_scsi_options =
        ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
        EMUL64_DEFAULT_SCSI_OPTIONS);
    EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
        emul64->emul64_scsi_options);


    /* mutexes to protect the emul64 request and response queue */
    mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
        emul64->emul64_iblock);
    mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
        emul64->emul64_iblock);

    mutex_initted = 1;

    EMUL64_MUTEX_ENTER(emul64);

    /*
     * Initialize the default Target Capabilities and Sync Rates
     */
    emul64_i_initcap(emul64);

    EMUL64_MUTEX_EXIT(emul64);


    ddi_report_dev(dip);
    emul64->emul64_taskq = taskq_create("emul64_comp",
        emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

    return (DDI_SUCCESS);

fail:
    emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

    if (mutex_initted) {
        mutex_destroy(EMUL64_REQ_MUTEX(emul64));
        mutex_destroy(EMUL64_RESP_MUTEX(emul64));
    }
    if (tran) {
        scsi_hba_tran_free(tran);
    }
    ddi_soft_state_free(emul64_state, instance);
    return (DDI_FAILURE);
}

/*ARGSUSED*/
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    struct emul64 *emul64;
    scsi_hba_tran_t *tran;
    int instance = ddi_get_instance(dip);


    /* get transport structure pointer from the dip */
    if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
        return (DDI_FAILURE);
    }

    /* get soft state from transport structure */
    emul64 = TRAN2EMUL64(tran);

    if (!emul64) {
        return (DDI_FAILURE);
    }

    EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

    switch (cmd) {
    case DDI_DETACH:
        EMUL64_MUTEX_ENTER(emul64);

        taskq_destroy(emul64->emul64_taskq);
        (void) scsi_hba_detach(dip);

        scsi_hba_tran_free(emul64->emul64_tran);


        EMUL64_MUTEX_EXIT(emul64);

        mutex_destroy(EMUL64_REQ_MUTEX(emul64));
        mutex_destroy(EMUL64_RESP_MUTEX(emul64));


        EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
        ddi_soft_state_free(emul64_state, instance);

        return (DDI_SUCCESS);

    case DDI_SUSPEND:
        return (DDI_SUCCESS);

    default:
        return (DDI_FAILURE);
    }
}

/*
 * Function name : emul64_tran_tgt_init
 *
 * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
 *
 */
/*ARGSUSED*/
static int
emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
    struct emul64 *emul64;
    emul64_tgt_t *tgt;
    char **geo_vidpid = NULL;
    char *geo, *vidpid;
    uint32_t *geoip = NULL;
    uint_t length;
    uint_t length2;
    lldaddr_t sector_count;
    char prop_name[15];
    int ret = DDI_FAILURE;

    emul64 = TRAN2EMUL64(tran);
    EMUL64_MUTEX_ENTER(emul64);

    /*
     * We get called for each target driver.conf node; multiple
     * nodes may map to the same tgt,lun (sd.conf, st.conf, etc.).
     * Check to see if transport to tgt,lun is already established.
     */
    tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
    if (tgt) {
        ret = DDI_SUCCESS;
        goto out;
    }

    /* see if we have a driver.conf specified device for this target,lun */
    (void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
        sd->sd_address.a_target, sd->sd_address.a_lun);
    if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
        DDI_PROP_DONTPASS, prop_name,
        &geo_vidpid, &length) != DDI_PROP_SUCCESS)
        goto out;
    if (length < 2) {
        cmn_err(CE_WARN, "emul64: %s property does not have 2 "
            "elements", prop_name);
        goto out;
    }

    /* pick geometry name and vidpid string from string array */
    geo = *geo_vidpid;
    vidpid = *(geo_vidpid + 1);

    /* lookup geometry property integer array */
    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
        geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
        cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
        goto out;
    }
    if (length2 < 6) {
        cmn_err(CE_WARN, "emul64: property %s does not have 6 "
            "elements", *geo_vidpid);
        goto out;
    }

    /* allocate and initialize tgt structure for tgt,lun */
    tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
    rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
    mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);

    /* create avl for data block storage */
    avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
        sizeof (blklist_t), offsetof(blklist_t, bl_node));

    /* save scsi_address and vidpid */
    bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
    (void) strncpy(tgt->emul64_tgt_inq, vidpid,
        sizeof (emul64->emul64_tgt->emul64_tgt_inq));

    /*
     * The high order 4 bytes of the sector count always come first in
     * emul64.conf.  They are followed by the low order 4 bytes.  Not
     * all CPU types want them in this order, but lldaddr_t takes care
     * of this for us.  We then pick up geometry (ncyl X nheads X nsect).
     */
    sector_count._p._u = *(geoip + 0);
    sector_count._p._l = *(geoip + 1);
    /*
     * On 32-bit platforms, fix block size if it's greater than the
     * allowable maximum.
     */
#if !defined(_LP64)
    if (sector_count._f > DK_MAX_BLOCKS)
        sector_count._f = DK_MAX_BLOCKS;
#endif
    tgt->emul64_tgt_sectors = sector_count._f;
    tgt->emul64_tgt_dtype = *(geoip + 2);
    tgt->emul64_tgt_ncyls = *(geoip + 3);
    tgt->emul64_tgt_nheads = *(geoip + 4);
    tgt->emul64_tgt_nsect = *(geoip + 5);

    /* insert target structure into list */
    tgt->emul64_tgt_next = emul64->emul64_tgt;
    emul64->emul64_tgt = tgt;
    ret = DDI_SUCCESS;

out:
    EMUL64_MUTEX_EXIT(emul64);
    if (geoip)
        ddi_prop_free(geoip);
    if (geo_vidpid)
        ddi_prop_free(geo_vidpid);
    return (ret);
}
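
/*
 * Illustrative note (values are hypothetical, not from the original source):
 * the driver.conf properties parsed by emul64_tran_tgt_init() above could be
 * supplied from emul64.conf along these lines.  The string array names a
 * geometry property and gives the inquiry vid/pid string; the geometry
 * integer array holds the sector count (high 32 bits, then low 32 bits)
 * followed by dtype, ncyl, nheads and nsect:
 *
 *     targ_0_0="disk0geom","VENDOR  EMUL64 DISK";
 *     disk0geom=0,2097152,0,2048,64,16;
 *
 * (2048 * 64 * 16 = 2097152 sectors, kept consistent with the geometry.)
 */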

/*
 * Function name : emul64_i_initcap
 *
 * Return Values : NONE
 * Description   : Initializes the default target capabilities and
 *                 Sync Rates.
 *
 * Context       : Called from the user thread through attach.
 *
 */
static void
emul64_i_initcap(struct emul64 *emul64)
{
    uint16_t cap, synch;
    int i;

    cap = 0;
    synch = 0;
    for (i = 0; i < NTARGETS_WIDE; i++) {
        emul64->emul64_cap[i] = cap;
        emul64->emul64_synch[i] = synch;
    }
    EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap);
}

/*
 * Function name : emul64_scsi_getcap()
 *
 * Return Values : current value of capability, if defined
 *                 -1 if capability is not defined
 * Description   : returns current capability value
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
static int
emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
    struct emul64 *emul64 = ADDR2EMUL64(ap);
    int rval = 0;

    /*
     * We don't allow inquiring about capabilities for other targets
     */
    if (cap == NULL || whom == 0) {
        return (-1);
    }

    EMUL64_MUTEX_ENTER(emul64);

    switch (scsi_hba_lookup_capstr(cap)) {
    case SCSI_CAP_DMA_MAX:
        rval = 1 << 24;    /* Limit to 16MB max transfer */
        break;
    case SCSI_CAP_MSG_OUT:
        rval = 1;
        break;
    case SCSI_CAP_DISCONNECT:
        rval = 1;
        break;
    case SCSI_CAP_SYNCHRONOUS:
        rval = 1;
        break;
    case SCSI_CAP_WIDE_XFER:
        rval = 1;
        break;
    case SCSI_CAP_TAGGED_QING:
        rval = 1;
        break;
    case SCSI_CAP_UNTAGGED_QING:
        rval = 1;
        break;
    case SCSI_CAP_PARITY:
        rval = 1;
        break;
    case SCSI_CAP_INITIATOR_ID:
        rval = emul64->emul64_initiator_id;
        break;
    case SCSI_CAP_ARQ:
        rval = 1;
        break;
    case SCSI_CAP_LINKED_CMDS:
        break;
    case SCSI_CAP_RESET_NOTIFICATION:
        rval = 1;
        break;

    default:
        rval = -1;
        break;
    }

    EMUL64_MUTEX_EXIT(emul64);

    return (rval);
}

/*
 * Function name : emul64_scsi_setcap()
 *
 * Return Values : 1 - capability exists and can be set to new value
 *                 0 - capability could not be set to new value
 *                -1 - no such capability
 *
 * Description   : sets a capability for a target
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
static int
emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
    struct emul64 *emul64 = ADDR2EMUL64(ap);
    int rval = 0;

    /*
     * We don't allow setting capabilities for other targets
     */
    if (cap == NULL || whom == 0) {
        return (-1);
    }

    EMUL64_MUTEX_ENTER(emul64);

    switch (scsi_hba_lookup_capstr(cap)) {
    case SCSI_CAP_DMA_MAX:
    case SCSI_CAP_MSG_OUT:
    case SCSI_CAP_PARITY:
    case SCSI_CAP_UNTAGGED_QING:
    case SCSI_CAP_LINKED_CMDS:
    case SCSI_CAP_RESET_NOTIFICATION:
        /*
         * None of these are settable via
         * the capability interface.
         */
        break;
    case SCSI_CAP_DISCONNECT:
        rval = 1;
        break;
    case SCSI_CAP_SYNCHRONOUS:
        rval = 1;
        break;
    case SCSI_CAP_TAGGED_QING:
        rval = 1;
        break;
    case SCSI_CAP_WIDE_XFER:
        rval = 1;
        break;
    case SCSI_CAP_INITIATOR_ID:
        rval = -1;
        break;
    case SCSI_CAP_ARQ:
        rval = 1;
        break;
    case SCSI_CAP_TOTAL_SECTORS:
        emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value;
        rval = TRUE;
        break;
    case SCSI_CAP_SECTOR_SIZE:
        rval = TRUE;
        break;
    default:
        rval = -1;
        break;
    }


    EMUL64_MUTEX_EXIT(emul64);

    return (rval);
}

/*
 * Function name : emul64_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 * Description   : Called by kernel on behalf of a target driver
 *                 calling scsi_init_pkt(9F).
 *                 Refer to tran_init_pkt(9E) man page
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
/* ARGSUSED */
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
    struct emul64 *emul64 = ADDR2EMUL64(ap);
    struct emul64_cmd *sp;

    ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

    /*
     * First step of emul64_scsi_init_pkt: pkt allocation
     */
    if (pkt == NULL) {
        pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
            statuslen,
            tgtlen, sizeof (struct emul64_cmd), callback, arg);
        if (pkt == NULL) {
            cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
                "scsi_hba_pkt_alloc failed");
            return (NULL);
        }

        sp = PKT2CMD(pkt);

        /*
         * Initialize the new pkt - we redundantly initialize
         * all the fields for illustrative purposes.
         */
        sp->cmd_pkt = pkt;
        sp->cmd_flags = 0;
        sp->cmd_scblen = statuslen;
        sp->cmd_cdblen = cmdlen;
        sp->cmd_emul64 = emul64;
        pkt->pkt_address = *ap;
        pkt->pkt_comp = (void (*)())NULL;
        pkt->pkt_flags = 0;
        pkt->pkt_time = 0;
        pkt->pkt_resid = 0;
        pkt->pkt_statistics = 0;
        pkt->pkt_reason = 0;

    } else {
        sp = PKT2CMD(pkt);
    }

    /*
     * Second step of emul64_scsi_init_pkt: dma allocation/move
     */
    if (bp && bp->b_bcount != 0) {
        if (bp->b_flags & B_READ) {
            sp->cmd_flags &= ~CFLAG_DMASEND;
        } else {
            sp->cmd_flags |= CFLAG_DMASEND;
        }
        bp_mapin(bp);
        sp->cmd_addr = (unsigned char *)bp->b_un.b_addr;
        sp->cmd_count = bp->b_bcount;
        pkt->pkt_resid = 0;
    }

    return (pkt);
}


/*
 * Function name : emul64_scsi_destroy_pkt
 *
 * Return Values : none
 * Description   : Called by kernel on behalf of a target driver
 *                 calling scsi_destroy_pkt(9F).
 *                 Refer to tran_destroy_pkt(9E) man page
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
static void
emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct emul64_cmd *sp = PKT2CMD(pkt);

    /*
     * emul64_scsi_dmafree inline to make things faster
     */
    if (sp->cmd_flags & CFLAG_DMAVALID) {
        /*
         * Free the mapping.
         */
        sp->cmd_flags &= ~CFLAG_DMAVALID;
    }

    /*
     * Free the pkt
     */
    scsi_hba_pkt_free(ap, pkt);
}


/*
 * Function name : emul64_scsi_dmafree()
 *
 * Return Values : none
 * Description   : free dvma resources
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * Function name : emul64_scsi_sync_pkt()
 *
 * Return Values : none
 * Description   : sync dma
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * routine for reset notification setup, to register or cancel.
 */
static int
emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
    struct emul64 *emul64 = ADDR2EMUL64(ap);
    struct emul64_reset_notify_entry *p, *beforep;
    int rval = DDI_FAILURE;

    mutex_enter(EMUL64_REQ_MUTEX(emul64));

    p = emul64->emul64_reset_notify_listf;
    beforep = NULL;

    while (p) {
        if (p->ap == ap)
            break;    /* An entry exists for this target */
        beforep = p;
        p = p->next;
    }

    if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
        if (beforep == NULL) {
            emul64->emul64_reset_notify_listf = p->next;
        } else {
            beforep->next = p->next;
        }
        kmem_free((caddr_t)p,
            sizeof (struct emul64_reset_notify_entry));
        rval = DDI_SUCCESS;

    } else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
        p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
            KM_SLEEP);
        p->ap = ap;
        p->callback = callback;
        p->arg = arg;
        p->next = emul64->emul64_reset_notify_listf;
        emul64->emul64_reset_notify_listf = p;
        rval = DDI_SUCCESS;
    }

    mutex_exit(EMUL64_REQ_MUTEX(emul64));

    return (rval);
}

/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR - emul64 has been shutdown
 *                 TRAN_BUSY - request queue is full
 *                 TRAN_ACCEPT - pkt has been submitted to emul64
 *
 * Description   : init pkt, start the request
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct emul64_cmd *sp = PKT2CMD(pkt);
    int rval = TRAN_ACCEPT;
    struct emul64 *emul64 = ADDR2EMUL64(ap);
    clock_t cur_lbolt;
    taskqid_t dispatched;

    ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
    ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

    EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

    pkt->pkt_reason = CMD_CMPLT;

#ifdef EMUL64DEBUG
    if (emul64_cdb_debug) {
        emul64_debug_dump_cdb(ap, pkt);
    }
#endif /* EMUL64DEBUG */

    /*
     * Calculate the deadline from pkt_time.  Instead of multiplying by
     * 100 (i.e. HZ), we multiply by 128 so that we can shift and, at the
     * same time, get a 28% grace period.  We ignore the rare case of
     * pkt_time == 0 and deal with it in emul64_i_watch().
     */
    cur_lbolt = ddi_get_lbolt();
    sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);
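
    /*
     * Worked example (illustrative note, not from the original source),
     * assuming hz == 100 as the comment above does: pkt_time = 60 seconds
     * gives a deadline offset of 60 * 128 = 7680 ticks, about 76.8 seconds,
     * versus the 6000 ticks that 60 * hz would give; the extra ~28% is the
     * grace period mentioned above.
     */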

    if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
        emul64_pkt_comp((caddr_t)pkt);
    } else {
        dispatched = NULL;
        if (emul64_collect_stats) {
            /*
             * If we are collecting statistics, call
             * taskq_dispatch in no sleep mode, so that we can
             * detect if we are exceeding the queue length that
             * was established in the call to taskq_create in
             * emul64_attach.  If the no sleep call fails
             * (returns NULL), the task will be dispatched in
             * sleep mode below.
             */
            dispatched = taskq_dispatch(emul64->emul64_taskq,
                emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
            if (dispatched == NULL) {
                /* Queue was full.  dispatch failed. */
                mutex_enter(&emul64_stats_mutex);
                emul64_taskq_max++;
                mutex_exit(&emul64_stats_mutex);
            }
        }
        if (dispatched == NULL) {
            (void) taskq_dispatch(emul64->emul64_taskq,
                emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
        }
    }

done:
    ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
    ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

    return (rval);
}

void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
    struct scsi_arq_status *arq =
        (struct scsi_arq_status *)pkt->pkt_scbp;

    /* got check, no data transferred and ARQ done */
    arq->sts_status.sts_chk = 1;
    pkt->pkt_state |= STATE_ARQ_DONE;
    pkt->pkt_state &= ~STATE_XFERRED_DATA;

    /* for ARQ */
    arq->sts_rqpkt_reason = CMD_CMPLT;
    arq->sts_rqpkt_resid = 0;
    arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
        STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
    arq->sts_sensedata.es_valid = 1;
    arq->sts_sensedata.es_class = 0x7;
    arq->sts_sensedata.es_key = key;
    arq->sts_sensedata.es_add_code = asc;
    arq->sts_sensedata.es_qual_code = ascq;
}

ushort_t
emul64_error_inject(struct scsi_pkt *pkt)
{
    struct emul64_cmd *sp = PKT2CMD(pkt);
    emul64_tgt_t *tgt;
    struct scsi_arq_status *arq =
        (struct scsi_arq_status *)pkt->pkt_scbp;
    uint_t max_sense_len;

    EMUL64_MUTEX_ENTER(sp->cmd_emul64);
    tgt = find_tgt(sp->cmd_emul64,
        pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
    EMUL64_MUTEX_EXIT(sp->cmd_emul64);

    /*
     * If there is no target, skip the error injection and
     * let the packet be handled normally.  This would normally
     * never happen since a_target and a_lun are setup in
     * emul64_scsi_init_pkt.
     */
    if (tgt == NULL) {
        return (ERR_INJ_DISABLE);
    }

    if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
        arq->sts_status = tgt->emul64_einj_scsi_status;
        pkt->pkt_state = tgt->emul64_einj_pkt_state;
        pkt->pkt_reason = tgt->emul64_einj_pkt_reason;

        /*
         * Calculate available sense buffer length.  We could just
         * assume sizeof (struct scsi_extended_sense) but hopefully
         * that limitation will go away soon.
         */
        max_sense_len = sp->cmd_scblen -
            (sizeof (struct scsi_arq_status) -
            sizeof (struct scsi_extended_sense));
        if (max_sense_len > tgt->emul64_einj_sense_length) {
            max_sense_len = tgt->emul64_einj_sense_length;
        }

        /* for ARQ */
        arq->sts_rqpkt_reason = CMD_CMPLT;
        arq->sts_rqpkt_resid = 0;
        arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
            STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

        /* Copy sense data */
        if (tgt->emul64_einj_sense_data != 0) {
            bcopy(tgt->emul64_einj_sense_data,
                (uint8_t *)&arq->sts_sensedata,
                max_sense_len);
        }
    }

    /* Return current error injection state */
    return (tgt->emul64_einj_state);
}

int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
    emul64_tgt_t *tgt;
    struct emul64_error_inj_data error_inj_req;

    /* Check args */
    if (arg == NULL) {
        return (EINVAL);
    }

    if (ddi_copyin((void *)arg, &error_inj_req,
        sizeof (error_inj_req), 0) != 0) {
        cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
        return (EFAULT);
    }

    EMUL64_MUTEX_ENTER(emul64);
    tgt = find_tgt(emul64, error_inj_req.eccd_target,
        error_inj_req.eccd_lun);
    EMUL64_MUTEX_EXIT(emul64);

    /* Make sure device exists */
    if (tgt == NULL) {
        return (ENODEV);
    }

    /* Free old sense buffer if we have one */
    if (tgt->emul64_einj_sense_data != NULL) {
        ASSERT(tgt->emul64_einj_sense_length != 0);
        kmem_free(tgt->emul64_einj_sense_data,
            tgt->emul64_einj_sense_length);
        tgt->emul64_einj_sense_data = NULL;
        tgt->emul64_einj_sense_length = 0;
    }

    /*
     * Now handle error injection request.  If error injection
     * is requested we will return the sense data provided for
     * any I/O to this target until told to stop.
     */
    tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
    tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
    tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
    tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
    tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
    switch (error_inj_req.eccd_inj_state) {
    case ERR_INJ_ENABLE:
    case ERR_INJ_ENABLE_NODATA:
        if (error_inj_req.eccd_sns_dlen) {
            tgt->emul64_einj_sense_data =
                kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
            /* Copy sense data */
            if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
                tgt->emul64_einj_sense_data,
                error_inj_req.eccd_sns_dlen, 0) != 0) {
                cmn_err(CE_WARN,
                    "emul64: sense data copy in failed\n");
                return (EFAULT);
            }
        }
        break;
    case ERR_INJ_DISABLE:
    default:
        break;
    }

    return (0);
}
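
/*
 * Illustrative note (not from the original source): a user-level consumer
 * of the EMUL64_ERROR_INJECT ioctl would lay out its request to match the
 * two ddi_copyin() calls above -- an emul64_error_inj_data structure
 * (eccd_target, eccd_lun, eccd_inj_state, eccd_sns_dlen, eccd_pkt_state,
 * eccd_pkt_reason, eccd_scsi_status) immediately followed by eccd_sns_dlen
 * bytes of sense data -- and pass a pointer to that buffer as the ioctl
 * argument, e.g. ioctl(fd, EMUL64_ERROR_INJECT, buf).  The device node used
 * to obtain fd is configuration dependent and is not specified here.
 */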

int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);

static void
emul64_handle_cmd(struct scsi_pkt *pkt)
{
    if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
        /*
         * If error injection is configured to return with
         * no data, return now without handling the command.
         * This is how normal check conditions work.
         *
         * If the error injection state is ERR_INJ_ENABLE
         * (or if error injection is disabled) continue and
         * handle the command.  This would be used for
         * KEY_RECOVERABLE_ERROR type conditions.
         */
        return;
    }

    switch (pkt->pkt_cdbp[0]) {
    case SCMD_START_STOP:
        (void) bsd_scsi_start_stop_unit(pkt);
        break;
    case SCMD_TEST_UNIT_READY:
        (void) bsd_scsi_test_unit_ready(pkt);
        break;
    case SCMD_REQUEST_SENSE:
        (void) bsd_scsi_request_sense(pkt);
        break;
    case SCMD_INQUIRY:
        (void) bsd_scsi_inquiry(pkt);
        break;
    case SCMD_FORMAT:
        (void) bsd_scsi_format(pkt);
        break;
    case SCMD_READ:
    case SCMD_WRITE:
    case SCMD_READ_G1:
    case SCMD_WRITE_G1:
    case SCMD_READ_G4:
    case SCMD_WRITE_G4:
        (void) bsd_scsi_io(pkt);
        break;
    case SCMD_LOG_SENSE_G1:
        (void) bsd_scsi_log_sense(pkt);
        break;
    case SCMD_MODE_SENSE:
    case SCMD_MODE_SENSE_G1:
        (void) bsd_scsi_mode_sense(pkt);
        break;
    case SCMD_MODE_SELECT:
    case SCMD_MODE_SELECT_G1:
        (void) bsd_scsi_mode_select(pkt);
        break;
    case SCMD_READ_CAPACITY:
        (void) bsd_scsi_read_capacity(pkt);
        break;
    case SCMD_SVC_ACTION_IN_G4:
        if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
            (void) bsd_scsi_read_capacity_16(pkt);
        } else {
            cmn_err(CE_WARN, "emul64: unrecognized G4 service "
                "action 0x%x", pkt->pkt_cdbp[1]);
        }
        break;
    case SCMD_RESERVE:
    case SCMD_RESERVE_G1:
        (void) bsd_scsi_reserve(pkt);
        break;
    case SCMD_RELEASE:
    case SCMD_RELEASE_G1:
        (void) bsd_scsi_release(pkt);
        break;
    case SCMD_REASSIGN_BLOCK:
        (void) bsd_scsi_reassign_block(pkt);
        break;
    case SCMD_READ_DEFECT_LIST:
        (void) bsd_scsi_read_defect_list(pkt);
        break;
    case SCMD_PRIN:
    case SCMD_PROUT:
    case SCMD_REPORT_LUNS:
        /* ASC 0x24 INVALID FIELD IN CDB */
        emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
        break;
    default:
        cmn_err(CE_WARN, "emul64: unrecognized "
            "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
        emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
        break;
    case SCMD_GET_CONFIGURATION:
    case 0x35:    /* SCMD_SYNCHRONIZE_CACHE */
        /* Don't complain */
        break;
    }
}

static void
emul64_pkt_comp(void *arg)
{
    struct scsi_pkt *pkt = (struct scsi_pkt *)arg;
    struct emul64_cmd *sp = PKT2CMD(pkt);
    emul64_tgt_t *tgt;

    EMUL64_MUTEX_ENTER(sp->cmd_emul64);
    tgt = find_tgt(sp->cmd_emul64,
        pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
    EMUL64_MUTEX_EXIT(sp->cmd_emul64);
    if (!tgt) {
        pkt->pkt_reason = CMD_TIMEOUT;
        pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
        pkt->pkt_statistics = STAT_TIMEOUT;
    } else {
        pkt->pkt_reason = CMD_CMPLT;
        *pkt->pkt_scbp = STATUS_GOOD;
        pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
            STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
        pkt->pkt_statistics = 0;
        emul64_handle_cmd(pkt);
    }
    scsi_hba_pkt_comp(pkt);
}

/* ARGSUSED */
static int
emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    return (1);
}

/* ARGSUSED */
static int
emul64_scsi_reset(struct scsi_address *ap, int level)
{
    return (1);
}

static int
emul64_get_tgtrange(struct emul64 *emul64,
    intptr_t arg,
    emul64_tgt_t **tgtp,
    emul64_tgt_range_t *tgtr)
{
    if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
        cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
        return (EFAULT);
    }
    EMUL64_MUTEX_ENTER(emul64);
    *tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
    EMUL64_MUTEX_EXIT(emul64);
    if (*tgtp == NULL) {
        cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
            tgtr->emul64_target, tgtr->emul64_lun,
            ddi_get_instance(emul64->emul64_dip));
        return (ENXIO);
    }
    return (0);
}

static int
emul64_ioctl(dev_t dev,
    int cmd,
    intptr_t arg,
    int mode,
    cred_t *credp,
    int *rvalp)
{
    struct emul64 *emul64;
    int instance;
    int rv = 0;
    emul64_tgt_range_t tgtr;
    emul64_tgt_t *tgt;

    instance = MINOR2INST(getminor(dev));
    emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
    if (emul64 == NULL) {
        cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
            getminor(dev));
        return (ENXIO);
    }

    switch (cmd) {
    case EMUL64_WRITE_OFF:
        rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
        if (rv == 0) {
            rv = emul64_write_off(emul64, tgt, &tgtr);
        }
        break;
    case EMUL64_WRITE_ON:
        rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
        if (rv == 0) {
            rv = emul64_write_on(emul64, tgt, &tgtr);
        }
        break;
    case EMUL64_ZERO_RANGE:
        rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
1430b1dd958fScth if (rv == 0) { 1431b1dd958fScth mutex_enter(&tgt->emul64_tgt_blk_lock); 1432b1dd958fScth rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange); 1433b1dd958fScth mutex_exit(&tgt->emul64_tgt_blk_lock); 1434b1dd958fScth } 1435b1dd958fScth break; 1436cefe316eSpd144616 case EMUL64_ERROR_INJECT: 1437cefe316eSpd144616 rv = emul64_error_inject_req(emul64, arg); 1438cefe316eSpd144616 break; 1439b1dd958fScth default: 1440b1dd958fScth rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp); 1441b1dd958fScth break; 1442b1dd958fScth } 1443b1dd958fScth return (rv); 1444b1dd958fScth } 1445b1dd958fScth 1446b1dd958fScth /* ARGSUSED */ 1447b1dd958fScth static int 1448b1dd958fScth emul64_write_off(struct emul64 *emul64, 1449b1dd958fScth emul64_tgt_t *tgt, 1450b1dd958fScth emul64_tgt_range_t *tgtr) 1451b1dd958fScth { 1452b1dd958fScth size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt; 1453b1dd958fScth emul64_nowrite_t *cur; 1454b1dd958fScth emul64_nowrite_t *nowrite; 1455b1dd958fScth emul64_rng_overlap_t overlap = O_NONE; 1456b1dd958fScth emul64_nowrite_t **prev = NULL; 1457b1dd958fScth diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb; 1458b1dd958fScth 1459b1dd958fScth nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange); 1460b1dd958fScth 1461b1dd958fScth /* Find spot in list */ 1462b1dd958fScth rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER); 1463b1dd958fScth cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev); 1464b1dd958fScth if (overlap == O_NONE) { 1465b1dd958fScth /* Insert into list */ 1466b1dd958fScth *prev = nowrite; 1467b1dd958fScth nowrite->emul64_nwnext = cur; 1468b1dd958fScth } 1469b1dd958fScth rw_exit(&tgt->emul64_tgt_nw_lock); 1470b1dd958fScth if (overlap == O_NONE) { 1471b1dd958fScth if (emul64_collect_stats) { 1472b1dd958fScth mutex_enter(&emul64_stats_mutex); 1473b1dd958fScth emul64_nowrite_count++; 1474b1dd958fScth mutex_exit(&emul64_stats_mutex); 1475b1dd958fScth } 1476b1dd958fScth } else { 1477b1dd958fScth cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%" 1478b1dd958fScth PRIx64 "overlaps 0x%llx,0x%" PRIx64 "\n", 1479b1dd958fScth nowrite->emul64_blocked.emul64_sb, 1480b1dd958fScth nowrite->emul64_blocked.emul64_blkcnt, 1481b1dd958fScth cur->emul64_blocked.emul64_sb, 1482b1dd958fScth cur->emul64_blocked.emul64_blkcnt); 1483b1dd958fScth emul64_nowrite_free(nowrite); 1484b1dd958fScth return (EINVAL); 1485b1dd958fScth } 1486b1dd958fScth return (0); 1487b1dd958fScth } 1488b1dd958fScth 1489b1dd958fScth /* ARGSUSED */ 1490b1dd958fScth static int 1491b1dd958fScth emul64_write_on(struct emul64 *emul64, 1492b1dd958fScth emul64_tgt_t *tgt, 1493b1dd958fScth emul64_tgt_range_t *tgtr) 1494b1dd958fScth { 1495b1dd958fScth size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt; 1496b1dd958fScth emul64_nowrite_t *cur; 1497b1dd958fScth emul64_rng_overlap_t overlap = O_NONE; 1498b1dd958fScth emul64_nowrite_t **prev = NULL; 1499b1dd958fScth int rv = 0; 1500b1dd958fScth diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb; 1501b1dd958fScth 1502b1dd958fScth /* Find spot in list */ 1503b1dd958fScth rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER); 1504b1dd958fScth cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev); 1505b1dd958fScth if (overlap == O_SAME) { 1506b1dd958fScth /* Remove from list */ 1507b1dd958fScth *prev = cur->emul64_nwnext; 1508b1dd958fScth } 1509b1dd958fScth rw_exit(&tgt->emul64_tgt_nw_lock); 1510b1dd958fScth 1511b1dd958fScth switch (overlap) { 1512b1dd958fScth case O_NONE: 1513b1dd958fScth cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx " 1514b1dd958fScth "range 
not found\n", sb, blkcnt); 1515b1dd958fScth rv = ENXIO; 1516b1dd958fScth break; 1517b1dd958fScth case O_SAME: 1518b1dd958fScth if (emul64_collect_stats) { 1519b1dd958fScth mutex_enter(&emul64_stats_mutex); 1520b1dd958fScth emul64_nowrite_count--; 1521b1dd958fScth mutex_exit(&emul64_stats_mutex); 1522b1dd958fScth } 1523b1dd958fScth emul64_nowrite_free(cur); 1524b1dd958fScth break; 1525b1dd958fScth case O_OVERLAP: 1526b1dd958fScth case O_SUBSET: 1527b1dd958fScth cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx " 1528b1dd958fScth "overlaps 0x%llx,0x%" PRIx64 "\n", 1529b1dd958fScth sb, blkcnt, cur->emul64_blocked.emul64_sb, 1530b1dd958fScth cur->emul64_blocked.emul64_blkcnt); 1531b1dd958fScth rv = EINVAL; 1532b1dd958fScth break; 1533b1dd958fScth } 1534b1dd958fScth return (rv); 1535b1dd958fScth } 1536b1dd958fScth 1537b1dd958fScth static emul64_nowrite_t * 1538b1dd958fScth emul64_find_nowrite(emul64_tgt_t *tgt, 1539b1dd958fScth diskaddr_t sb, 1540b1dd958fScth size_t blkcnt, 1541b1dd958fScth emul64_rng_overlap_t *overlap, 1542b1dd958fScth emul64_nowrite_t ***prevp) 1543b1dd958fScth { 1544b1dd958fScth emul64_nowrite_t *cur; 1545b1dd958fScth emul64_nowrite_t **prev; 1546b1dd958fScth 1547b1dd958fScth /* Find spot in list */ 1548b1dd958fScth *overlap = O_NONE; 1549b1dd958fScth prev = &tgt->emul64_tgt_nowrite; 1550b1dd958fScth cur = tgt->emul64_tgt_nowrite; 1551b1dd958fScth while (cur != NULL) { 1552b1dd958fScth *overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt); 1553b1dd958fScth if (*overlap != O_NONE) 1554b1dd958fScth break; 1555b1dd958fScth prev = &cur->emul64_nwnext; 1556b1dd958fScth cur = cur->emul64_nwnext; 1557b1dd958fScth } 1558b1dd958fScth 1559b1dd958fScth *prevp = prev; 1560b1dd958fScth return (cur); 1561b1dd958fScth } 1562b1dd958fScth 1563b1dd958fScth static emul64_nowrite_t * 1564b1dd958fScth emul64_nowrite_alloc(emul64_range_t *range) 1565b1dd958fScth { 1566b1dd958fScth emul64_nowrite_t *nw; 1567b1dd958fScth 1568b1dd958fScth nw = kmem_zalloc(sizeof (*nw), KM_SLEEP); 1569b1dd958fScth bcopy((void *) range, 1570b1dd958fScth (void *) &nw->emul64_blocked, 1571b1dd958fScth sizeof (nw->emul64_blocked)); 1572b1dd958fScth return (nw); 1573b1dd958fScth } 1574b1dd958fScth 1575b1dd958fScth static void 1576b1dd958fScth emul64_nowrite_free(emul64_nowrite_t *nw) 1577b1dd958fScth { 1578b1dd958fScth kmem_free((void *) nw, sizeof (*nw)); 1579b1dd958fScth } 1580b1dd958fScth 1581b1dd958fScth emul64_rng_overlap_t 1582b1dd958fScth emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt) 1583b1dd958fScth { 1584b1dd958fScth 1585b1dd958fScth if (rng->emul64_sb >= sb + cnt) 1586b1dd958fScth return (O_NONE); 1587b1dd958fScth if (rng->emul64_sb + rng->emul64_blkcnt <= sb) 1588b1dd958fScth return (O_NONE); 1589b1dd958fScth if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt)) 1590b1dd958fScth return (O_SAME); 1591b1dd958fScth if ((sb >= rng->emul64_sb) && 1592b1dd958fScth ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) { 1593b1dd958fScth return (O_SUBSET); 1594b1dd958fScth } 1595b1dd958fScth return (O_OVERLAP); 1596b1dd958fScth } 1597b1dd958fScth 1598b1dd958fScth #include <sys/varargs.h> 1599b1dd958fScth 1600b1dd958fScth /* 1601b1dd958fScth * Error logging, printing, and debug print routines 1602b1dd958fScth */ 1603b1dd958fScth 1604b1dd958fScth /*VARARGS3*/ 1605b1dd958fScth static void 1606b1dd958fScth emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...) 
1607b1dd958fScth { 1608b1dd958fScth char buf[256]; 1609b1dd958fScth va_list ap; 1610b1dd958fScth 1611b1dd958fScth va_start(ap, fmt); 1612b1dd958fScth (void) vsnprintf(buf, sizeof (buf), fmt, ap); 1613b1dd958fScth va_end(ap); 1614b1dd958fScth 1615b1dd958fScth scsi_log(emul64 ? emul64->emul64_dip : NULL, 1616b1dd958fScth "emul64", level, "%s\n", buf); 1617b1dd958fScth } 1618b1dd958fScth 1619b1dd958fScth 1620b1dd958fScth #ifdef EMUL64DEBUG 1621b1dd958fScth 1622b1dd958fScth static void 1623b1dd958fScth emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt) 1624b1dd958fScth { 1625b1dd958fScth static char hex[] = "0123456789abcdef"; 1626b1dd958fScth struct emul64 *emul64 = ADDR2EMUL64(ap); 1627b1dd958fScth struct emul64_cmd *sp = PKT2CMD(pkt); 1628b1dd958fScth uint8_t *cdb = pkt->pkt_cdbp; 1629b1dd958fScth char buf [256]; 1630b1dd958fScth char *p; 1631b1dd958fScth int i; 1632b1dd958fScth 1633b1dd958fScth (void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ", 1634b1dd958fScth ddi_get_instance(emul64->emul64_dip), 1635b1dd958fScth ap->a_target, ap->a_lun); 1636b1dd958fScth 1637b1dd958fScth p = buf + strlen(buf); 1638b1dd958fScth 1639b1dd958fScth *p++ = '['; 1640b1dd958fScth for (i = 0; i < sp->cmd_cdblen; i++, cdb++) { 1641b1dd958fScth if (i != 0) 1642b1dd958fScth *p++ = ' '; 1643b1dd958fScth *p++ = hex[(*cdb >> 4) & 0x0f]; 1644b1dd958fScth *p++ = hex[*cdb & 0x0f]; 1645b1dd958fScth } 1646b1dd958fScth *p++ = ']'; 1647b1dd958fScth *p++ = '\n'; 1648b1dd958fScth *p = 0; 1649b1dd958fScth 1650b1dd958fScth cmn_err(CE_CONT, buf); 1651b1dd958fScth } 1652b1dd958fScth #endif /* EMUL64DEBUG */ 1653
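
/*
 * The #if 0 block below is an illustrative sketch only; it is not part of
 * the driver and is never compiled.  It shows, with concrete numbers, how
 * emul64_overlap() classifies a request against a nowrite entry covering
 * blocks [100, 100 + 50).  The helper name emul64_overlap_example is
 * hypothetical and exists only for this example.
 */
#if 0
static void
emul64_overlap_example(void)
{
	emul64_range_t	rng;

	rng.emul64_sb = 100;
	rng.emul64_blkcnt = 50;

	ASSERT(emul64_overlap(&rng, 100, 50) == O_SAME);	/* identical */
	ASSERT(emul64_overlap(&rng, 110, 10) == O_SUBSET);	/* wholly inside */
	ASSERT(emul64_overlap(&rng, 90, 20) == O_OVERLAP);	/* partial overlap */
	ASSERT(emul64_overlap(&rng, 150, 10) == O_NONE);	/* disjoint (adjacent) */
}
#endif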
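
/*
 * The #if 0 block below is likewise an illustrative sketch, not driver
 * code.  It outlines how a userland program might exercise the
 * EMUL64_WRITE_OFF/EMUL64_WRITE_ON ioctls handled above, assuming the
 * ioctl codes and emul64_tgt_range_t are visible to userland through
 * <sys/emul64.h>.  The device path is hypothetical; the actual minor
 * node name depends on how the instance attaches on a given system.
 */
#if 0
#include <sys/types.h>
#include <sys/emul64.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stropts.h>

int
main(void)
{
	emul64_tgt_range_t	tgtr;
	int			fd;

	/* Hypothetical device node for instance 0. */
	fd = open("/devices/pseudo/emul64@0:devctl", O_RDWR);
	if (fd < 0) {
		perror("open");
		return (1);
	}

	/* Write-protect blocks [1024, 1024 + 256) on target 0, LUN 0. */
	(void) memset(&tgtr, 0, sizeof (tgtr));
	tgtr.emul64_target = 0;
	tgtr.emul64_lun = 0;
	tgtr.emul64_blkrange.emul64_sb = 1024;
	tgtr.emul64_blkrange.emul64_blkcnt = 256;
	if (ioctl(fd, EMUL64_WRITE_OFF, &tgtr) < 0)
		perror("EMUL64_WRITE_OFF");

	/* Re-enable writes; the range must match the earlier one exactly. */
	if (ioctl(fd, EMUL64_WRITE_ON, &tgtr) < 0)
		perror("EMUL64_WRITE_ON");

	(void) close(fd);
	return (0);
}
#endif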