/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I% %E% SMI"

/*
 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target
 * devices (large disks).
 */

#ifdef DEBUG
#define EMUL64DEBUG
#endif

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/dklabel.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

int emul64_usetaskq = 1;        /* set to zero for debugging */
int emul64debug = 0;
#ifdef EMUL64DEBUG
static int emul64_cdb_debug = 0;
#include <sys/debug.h>
#endif

/*
 * cb_ops function prototypes
 */
static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp);

/*
 * dev_ops function prototypes
 */
static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd);
static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_pkt_comp(void *);
static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset(struct scsi_address *ap, int level);
static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void emul64_scsi_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);

/*
 * internal functions
 */
static void emul64_i_initcap(struct emul64 *emul64);

static void emul64_i_log(struct emul64 *emul64, int level,
    char *fmt, ...);
static int emul64_get_tgtrange(struct emul64 *,
    intptr_t,
    emul64_tgt_t **,
    emul64_tgt_range_t *);
static int emul64_write_off(struct emul64 *,
    emul64_tgt_t *,
    emul64_tgt_range_t *);
static int emul64_write_on(struct emul64 *,
    emul64_tgt_t *,
    emul64_tgt_range_t *);
static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *);
static void emul64_nowrite_free(emul64_nowrite_t *);
static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
    diskaddr_t start_block,
    size_t blkcnt,
    emul64_rng_overlap_t *overlapp,
    emul64_nowrite_t ***prevp);

extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);

#ifdef EMUL64DEBUG
static void emul64_debug_dump_cdb(struct scsi_address *ap,
    struct scsi_pkt *pkt);
#endif


#ifdef _DDICT
static int ddi_in_panic(void);
static int ddi_in_panic() { return (0); }
#ifndef SCSI_CAP_RESET_NOTIFICATION
#define SCSI_CAP_RESET_NOTIFICATION     14
#endif
#ifndef SCSI_RESET_NOTIFY
#define SCSI_RESET_NOTIFY       0x01
#endif
#ifndef SCSI_RESET_CANCEL
#define SCSI_RESET_CANCEL       0x02
#endif
#endif

/*
 * Tunables:
 *
 * emul64_max_task
 *      The taskq facility is used to queue up SCSI start requests on a per
 *      controller basis.  If the maximum number of queued tasks is hit,
 *      taskq_ent_alloc() delays for a second, which adversely impacts our
 *      performance.  This value establishes the maximum number of task
 *      queue entries when taskq_create is called.
 *
 * emul64_task_nthreads
 *      Specifies the number of threads that should be used to process a
 *      controller's task queue.  Our init function sets this to the number
 *      of CPUs on the system, but this can be overridden in emul64.conf.
 */
int emul64_max_task = 16;
int emul64_task_nthreads = 1;
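
/*
 * Illustrative sketch, not from the original source: assuming the two
 * tunables above are exported as driver properties under their variable
 * names, an emul64.conf override might look like the lines below.  The
 * property names and values here are assumptions for illustration; see
 * emul64_bsd_get_props() for the properties the driver actually reads.
 *
 *      emul64_max_task=32;
 *      emul64_task_nthreads=4;
 */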

/*
 * Local static data
 */
static void *emul64_state = NULL;

/*
 * Character/block operations.
 */
static struct cb_ops emul64_cbops = {
        scsi_hba_open,          /* cb_open */
        scsi_hba_close,         /* cb_close */
        nodev,                  /* cb_strategy */
        nodev,                  /* cb_print */
        nodev,                  /* cb_dump */
        nodev,                  /* cb_read */
        nodev,                  /* cb_write */
        emul64_ioctl,           /* cb_ioctl */
        nodev,                  /* cb_devmap */
        nodev,                  /* cb_mmap */
        nodev,                  /* cb_segmap */
        nochpoll,               /* cb_chpoll */
        ddi_prop_op,            /* cb_prop_op */
        NULL,                   /* cb_str */
        D_MP | D_64BIT | D_HOTPLUG,     /* cb_flag */
        CB_REV,                 /* cb_rev */
        nodev,                  /* cb_aread */
        nodev                   /* cb_awrite */
};

/*
 * autoconfiguration routines.
 */

static struct dev_ops emul64_ops = {
        DEVO_REV,               /* rev, */
        0,                      /* refcnt */
        emul64_info,            /* getinfo */
        nulldev,                /* identify */
        nulldev,                /* probe */
        emul64_attach,          /* attach */
        emul64_detach,          /* detach */
        nodev,                  /* reset */
        &emul64_cbops,          /* char/block ops */
        NULL                    /* bus ops */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
        &mod_driverops,                 /* module type - driver */
        "emul64 SCSI Host Bus Adapter", /* module name */
        &emul64_ops,                    /* driver ops */
};

static struct modlinkage modlinkage = {
        MODREV_1,                       /* ml_rev - must be MODREV_1 */
        &modldrv,                       /* ml_linkage */
        NULL                            /* end of driver linkage */
};

int
_init(void)
{
        int ret;

        ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
            EMUL64_INITIAL_SOFT_SPACE);
        if (ret != 0)
                return (ret);

        if ((ret = scsi_hba_init(&modlinkage)) != 0) {
                ddi_soft_state_fini(&emul64_state);
                return (ret);
        }

        /* Set the number of task threads to the number of CPUs */
        if (boot_max_ncpus == -1) {
                emul64_task_nthreads = max_ncpus;
        } else {
                emul64_task_nthreads = boot_max_ncpus;
        }

        emul64_bsd_init();

        ret = mod_install(&modlinkage);
        if (ret != 0) {
                emul64_bsd_fini();
                scsi_hba_fini(&modlinkage);
                ddi_soft_state_fini(&emul64_state);
        }

        return (ret);
}

int
_fini(void)
{
        int ret;

        if ((ret = mod_remove(&modlinkage)) != 0)
                return (ret);

        emul64_bsd_fini();

        scsi_hba_fini(&modlinkage);

        ddi_soft_state_fini(&emul64_state);

        return (ret);
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number return the devinfo pointer
 * from the scsi_device structure.
 */
/*ARGSUSED*/
static int
emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
        struct emul64   *foo;
        int             instance = getminor((dev_t)arg);

        switch (cmd) {
        case DDI_INFO_DEVT2DEVINFO:
                foo = ddi_get_soft_state(emul64_state, instance);
                if (foo != NULL)
                        *result = (void *)foo->emul64_dip;
                else {
                        *result = NULL;
                        return (DDI_FAILURE);
                }
                break;

        case DDI_INFO_DEVT2INSTANCE:
                *result = (void *)(uintptr_t)instance;
                break;

        default:
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*
 * Attach an instance of an emul64 host adapter.  Allocate data structures,
 * initialize the emul64 and we're on the air.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        int             mutex_initted = 0;
        struct emul64   *emul64;
        int             instance;
        scsi_hba_tran_t *tran = NULL;
        ddi_dma_attr_t  tmp_dma_attr;

        emul64_bsd_get_props(dip);

        bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
        instance = ddi_get_instance(dip);

        switch (cmd) {
        case DDI_ATTACH:
                break;

        case DDI_RESUME:
                tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
                if (!tran) {
                        return (DDI_FAILURE);
                }
                emul64 = TRAN2EMUL64(tran);

                return (DDI_SUCCESS);

        default:
                emul64_i_log(NULL, CE_WARN,
                    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
                return (DDI_FAILURE);
        }

        /*
         * Allocate emul64 data structure.
         */
        if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
                emul64_i_log(NULL, CE_WARN,
                    "emul64%d: Failed to alloc soft state",
                    instance);
                return (DDI_FAILURE);
        }

        emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
        if (emul64 == (struct emul64 *)NULL) {
                emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
                    instance);
                ddi_soft_state_free(emul64_state, instance);
                return (DDI_FAILURE);
        }


        /*
         * Allocate a transport structure
         */
        tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
        if (tran == NULL) {
                cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
                goto fail;
        }

        emul64->emul64_tran = tran;
        emul64->emul64_dip = dip;

        tran->tran_hba_private  = emul64;
        tran->tran_tgt_private  = NULL;
        tran->tran_tgt_init     = emul64_tran_tgt_init;
        tran->tran_tgt_probe    = scsi_hba_probe;
        tran->tran_tgt_free     = NULL;

        tran->tran_start        = emul64_scsi_start;
        tran->tran_abort        = emul64_scsi_abort;
        tran->tran_reset        = emul64_scsi_reset;
        tran->tran_getcap       = emul64_scsi_getcap;
        tran->tran_setcap       = emul64_scsi_setcap;
        tran->tran_init_pkt     = emul64_scsi_init_pkt;
        tran->tran_destroy_pkt  = emul64_scsi_destroy_pkt;
        tran->tran_dmafree      = emul64_scsi_dmafree;
        tran->tran_sync_pkt     = emul64_scsi_sync_pkt;
        tran->tran_reset_notify = emul64_scsi_reset_notify;

        tmp_dma_attr.dma_attr_minxfer = 0x1;
        tmp_dma_attr.dma_attr_burstsizes = 0x7f;

        /*
         * Attach this instance of the hba
         */
        if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
            0) != DDI_SUCCESS) {
                cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
                goto fail;
        }

        emul64->emul64_initiator_id = 2;

        /*
         * Look up the scsi-options property
         */
        emul64->emul64_scsi_options =
            ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
            EMUL64_DEFAULT_SCSI_OPTIONS);
        EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
            emul64->emul64_scsi_options);


        /* mutexes to protect the emul64 request and response queue */
        mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
            emul64->emul64_iblock);
        mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
            emul64->emul64_iblock);

        mutex_initted = 1;

        EMUL64_MUTEX_ENTER(emul64);

        /*
         * Initialize the default Target Capabilities and Sync Rates
         */
        emul64_i_initcap(emul64);

        EMUL64_MUTEX_EXIT(emul64);


        ddi_report_dev(dip);
        emul64->emul64_taskq = taskq_create("emul64_comp",
            emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

        return (DDI_SUCCESS);

fail:
        emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

        if (mutex_initted) {
                mutex_destroy(EMUL64_REQ_MUTEX(emul64));
                mutex_destroy(EMUL64_RESP_MUTEX(emul64));
        }
        if (tran) {
                scsi_hba_tran_free(tran);
        }
        ddi_soft_state_free(emul64_state, instance);
        return (DDI_FAILURE);
}
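
/*
 * Hedged example, not part of the original source: given the
 * "scsi-options" lookup in emul64_attach() above, a line like the
 * following in emul64.conf would override EMUL64_DEFAULT_SCSI_OPTIONS
 * for the instance.  The value 0x58 is purely illustrative.
 *
 *      scsi-options=0x58;
 */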

/*ARGSUSED*/
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        struct emul64   *emul64;
        scsi_hba_tran_t *tran;
        int             instance = ddi_get_instance(dip);


        /* get transport structure pointer from the dip */
        if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
                return (DDI_FAILURE);
        }

        /* get soft state from transport structure */
        emul64 = TRAN2EMUL64(tran);

        if (!emul64) {
                return (DDI_FAILURE);
        }

        EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

        switch (cmd) {
        case DDI_DETACH:
                EMUL64_MUTEX_ENTER(emul64);

                taskq_destroy(emul64->emul64_taskq);
                (void) scsi_hba_detach(dip);

                scsi_hba_tran_free(emul64->emul64_tran);


                EMUL64_MUTEX_EXIT(emul64);

                mutex_destroy(EMUL64_REQ_MUTEX(emul64));
                mutex_destroy(EMUL64_RESP_MUTEX(emul64));


                EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
                ddi_soft_state_free(emul64_state, instance);

                return (DDI_SUCCESS);

        case DDI_SUSPEND:
                return (DDI_SUCCESS);

        default:
                return (DDI_FAILURE);
        }
}

/*
 * Function name : emul64_tran_tgt_init
 *
 * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
 *
 */
/*ARGSUSED*/
static int
emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
        struct emul64   *emul64;
        emul64_tgt_t    *tgt;
        char            **geo_vidpid = NULL;
        char            *geo, *vidpid;
        uint32_t        *geoip = NULL;
        uint_t          length;
        uint_t          length2;
        lldaddr_t       sector_count;
        char            prop_name[15];
        int             ret = DDI_FAILURE;

        emul64 = TRAN2EMUL64(tran);
        EMUL64_MUTEX_ENTER(emul64);

        /*
         * We get called for each target driver.conf node, multiple
         * nodes may map to the same tgt,lun (sd.conf, st.conf, etc).
         * Check to see if transport to tgt,lun already established.
         */
        tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
        if (tgt) {
                ret = DDI_SUCCESS;
                goto out;
        }

        /* see if we have driver.conf specified device for this target,lun */
        (void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
            sd->sd_address.a_target, sd->sd_address.a_lun);
        if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
            DDI_PROP_DONTPASS, prop_name,
            &geo_vidpid, &length) != DDI_PROP_SUCCESS)
                goto out;
        if (length < 2) {
                cmn_err(CE_WARN, "emul64: %s property does not have 2 "
                    "elements", prop_name);
                goto out;
        }

        /* pick geometry name and vidpid string from string array */
        geo = *geo_vidpid;
        vidpid = *(geo_vidpid + 1);

        /* lookup geometry property integer array */
        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
            geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
                cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
                goto out;
        }
        if (length2 < 6) {
                cmn_err(CE_WARN, "emul64: property %s does not have 6 "
                    "elements", *geo_vidpid);
                goto out;
        }

        /* allocate and initialize tgt structure for tgt,lun */
        tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
        rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
        mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);

        /* create avl for data block storage */
        avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
            sizeof (blklist_t), offsetof(blklist_t, bl_node));

        /* save scsi_address and vidpid */
        bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
        (void) strncpy(tgt->emul64_tgt_inq, vidpid,
            sizeof (emul64->emul64_tgt->emul64_tgt_inq));

        /*
         * The high order 4 bytes of the sector count always come first in
         * emul64.conf.  They are followed by the low order 4 bytes.  Not
         * all CPU types want them in this order, but lldaddr_t takes care
         * of this for us.  We then pick up geometry (ncyl X nheads X nsect).
         */
        sector_count._p._u = *(geoip + 0);
        sector_count._p._l = *(geoip + 1);
        /*
         * On 32-bit platforms, fix block size if it's greater than the
         * allowable maximum.
         */
#if !defined(_LP64)
        if (sector_count._f > DK_MAX_BLOCKS)
                sector_count._f = DK_MAX_BLOCKS;
#endif
        tgt->emul64_tgt_sectors = sector_count._f;
        tgt->emul64_tgt_dtype = *(geoip + 2);
        tgt->emul64_tgt_ncyls = *(geoip + 3);
        tgt->emul64_tgt_nheads = *(geoip + 4);
        tgt->emul64_tgt_nsect = *(geoip + 5);

        /* insert target structure into list */
        tgt->emul64_tgt_next = emul64->emul64_tgt;
        emul64->emul64_tgt = tgt;
        ret = DDI_SUCCESS;

out:    EMUL64_MUTEX_EXIT(emul64);
        if (geoip)
                ddi_prop_free(geoip);
        if (geo_vidpid)
                ddi_prop_free(geo_vidpid);
        return (ret);
}
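
/*
 * Illustrative sketch of the driver.conf properties that
 * emul64_tran_tgt_init() parses above; the names and values are
 * assumptions for illustration only.  The string array names a geometry
 * property and supplies the inquiry vid/pid; the geometry property
 * supplies six integers: sector count high 32 bits, sector count low
 * 32 bits, device type, ncyl, nheads and nsect.  Note that
 * 2048 * 64 * 32 == 4194304, matching the low order sector count:
 *
 *      targ_0_0="geo_0_0","EMULATED EMUL64 DISK    ";
 *      geo_0_0=0,4194304,0,2048,64,32;
 */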

/*
 * Function name : emul64_i_initcap
 *
 * Return Values : NONE
 * Description   : Initializes the default target capabilities and
 *                 Sync Rates.
 *
 * Context       : Called from the user thread through attach.
 *
 */
static void
emul64_i_initcap(struct emul64 *emul64)
{
        uint16_t        cap, synch;
        int             i;

        cap = 0;
        synch = 0;
        for (i = 0; i < NTARGETS_WIDE; i++) {
                emul64->emul64_cap[i] = cap;
                emul64->emul64_synch[i] = synch;
        }
        EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap);
}

/*
 * Function name : emul64_scsi_getcap()
 *
 * Return Values : current value of capability, if defined
 *                 -1 if capability is not defined
 * Description   : returns current capability value
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
static int
emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
        struct emul64   *emul64 = ADDR2EMUL64(ap);
        int             rval = 0;

        /*
         * We don't allow inquiring about capabilities for other targets
         */
        if (cap == NULL || whom == 0) {
                return (-1);
        }

        EMUL64_MUTEX_ENTER(emul64);

        switch (scsi_hba_lookup_capstr(cap)) {
        case SCSI_CAP_DMA_MAX:
                rval = 1 << 24; /* Limit to 16MB max transfer */
                break;
        case SCSI_CAP_MSG_OUT:
                rval = 1;
                break;
        case SCSI_CAP_DISCONNECT:
                rval = 1;
                break;
        case SCSI_CAP_SYNCHRONOUS:
                rval = 1;
                break;
        case SCSI_CAP_WIDE_XFER:
                rval = 1;
                break;
        case SCSI_CAP_TAGGED_QING:
                rval = 1;
                break;
        case SCSI_CAP_UNTAGGED_QING:
                rval = 1;
                break;
        case SCSI_CAP_PARITY:
                rval = 1;
                break;
        case SCSI_CAP_INITIATOR_ID:
                rval = emul64->emul64_initiator_id;
                break;
        case SCSI_CAP_ARQ:
                rval = 1;
                break;
        case SCSI_CAP_LINKED_CMDS:
                break;
        case SCSI_CAP_RESET_NOTIFICATION:
                rval = 1;
                break;

        default:
                rval = -1;
                break;
        }

        EMUL64_MUTEX_EXIT(emul64);

        return (rval);
}

/*
 * Function name : emul64_scsi_setcap()
 *
 * Return Values : 1 - capability exists and can be set to new value
 *                 0 - capability could not be set to new value
 *                -1 - no such capability
 *
 * Description   : sets a capability for a target
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
static int
emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
        struct emul64   *emul64 = ADDR2EMUL64(ap);
        int             rval = 0;

        /*
         * We don't allow setting capabilities for other targets
         */
        if (cap == NULL || whom == 0) {
                return (-1);
        }

        EMUL64_MUTEX_ENTER(emul64);

        switch (scsi_hba_lookup_capstr(cap)) {
        case SCSI_CAP_DMA_MAX:
        case SCSI_CAP_MSG_OUT:
        case SCSI_CAP_PARITY:
        case SCSI_CAP_UNTAGGED_QING:
        case SCSI_CAP_LINKED_CMDS:
        case SCSI_CAP_RESET_NOTIFICATION:
                /*
                 * None of these are settable via
                 * the capability interface.
                 */
                break;
        case SCSI_CAP_DISCONNECT:
                rval = 1;
                break;
        case SCSI_CAP_SYNCHRONOUS:
                rval = 1;
                break;
        case SCSI_CAP_TAGGED_QING:
                rval = 1;
                break;
        case SCSI_CAP_WIDE_XFER:
                rval = 1;
                break;
        case SCSI_CAP_INITIATOR_ID:
                rval = -1;
                break;
        case SCSI_CAP_ARQ:
                rval = 1;
                break;
        case SCSI_CAP_TOTAL_SECTORS:
                emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value;
                rval = TRUE;
                break;
        case SCSI_CAP_SECTOR_SIZE:
                rval = TRUE;
                break;
        default:
                rval = -1;
                break;
        }


        EMUL64_MUTEX_EXIT(emul64);

        return (rval);
}

/*
 * Function name : emul64_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 * Description   : Called by kernel on behalf of a target driver
 *                 calling scsi_init_pkt(9F).
 *                 Refer to tran_init_pkt(9E) man page
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
/* ARGSUSED */
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
        struct emul64           *emul64 = ADDR2EMUL64(ap);
        struct emul64_cmd       *sp;

        ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

        /*
         * First step of emul64_scsi_init_pkt:  pkt allocation
         */
        if (pkt == NULL) {
                pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
                    statuslen,
                    tgtlen, sizeof (struct emul64_cmd), callback, arg);
                if (pkt == NULL) {
                        cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
                            "scsi_hba_pkt_alloc failed");
                        return (NULL);
                }

                sp = PKT2CMD(pkt);

                /*
                 * Initialize the new pkt - we redundantly initialize
                 * all the fields for illustrative purposes.
                 */
                sp->cmd_pkt = pkt;
                sp->cmd_flags = 0;
                sp->cmd_scblen = statuslen;
                sp->cmd_cdblen = cmdlen;
                sp->cmd_emul64 = emul64;
                pkt->pkt_address = *ap;
                pkt->pkt_comp = (void (*)())NULL;
                pkt->pkt_flags = 0;
                pkt->pkt_time = 0;
                pkt->pkt_resid = 0;
                pkt->pkt_statistics = 0;
                pkt->pkt_reason = 0;

        } else {
                sp = PKT2CMD(pkt);
        }

        /*
         * Second step of emul64_scsi_init_pkt:  dma allocation/move
         */
        if (bp && bp->b_bcount != 0) {
                if (bp->b_flags & B_READ) {
                        sp->cmd_flags &= ~CFLAG_DMASEND;
                } else {
                        sp->cmd_flags |= CFLAG_DMASEND;
                }
                bp_mapin(bp);
                sp->cmd_addr = (unsigned char *) bp->b_un.b_addr;
                sp->cmd_count = bp->b_bcount;
                pkt->pkt_resid = 0;
        }

        return (pkt);
}
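
/*
 * For context, a minimal sketch (assumed, typical scsi_init_pkt(9F)
 * usage from a target driver; not taken from this source) of the call
 * that ultimately reaches emul64_scsi_init_pkt() above through the
 * tran_init_pkt entry point:
 *
 *      pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
 *          sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
 *
 * The framework supplies cmdlen, statuslen and bp to this driver from
 * those arguments.
 */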

/*
 * Function name : emul64_scsi_destroy_pkt
 *
 * Return Values : none
 * Description   : Called by kernel on behalf of a target driver
 *                 calling scsi_destroy_pkt(9F).
 *                 Refer to tran_destroy_pkt(9E) man page
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
static void
emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
        struct emul64_cmd       *sp = PKT2CMD(pkt);

        /*
         * emul64_scsi_dmafree inline to make things faster
         */
        if (sp->cmd_flags & CFLAG_DMAVALID) {
                /*
                 * Free the mapping.
                 */
                sp->cmd_flags &= ~CFLAG_DMAVALID;
        }

        /*
         * Free the pkt
         */
        scsi_hba_pkt_free(ap, pkt);
}


/*
 * Function name : emul64_scsi_dmafree()
 *
 * Return Values : none
 * Description   : free dvma resources
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * Function name : emul64_scsi_sync_pkt()
 *
 * Return Values : none
 * Description   : sync dma
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * routine for reset notification setup, to register or cancel.
 */
static int
emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
        struct emul64                           *emul64 = ADDR2EMUL64(ap);
        struct emul64_reset_notify_entry        *p, *beforep;
        int                                     rval = DDI_FAILURE;

        mutex_enter(EMUL64_REQ_MUTEX(emul64));

        p = emul64->emul64_reset_notify_listf;
        beforep = NULL;

        while (p) {
                if (p->ap == ap)
                        break;  /* An entry exists for this target */
                beforep = p;
                p = p->next;
        }

        if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
                if (beforep == NULL) {
                        emul64->emul64_reset_notify_listf = p->next;
                } else {
                        beforep->next = p->next;
                }
                kmem_free((caddr_t)p,
                    sizeof (struct emul64_reset_notify_entry));
                rval = DDI_SUCCESS;

        } else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
                p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
                    KM_SLEEP);
                p->ap = ap;
                p->callback = callback;
                p->arg = arg;
                p->next = emul64->emul64_reset_notify_listf;
                emul64->emul64_reset_notify_listf = p;
                rval = DDI_SUCCESS;
        }

        mutex_exit(EMUL64_REQ_MUTEX(emul64));

        return (rval);
}

/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR - emul64 has been shutdown
 *                 TRAN_BUSY        - request queue is full
 *                 TRAN_ACCEPT      - pkt has been submitted to emul64
 *
 * Description   : init pkt, start the request
 *
 * Context       : Can be called from different kernel process threads.
 *                 Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
        struct emul64_cmd       *sp = PKT2CMD(pkt);
        int                     rval = TRAN_ACCEPT;
        struct emul64           *emul64 = ADDR2EMUL64(ap);
        clock_t                 cur_lbolt;
        taskqid_t               dispatched;

        ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
        ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

        EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

        pkt->pkt_reason = CMD_CMPLT;

#ifdef EMUL64DEBUG
        if (emul64_cdb_debug) {
                emul64_debug_dump_cdb(ap, pkt);
        }
#endif  /* EMUL64DEBUG */

        /*
         * Calculate the deadline from pkt_time.  Instead of multiplying
         * by 100 (i.e., HZ), we multiply by 128 so that we can shift and
         * at the same time get a 28% grace period.  We ignore the rare
         * case of pkt_time == 0 and deal with it in emul64_i_watch().
         */
        cur_lbolt = ddi_get_lbolt();
        sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);

        if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
                emul64_pkt_comp((caddr_t)pkt);
        } else {
                dispatched = NULL;
                if (emul64_collect_stats) {
                        /*
                         * If we are collecting statistics, call
                         * taskq_dispatch in no sleep mode, so that we can
                         * detect if we are exceeding the queue length that
                         * was established in the call to taskq_create in
                         * emul64_attach.  If the no sleep call fails
                         * (returns NULL), the task will be dispatched in
                         * sleep mode below.
                         */
                        dispatched = taskq_dispatch(emul64->emul64_taskq,
                            emul64_pkt_comp,
                            (void *)pkt, TQ_NOSLEEP);
                        if (dispatched == NULL) {
                                /* Queue was full.  dispatch failed. */
                                mutex_enter(&emul64_stats_mutex);
                                emul64_taskq_max++;
                                mutex_exit(&emul64_stats_mutex);
                        }
                }
                if (dispatched == NULL) {
                        (void) taskq_dispatch(emul64->emul64_taskq,
                            emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
                }
        }

done:
        ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
        ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

        return (rval);
}

void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
        struct scsi_arq_status *arq =
            (struct scsi_arq_status *)pkt->pkt_scbp;

        /* got check, no data transferred and ARQ done */
        arq->sts_status.sts_chk = 1;
        pkt->pkt_state |= STATE_ARQ_DONE;
        pkt->pkt_state &= ~STATE_XFERRED_DATA;

        /* for ARQ */
        arq->sts_rqpkt_reason = CMD_CMPLT;
        arq->sts_rqpkt_resid = 0;
        arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
            STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
        arq->sts_sensedata.es_valid = 1;
        arq->sts_sensedata.es_class = 0x7;
        arq->sts_sensedata.es_key = key;
        arq->sts_sensedata.es_add_code = asc;
        arq->sts_sensedata.es_qual_code = ascq;
}
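
/*
 * Usage note: emul64_handle_cmd() below uses this helper to fail
 * unsupported CDBs with a check condition, for example (taken from the
 * dispatch code later in this file; ASC 0x24 is INVALID FIELD IN CDB):
 *
 *      emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
 */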

ushort_t
emul64_error_inject(struct scsi_pkt *pkt)
{
        struct emul64_cmd       *sp = PKT2CMD(pkt);
        emul64_tgt_t            *tgt;
        struct scsi_arq_status  *arq =
            (struct scsi_arq_status *)pkt->pkt_scbp;
        uint_t                  max_sense_len;

        EMUL64_MUTEX_ENTER(sp->cmd_emul64);
        tgt = find_tgt(sp->cmd_emul64,
            pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
        EMUL64_MUTEX_EXIT(sp->cmd_emul64);

        /*
         * If there is no target, skip the error injection and
         * let the packet be handled normally.  This would normally
         * never happen since a_target and a_lun are setup in
         * emul64_scsi_init_pkt.
         */
        if (tgt == NULL) {
                return (ERR_INJ_DISABLE);
        }

        if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
                arq->sts_status = tgt->emul64_einj_scsi_status;
                pkt->pkt_state = tgt->emul64_einj_pkt_state;
                pkt->pkt_reason = tgt->emul64_einj_pkt_reason;

                /*
                 * Calculate available sense buffer length.  We could just
                 * assume sizeof (struct scsi_extended_sense) but hopefully
                 * that limitation will go away soon.
                 */
                max_sense_len = sp->cmd_scblen -
                    (sizeof (struct scsi_arq_status) -
                    sizeof (struct scsi_extended_sense));
                if (max_sense_len > tgt->emul64_einj_sense_length) {
                        max_sense_len = tgt->emul64_einj_sense_length;
                }

                /* for ARQ */
                arq->sts_rqpkt_reason = CMD_CMPLT;
                arq->sts_rqpkt_resid = 0;
                arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
                    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

                /* Copy sense data */
                if (tgt->emul64_einj_sense_data != 0) {
                        bcopy(tgt->emul64_einj_sense_data,
                            (uint8_t *)&arq->sts_sensedata,
                            max_sense_len);
                }
        }

        /* Return current error injection state */
        return (tgt->emul64_einj_state);
}

int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
        emul64_tgt_t                    *tgt;
        struct emul64_error_inj_data    error_inj_req;

        /* Check args */
        if (arg == NULL) {
                return (EINVAL);
        }

        if (ddi_copyin((void *)arg, &error_inj_req,
            sizeof (error_inj_req), 0) != 0) {
                cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
                return (EFAULT);
        }

        EMUL64_MUTEX_ENTER(emul64);
        tgt = find_tgt(emul64, error_inj_req.eccd_target,
            error_inj_req.eccd_lun);
        EMUL64_MUTEX_EXIT(emul64);

        /* Make sure device exists */
        if (tgt == NULL) {
                return (ENODEV);
        }

        /* Free old sense buffer if we have one */
        if (tgt->emul64_einj_sense_data != NULL) {
                ASSERT(tgt->emul64_einj_sense_length != 0);
                kmem_free(tgt->emul64_einj_sense_data,
                    tgt->emul64_einj_sense_length);
                tgt->emul64_einj_sense_data = NULL;
                tgt->emul64_einj_sense_length = 0;
        }

        /*
         * Now handle the error injection request.  If error injection
         * is requested, we will return the sense data provided for
         * any I/O to this target until told to stop.
         */
        tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
        tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
        tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
        tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
        tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
        switch (error_inj_req.eccd_inj_state) {
        case ERR_INJ_ENABLE:
        case ERR_INJ_ENABLE_NODATA:
                if (error_inj_req.eccd_sns_dlen) {
                        tgt->emul64_einj_sense_data =
                            kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
                        /* Copy sense data */
                        if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
                            tgt->emul64_einj_sense_data,
                            error_inj_req.eccd_sns_dlen, 0) != 0) {
                                cmn_err(CE_WARN,
                                    "emul64: sense data copy in failed\n");
                                return (EFAULT);
                        }
                }
                break;
        case ERR_INJ_DISABLE:
        default:
                break;
        }

        return (0);
}
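
/*
 * Illustrative sketch of the user buffer layout consumed above; this
 * fragment is not part of the original source, and "fd", "req" and
 * "sense" are hypothetical.  The sense bytes must immediately follow
 * the request structure, since the second ddi_copyin() above reads
 * from arg + sizeof (error_inj_req):
 *
 *      struct emul64_error_inj_data req;
 *      char buf[sizeof (req) + sizeof (sense)];
 *
 *      bcopy(&req, buf, sizeof (req));
 *      bcopy(sense, buf + sizeof (req), sizeof (sense));
 *      (void) ioctl(fd, EMUL64_ERROR_INJECT, buf);
 */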

int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);

static void
emul64_handle_cmd(struct scsi_pkt *pkt)
{
        if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
                /*
                 * If error injection is configured to return with
                 * no data return now without handling the command.
                 * This is how normal check conditions work.
                 *
                 * If the error injection state is ERR_INJ_ENABLE
                 * (or if error injection is disabled) continue and
                 * handle the command.  This would be used for
                 * KEY_RECOVERABLE_ERROR type conditions.
                 */
                return;
        }

        switch (pkt->pkt_cdbp[0]) {
        case SCMD_START_STOP:
                (void) bsd_scsi_start_stop_unit(pkt);
                break;
        case SCMD_TEST_UNIT_READY:
                (void) bsd_scsi_test_unit_ready(pkt);
                break;
        case SCMD_REQUEST_SENSE:
                (void) bsd_scsi_request_sense(pkt);
                break;
        case SCMD_INQUIRY:
                (void) bsd_scsi_inquiry(pkt);
                break;
        case SCMD_FORMAT:
                (void) bsd_scsi_format(pkt);
                break;
        case SCMD_READ:
        case SCMD_WRITE:
        case SCMD_READ_G1:
        case SCMD_WRITE_G1:
        case SCMD_READ_G4:
        case SCMD_WRITE_G4:
                (void) bsd_scsi_io(pkt);
                break;
        case SCMD_LOG_SENSE_G1:
                (void) bsd_scsi_log_sense(pkt);
                break;
        case SCMD_MODE_SENSE:
        case SCMD_MODE_SENSE_G1:
                (void) bsd_scsi_mode_sense(pkt);
                break;
        case SCMD_MODE_SELECT:
        case SCMD_MODE_SELECT_G1:
                (void) bsd_scsi_mode_select(pkt);
                break;
        case SCMD_READ_CAPACITY:
                (void) bsd_scsi_read_capacity(pkt);
                break;
        case SCMD_SVC_ACTION_IN_G4:
                if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
                        (void) bsd_scsi_read_capacity_16(pkt);
                } else {
                        cmn_err(CE_WARN, "emul64: unrecognized G4 service "
                            "action 0x%x", pkt->pkt_cdbp[1]);
                }
                break;
        case SCMD_RESERVE:
        case SCMD_RESERVE_G1:
                (void) bsd_scsi_reserve(pkt);
                break;
        case SCMD_RELEASE:
        case SCMD_RELEASE_G1:
                (void) bsd_scsi_release(pkt);
                break;
        case SCMD_REASSIGN_BLOCK:
                (void) bsd_scsi_reassign_block(pkt);
                break;
        case SCMD_READ_DEFECT_LIST:
                (void) bsd_scsi_read_defect_list(pkt);
                break;
        case SCMD_PRIN:
        case SCMD_PROUT:
        case SCMD_REPORT_LUNS:
                /* ASC 0x24 INVALID FIELD IN CDB */
                emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
                break;
        case SCMD_GET_CONFIGURATION:
        case 0x35:      /* SCMD_SYNCHRONIZE_CACHE */
                /* Don't complain */
                break;
        default:
                cmn_err(CE_WARN, "emul64: unrecognized "
                    "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
                emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
                break;
        }
}

static void
emul64_pkt_comp(void *arg)
{
        struct scsi_pkt         *pkt = (struct scsi_pkt *)arg;
        struct emul64_cmd       *sp = PKT2CMD(pkt);
        emul64_tgt_t            *tgt;

        EMUL64_MUTEX_ENTER(sp->cmd_emul64);
        tgt = find_tgt(sp->cmd_emul64,
            pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
        EMUL64_MUTEX_EXIT(sp->cmd_emul64);
        if (!tgt) {
                pkt->pkt_reason = CMD_TIMEOUT;
                pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
                pkt->pkt_statistics = STAT_TIMEOUT;
        } else {
                pkt->pkt_reason = CMD_CMPLT;
                *pkt->pkt_scbp = STATUS_GOOD;
                pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
                    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
                pkt->pkt_statistics = 0;
                emul64_handle_cmd(pkt);
        }
        (*pkt->pkt_comp)(pkt);
}

/* ARGSUSED */
static int
emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
        return (1);
}

/* ARGSUSED */
static int
emul64_scsi_reset(struct scsi_address *ap, int level)
{
        return (1);
}

static int
emul64_get_tgtrange(struct emul64 *emul64,
    intptr_t arg,
    emul64_tgt_t **tgtp,
    emul64_tgt_range_t *tgtr)
{
        if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
                cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
                return (EFAULT);
        }
        EMUL64_MUTEX_ENTER(emul64);
        *tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
        EMUL64_MUTEX_EXIT(emul64);
        if (*tgtp == NULL) {
                cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
                    tgtr->emul64_target, tgtr->emul64_lun,
                    ddi_get_instance(emul64->emul64_dip));
                return (ENXIO);
        }
        return (0);
}

static int
emul64_ioctl(dev_t dev,
    int cmd,
    intptr_t arg,
    int mode,
    cred_t *credp,
    int *rvalp)
{
        struct emul64           *emul64;
        int                     instance;
        int                     rv = 0;
        emul64_tgt_range_t      tgtr;
        emul64_tgt_t            *tgt;

        instance = MINOR2INST(getminor(dev));
        emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
        if (emul64 == NULL) {
                cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
                    getminor(dev));
                return (ENXIO);
        }

        switch (cmd) {
        case EMUL64_WRITE_OFF:
                rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
                if (rv == 0) {
                        rv = emul64_write_off(emul64, tgt, &tgtr);
                }
                break;
        case EMUL64_WRITE_ON:
                rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
                if (rv == 0) {
                        rv = emul64_write_on(emul64, tgt, &tgtr);
                }
                break;
        case EMUL64_ZERO_RANGE:
                rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
                if (rv == 0) {
                        mutex_enter(&tgt->emul64_tgt_blk_lock);
                        rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
                        mutex_exit(&tgt->emul64_tgt_blk_lock);
                }
                break;
        case EMUL64_ERROR_INJECT:
                rv = emul64_error_inject_req(emul64, arg);
                break;
        default:
                rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
                break;
        }
        return (rv);
}

/* ARGSUSED */
static int
emul64_write_off(struct emul64 *emul64,
    emul64_tgt_t *tgt,
    emul64_tgt_range_t *tgtr)
{
        size_t                  blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
        emul64_nowrite_t        *cur;
        emul64_nowrite_t        *nowrite;
        emul64_rng_overlap_t    overlap = O_NONE;
        emul64_nowrite_t        **prev = NULL;
        diskaddr_t              sb = tgtr->emul64_blkrange.emul64_sb;

        nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange);

        /* Find spot in list */
        rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
        cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
        if (overlap == O_NONE) {
                /* Insert into list */
                *prev = nowrite;
                nowrite->emul64_nwnext = cur;
        }
        rw_exit(&tgt->emul64_tgt_nw_lock);
        if (overlap == O_NONE) {
                if (emul64_collect_stats) {
                        mutex_enter(&emul64_stats_mutex);
                        emul64_nowrite_count++;
                        mutex_exit(&emul64_stats_mutex);
                }
        } else {
                cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%"
                    PRIx64 " overlaps 0x%llx,0x%" PRIx64 "\n",
                    nowrite->emul64_blocked.emul64_sb,
                    nowrite->emul64_blocked.emul64_blkcnt,
                    cur->emul64_blocked.emul64_sb,
                    cur->emul64_blocked.emul64_blkcnt);
                emul64_nowrite_free(nowrite);
                return (EINVAL);
        }
        return (0);
}
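
/*
 * Note on the list handling used by emul64_write_off() above and
 * emul64_write_on() below: emul64_find_nowrite() hands back a pointer
 * to the link that points at the matching node (prevp), so insertion
 * ("*prev = nowrite") and removal ("*prev = cur->emul64_nwnext") work
 * the same way whether the node is at the head of emul64_tgt_nowrite
 * or in the middle of the list.
 */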

/* ARGSUSED */
static int
emul64_write_on(struct emul64 *emul64,
    emul64_tgt_t *tgt,
    emul64_tgt_range_t *tgtr)
{
        size_t                  blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
        emul64_nowrite_t        *cur;
        emul64_rng_overlap_t    overlap = O_NONE;
        emul64_nowrite_t        **prev = NULL;
        int                     rv = 0;
        diskaddr_t              sb = tgtr->emul64_blkrange.emul64_sb;

        /* Find spot in list */
        rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
        cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
        if (overlap == O_SAME) {
                /* Remove from list */
                *prev = cur->emul64_nwnext;
        }
        rw_exit(&tgt->emul64_tgt_nw_lock);

        switch (overlap) {
        case O_NONE:
                cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
                    "range not found\n", sb, blkcnt);
                rv = ENXIO;
                break;
        case O_SAME:
                if (emul64_collect_stats) {
                        mutex_enter(&emul64_stats_mutex);
                        emul64_nowrite_count--;
                        mutex_exit(&emul64_stats_mutex);
                }
                emul64_nowrite_free(cur);
                break;
        case O_OVERLAP:
        case O_SUBSET:
                cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
                    "overlaps 0x%llx,0x%" PRIx64 "\n",
                    sb, blkcnt, cur->emul64_blocked.emul64_sb,
                    cur->emul64_blocked.emul64_blkcnt);
                rv = EINVAL;
                break;
        }
        return (rv);
}

static emul64_nowrite_t *
emul64_find_nowrite(emul64_tgt_t *tgt,
    diskaddr_t sb,
    size_t blkcnt,
    emul64_rng_overlap_t *overlap,
    emul64_nowrite_t ***prevp)
{
        emul64_nowrite_t        *cur;
        emul64_nowrite_t        **prev;

        /* Find spot in list */
        *overlap = O_NONE;
        prev = &tgt->emul64_tgt_nowrite;
        cur = tgt->emul64_tgt_nowrite;
        while (cur != NULL) {
                *overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
                if (*overlap != O_NONE)
                        break;
                prev = &cur->emul64_nwnext;
                cur = cur->emul64_nwnext;
        }

        *prevp = prev;
        return (cur);
}

static emul64_nowrite_t *
emul64_nowrite_alloc(emul64_range_t *range)
{
        emul64_nowrite_t        *nw;

        nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
        bcopy((void *) range,
            (void *) &nw->emul64_blocked,
            sizeof (nw->emul64_blocked));
        return (nw);
}

static void
emul64_nowrite_free(emul64_nowrite_t *nw)
{
        kmem_free((void *) nw, sizeof (*nw));
}

emul64_rng_overlap_t
emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
{

        if (rng->emul64_sb >= sb + cnt)
                return (O_NONE);
        if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
                return (O_NONE);
        if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt))
                return (O_SAME);
        if ((sb >= rng->emul64_sb) &&
            ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) {
                return (O_SUBSET);
        }
        return (O_OVERLAP);
}
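
/*
 * Worked examples for emul64_overlap() above, with rng covering
 * blocks [100, 110) (emul64_sb == 100, emul64_blkcnt == 10):
 *
 *      sb == 110, cnt == 5     ->  O_NONE    (request starts past range)
 *      sb == 100, cnt == 10    ->  O_SAME    (exact match)
 *      sb == 102, cnt == 4     ->  O_SUBSET  (wholly inside range)
 *      sb == 95,  cnt == 10    ->  O_OVERLAP (partial intersection)
 */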

#include <sys/varargs.h>

/*
 * Error logging, printing, and debug print routines
 */

/*VARARGS3*/
static void
emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...)
{
        char    buf[256];
        va_list ap;

        va_start(ap, fmt);
        (void) vsnprintf(buf, sizeof (buf), fmt, ap);
        va_end(ap);

        scsi_log(emul64 ? emul64->emul64_dip : NULL,
            "emul64", level, "%s\n", buf);
}


#ifdef EMUL64DEBUG

static void
emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt)
{
        static char     hex[] = "0123456789abcdef";
        struct emul64   *emul64 = ADDR2EMUL64(ap);
        struct emul64_cmd       *sp = PKT2CMD(pkt);
        uint8_t         *cdb = pkt->pkt_cdbp;
        char            buf[256];
        char            *p;
        int             i;

        (void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ",
            ddi_get_instance(emul64->emul64_dip),
            ap->a_target, ap->a_lun);

        p = buf + strlen(buf);

        *p++ = '[';
        for (i = 0; i < sp->cmd_cdblen; i++, cdb++) {
                if (i != 0)
                        *p++ = ' ';
                *p++ = hex[(*cdb >> 4) & 0x0f];
                *p++ = hex[*cdb & 0x0f];
        }
        *p++ = ']';
        *p++ = '\n';
        *p = 0;

        cmn_err(CE_CONT, buf);
}
#endif  /* EMUL64DEBUG */