/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 */


/*
 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target
 * devices (large disks).
 */

#ifdef DEBUG
#define	EMUL64DEBUG
#endif

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/dklabel.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

int emul64_usetaskq = 1;	/* set to zero for debugging */
int emul64debug = 0;
#ifdef EMUL64DEBUG
static int emul64_cdb_debug = 0;
#include <sys/debug.h>
#endif

/*
 * cb_ops function prototypes
 */
static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp);

/*
 * dev_ops function prototypes
 */
static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd);
static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_pkt_comp(void *);
static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset(struct scsi_address *ap, int level);
static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void emul64_scsi_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);

/*
 * internal functions
 */
static void emul64_i_initcap(struct emul64 *emul64);

static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...);
static int emul64_get_tgtrange(struct emul64 *,
    intptr_t,
    emul64_tgt_t **,
    emul64_tgt_range_t *);
static int emul64_write_off(struct emul64 *,
    emul64_tgt_t *,
    emul64_tgt_range_t *);
static int emul64_write_on(struct emul64 *,
    emul64_tgt_t *,
    emul64_tgt_range_t *);
static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *);
static void emul64_nowrite_free(emul64_nowrite_t *);
static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
    diskaddr_t start_block,
    size_t blkcnt,
    emul64_rng_overlap_t *overlapp,
    emul64_nowrite_t ***prevp);

extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);

#ifdef EMUL64DEBUG
static void emul64_debug_dump_cdb(struct scsi_address *ap,
    struct scsi_pkt *pkt);
#endif


#ifdef _DDICT
static int ddi_in_panic(void);
static int ddi_in_panic() { return (0); }
#ifndef SCSI_CAP_RESET_NOTIFICATION
#define	SCSI_CAP_RESET_NOTIFICATION	14
#endif
#ifndef SCSI_RESET_NOTIFY
#define	SCSI_RESET_NOTIFY	0x01
#endif
#ifndef SCSI_RESET_CANCEL
#define	SCSI_RESET_CANCEL	0x02
#endif
#endif

/*
 * Tunables:
 *
 * emul64_max_task
 *	The taskq facility is used to queue up SCSI start requests on a per
 *	controller basis.  If the maximum number of queued tasks is hit,
 *	taskq_ent_alloc() delays for a second, which adversely impacts our
 *	performance.  This value establishes the maximum number of task
 *	queue entries when taskq_create is called.
 *
 * emul64_task_nthreads
 *	Specifies the number of threads that should be used to process a
 *	controller's task queue.  Our init function sets this to the number
 *	of CPUs on the system, but this can be overridden in emul64.conf.
 */
int emul64_max_task = 16;
int emul64_task_nthreads = 1;

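/*
 * As with any kernel module global, these tunables can also be adjusted
 * at boot time from /etc/system; a hypothetical fragment (values are
 * illustrative only):
 *
 *	set emul64:emul64_max_task = 32
 *	set emul64:emul64_task_nthreads = 4
 */
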
/*
 * Local static data
 */
static void *emul64_state = NULL;

/*
 * Character/block operations.
 */
static struct cb_ops emul64_cbops = {
	scsi_hba_open,		/* cb_open */
	scsi_hba_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	emul64_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_MP | D_64BIT | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * autoconfiguration routines.
 */

static struct dev_ops emul64_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	emul64_info,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	emul64_attach,		/* attach */
	emul64_detach,		/* detach */
	nodev,			/* reset */
	&emul64_cbops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,			/* module type - driver */
	"emul64 SCSI Host Bus Adapter",	/* module name */
	&emul64_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};

int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
	    EMUL64_INITIAL_SOFT_SPACE);
	if (ret != 0)
		return (ret);

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&emul64_state);
		return (ret);
	}

	/* Set the number of task threads to the number of CPUs */
	if (boot_max_ncpus == -1) {
		emul64_task_nthreads = max_ncpus;
	} else {
		emul64_task_nthreads = boot_max_ncpus;
	}

	emul64_bsd_init();

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		emul64_bsd_fini();
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&emul64_state);
	}

	return (ret);
}

int
_fini(void)
{
	int ret;

	if ((ret = mod_remove(&modlinkage)) != 0)
		return (ret);

	emul64_bsd_fini();

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&emul64_state);

	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number, return the devinfo pointer from the driver's
 * soft state.
 */
/*ARGSUSED*/
static int
emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct emul64 *foo;
	int instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		foo = ddi_get_soft_state(emul64_state, instance);
		if (foo != NULL)
			*result = (void *)foo->emul64_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Attach an instance of an emul64 host adapter.  Allocate data structures,
 * initialize the emul64 and we're on the air.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int mutex_initted = 0;
	struct emul64 *emul64;
	int instance;
	scsi_hba_tran_t *tran = NULL;
	ddi_dma_attr_t tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		emul64 = TRAN2EMUL64(tran);

		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state",
		    instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == (struct emul64 *)NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}


	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran = tran;
	emul64->emul64_dip = dip;

	tran->tran_hba_private = emul64;
	tran->tran_tgt_private = NULL;
	tran->tran_tgt_init = emul64_tran_tgt_init;
	tran->tran_tgt_probe = scsi_hba_probe;
	tran->tran_tgt_free = NULL;

	tran->tran_start = emul64_scsi_start;
	tran->tran_abort = emul64_scsi_abort;
	tran->tran_reset = emul64_scsi_reset;
	tran->tran_getcap = emul64_scsi_getcap;
	tran->tran_setcap = emul64_scsi_setcap;
	tran->tran_init_pkt = emul64_scsi_init_pkt;
	tran->tran_destroy_pkt = emul64_scsi_destroy_pkt;
	tran->tran_dmafree = emul64_scsi_dmafree;
	tran->tran_sync_pkt = emul64_scsi_sync_pkt;
	tran->tran_reset_notify = emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);


	/* mutexes to protect the emul64 request and response queue */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);


	ddi_report_dev(dip);
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}

/*ARGSUSED*/
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct emul64 *emul64;
	scsi_hba_tran_t *tran;
	int instance = ddi_get_instance(dip);


	/* get transport structure pointer from the dip */
	if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
		return (DDI_FAILURE);
	}

	/* get soft state from transport structure */
	emul64 = TRAN2EMUL64(tran);

	if (!emul64) {
		return (DDI_FAILURE);
	}

	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

	switch (cmd) {
	case DDI_DETACH:
		EMUL64_MUTEX_ENTER(emul64);

		taskq_destroy(emul64->emul64_taskq);
		(void) scsi_hba_detach(dip);

		scsi_hba_tran_free(emul64->emul64_tran);


		EMUL64_MUTEX_EXIT(emul64);

		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));


		EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
		ddi_soft_state_free(emul64_state, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Function name : emul64_tran_tgt_init
 *
 * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
 *
 */
/*ARGSUSED*/
static int
emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct emul64 *emul64;
	emul64_tgt_t *tgt;
	char **geo_vidpid = NULL;
	char *geo, *vidpid;
	uint32_t *geoip = NULL;
	uint_t length;
	uint_t length2;
	lldaddr_t sector_count;
	char prop_name[15];
	int ret = DDI_FAILURE;

	emul64 = TRAN2EMUL64(tran);
	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * We get called for each target driver.conf node, multiple
	 * nodes may map to the same tgt,lun (sd.conf, st.conf, etc).
	 * Check to see if transport to tgt,lun already established.
	 */
	tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
	if (tgt) {
		ret = DDI_SUCCESS;
		goto out;
	}

	/* see if we have driver.conf specified device for this target,lun */
	(void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
	    sd->sd_address.a_target, sd->sd_address.a_lun);
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
	    DDI_PROP_DONTPASS, prop_name,
	    &geo_vidpid, &length) != DDI_PROP_SUCCESS)
		goto out;
	if (length < 2) {
		cmn_err(CE_WARN, "emul64: %s property does not have 2 "
		    "elements", prop_name);
		goto out;
	}

	/* pick geometry name and vidpid string from string array */
	geo = *geo_vidpid;
	vidpid = *(geo_vidpid + 1);

	/* lookup geometry property integer array */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
	    geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
		goto out;
	}
	if (length2 < 6) {
		cmn_err(CE_WARN, "emul64: property %s does not have 6 "
		    "elements", *geo_vidpid);
		goto out;
	}

	/* allocate and initialize tgt structure for tgt,lun */
	tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
	rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
	mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);

	/* create avl for data block storage */
	avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
	    sizeof (blklist_t), offsetof(blklist_t, bl_node));

	/* save scsi_address and vidpid */
	bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
	(void) strncpy(tgt->emul64_tgt_inq, vidpid,
	    sizeof (tgt->emul64_tgt_inq));

	/*
	 * The high order 4 bytes of the sector count always come first in
	 * emul64.conf.  They are followed by the low order 4 bytes.  Not
	 * all CPU types want them in this order, but lldaddr_t takes care
	 * of this for us.  We then pick up geometry (ncyl X nheads X nsect).
	 */
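	/*
	 * For reference, lldaddr_t (see <sys/types.h>) is roughly the
	 * following union; the order of _u and _l within _p is flipped
	 * on little-endian machines so that _f always reads correctly:
	 *
	 *	typedef union {
	 *		diskaddr_t _f;			full 64-bit value
	 *		struct { int32_t _u, _l; } _p;	upper/lower halves
	 *	} lldaddr_t;
	 */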
	sector_count._p._u = *(geoip + 0);
	sector_count._p._l = *(geoip + 1);
	/*
	 * On 32-bit platforms, fix block size if it's greater than the
	 * allowable maximum.
	 */
#if !defined(_LP64)
	if (sector_count._f > DK_MAX_BLOCKS)
		sector_count._f = DK_MAX_BLOCKS;
#endif
	tgt->emul64_tgt_sectors = sector_count._f;
	tgt->emul64_tgt_dtype = *(geoip + 2);
	tgt->emul64_tgt_ncyls = *(geoip + 3);
	tgt->emul64_tgt_nheads = *(geoip + 4);
	tgt->emul64_tgt_nsect = *(geoip + 5);

	/* insert target structure into list */
	tgt->emul64_tgt_next = emul64->emul64_tgt;
	emul64->emul64_tgt = tgt;
	ret = DDI_SUCCESS;

out:	EMUL64_MUTEX_EXIT(emul64);
	if (geoip)
		ddi_prop_free(geoip);
	if (geo_vidpid)
		ddi_prop_free(geo_vidpid);
	return (ret);
}

/*
 * Function name : emul64_i_initcap
 *
 * Return Values : NONE
 * Description	 : Initializes the default target capabilities and
 *		   Sync Rates.
 *
 * Context	 : Called from the user thread through attach.
 *
 */
static void
emul64_i_initcap(struct emul64 *emul64)
{
	uint16_t cap, synch;
	int i;

	cap = 0;
	synch = 0;
	for (i = 0; i < NTARGETS_WIDE; i++) {
		emul64->emul64_cap[i] = cap;
		emul64->emul64_synch[i] = synch;
	}
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap);
}

/*
 * Function name : emul64_scsi_getcap()
 *
 * Return Values : current value of capability, if defined
 *		   -1 if capability is not defined
 * Description	 : returns current capability value
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	struct emul64 *emul64 = ADDR2EMUL64(ap);
	int rval = 0;

	/*
	 * We don't allow inquiring about capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		rval = 1 << 24;	/* Limit to 16MB max transfer */
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 1;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 1;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = emul64->emul64_initiator_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;

	default:
		rval = -1;
		break;
	}

	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}

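/*
 * Target drivers reach the getcap/setcap entry points through
 * scsi_ifgetcap(9F)/scsi_ifsetcap(9F).  A hypothetical caller checking
 * whether this HBA supports tagged queueing might do:
 *
 *	if (scsi_ifgetcap(&sd->sd_address, "tagged-qing", 1) == 1)
 *		... enable tagged queueing ...
 *
 * Passing 1 as the third ("whom") argument addresses the request to the
 * particular target; whom == 0 means a general HBA query, which is why
 * these routines reject it.
 */
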
/*
 * Function name : emul64_scsi_setcap()
 *
 * Return Values : 1 - capability exists and can be set to new value
 *		   0 - capability could not be set to new value
 *		  -1 - no such capability
 *
 * Description	 : sets a capability for a target
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	struct emul64 *emul64 = ADDR2EMUL64(ap);
	int rval = 0;

	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 1;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = -1;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_TOTAL_SECTORS:
		emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value;
		rval = TRUE;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = TRUE;
		break;
	default:
		rval = -1;
		break;
	}


	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}

/*
 * Function name : emul64_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_init_pkt(9F).
 *		   Refer to tran_init_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/* ARGSUSED */
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct emul64 *emul64 = ADDR2EMUL64(ap);
	struct emul64_cmd *sp;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

	/*
	 * First step of emul64_scsi_init_pkt:  pkt allocation
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
		    statuslen,
		    tgtlen, sizeof (struct emul64_cmd), callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
			    "scsi_hba_pkt_alloc failed");
			return (NULL);
		}

		sp = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		sp->cmd_pkt = pkt;
		sp->cmd_flags = 0;
		sp->cmd_scblen = statuslen;
		sp->cmd_cdblen = cmdlen;
		sp->cmd_emul64 = emul64;
		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;

	} else {
		sp = PKT2CMD(pkt);
	}

	/*
	 * Second step of emul64_scsi_init_pkt:  dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		if (bp->b_flags & B_READ) {
			sp->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			sp->cmd_flags |= CFLAG_DMASEND;
		}
		bp_mapin(bp);
		sp->cmd_addr = (unsigned char *) bp->b_un.b_addr;
		sp->cmd_count = bp->b_bcount;
		pkt->pkt_resid = 0;
	}

	return (pkt);
}

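/*
 * Since there is no real hardware, no DMA resources are ever bound to a
 * packet.  The "dma allocation/move" step above just maps the buffer
 * into kernel virtual address space with bp_mapin(9F) and records the
 * address and count for the command-handling code.  Consequently the
 * tran_dmafree and tran_sync_pkt entry points below have nothing to do.
 */
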
/*
 * Function name : emul64_scsi_destroy_pkt
 *
 * Return Values : none
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_destroy_pkt(9F).
 *		   Refer to tran_destroy_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static void
emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd *sp = PKT2CMD(pkt);

	/*
	 * emul64_scsi_dmafree inline to make things faster
	 */
	if (sp->cmd_flags & CFLAG_DMAVALID) {
		/*
		 * Free the mapping.
		 */
		sp->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/*
	 * Free the pkt
	 */
	scsi_hba_pkt_free(ap, pkt);
}


/*
 * Function name : emul64_scsi_dmafree()
 *
 * Return Values : none
 * Description	 : free dvma resources
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * Function name : emul64_scsi_sync_pkt()
 *
 * Return Values : none
 * Description	 : sync dma
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * routine for reset notification setup, to register or cancel.
 */
static int
emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	struct emul64 *emul64 = ADDR2EMUL64(ap);
	struct emul64_reset_notify_entry *p, *beforep;
	int rval = DDI_FAILURE;

	mutex_enter(EMUL64_REQ_MUTEX(emul64));

	p = emul64->emul64_reset_notify_listf;
	beforep = NULL;

	while (p) {
		if (p->ap == ap)
			break;	/* An entry exists for this target */
		beforep = p;
		p = p->next;
	}

	if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
		if (beforep == NULL) {
			emul64->emul64_reset_notify_listf = p->next;
		} else {
			beforep->next = p->next;
		}
		kmem_free((caddr_t)p,
		    sizeof (struct emul64_reset_notify_entry));
		rval = DDI_SUCCESS;

	} else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
		p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
		    KM_SLEEP);
		p->ap = ap;
		p->callback = callback;
		p->arg = arg;
		p->next = emul64->emul64_reset_notify_listf;
		emul64->emul64_reset_notify_listf = p;
		rval = DDI_SUCCESS;
	}

	mutex_exit(EMUL64_REQ_MUTEX(emul64));

	return (rval);
}

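/*
 * Note that registration only maintains the singly-linked list above.
 * Since the emulated bus never experiences a reset (emul64_scsi_reset()
 * below simply claims success), nothing in this driver ever walks the
 * list to invoke the registered callbacks.
 */
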
/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- emul64 has been shutdown
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to emul64
 *
 * Description	 : init pkt, start the request
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd *sp = PKT2CMD(pkt);
	int rval = TRAN_ACCEPT;
	struct emul64 *emul64 = ADDR2EMUL64(ap);
	clock_t cur_lbolt;
	taskqid_t dispatched;

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

	pkt->pkt_reason = CMD_CMPLT;

#ifdef	EMUL64DEBUG
	if (emul64_cdb_debug) {
		emul64_debug_dump_cdb(ap, pkt);
	}
#endif	/* EMUL64DEBUG */

	/*
	 * calculate deadline from pkt_time
	 * Instead of multiplying by 100 (ie. HZ), we multiply by 128 so
	 * we can shift and at the same time have a 28% grace period
	 * we ignore the rare case of pkt_time == 0 and deal with it
	 * in emul64_i_watch()
	 */
	cur_lbolt = ddi_get_lbolt();
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);

	if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
		emul64_pkt_comp((caddr_t)pkt);
	} else {
		dispatched = NULL;
		if (emul64_collect_stats) {
			/*
			 * If we are collecting statistics, call
			 * taskq_dispatch in no sleep mode, so that we can
			 * detect if we are exceeding the queue length that
			 * was established in the call to taskq_create in
			 * emul64_attach.  If the no sleep call fails
			 * (returns NULL), the task will be dispatched in
			 * sleep mode below.
			 */
			dispatched = taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
			if (dispatched == NULL) {
				/* Queue was full.  dispatch failed. */
				mutex_enter(&emul64_stats_mutex);
				emul64_taskq_max++;
				mutex_exit(&emul64_stats_mutex);
			}
		}
		if (dispatched == NULL) {
			(void) taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
		}
	}

done:
	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	return (rval);
}

void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;

	/* got check, no data transferred and ARQ done */
	arq->sts_status.sts_chk = 1;
	pkt->pkt_state |= STATE_ARQ_DONE;
	pkt->pkt_state &= ~STATE_XFERRED_DATA;

	/* for ARQ */
	arq->sts_rqpkt_reason = CMD_CMPLT;
	arq->sts_rqpkt_resid = 0;
	arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
	arq->sts_sensedata.es_valid = 1;
	arq->sts_sensedata.es_class = 0x7;
	arq->sts_sensedata.es_key = key;
	arq->sts_sensedata.es_add_code = asc;
	arq->sts_sensedata.es_qual_code = ascq;
}

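/*
 * Error-injection support.  Through the EMUL64_ERROR_INJECT ioctl a test
 * harness can arm a target with a SCSI status, pkt_state/pkt_reason
 * values, and optional sense data; emul64_error_inject() then applies
 * those to every packet addressed to that target until injection is
 * disabled again.
 */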
ushort_t
emul64_error_inject(struct scsi_pkt *pkt)
{
	struct emul64_cmd *sp = PKT2CMD(pkt);
	emul64_tgt_t *tgt;
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;
	uint_t max_sense_len;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	/*
	 * If there is no target, skip the error injection and
	 * let the packet be handled normally.  This would normally
	 * never happen since a_target and a_lun are setup in
	 * emul64_scsi_init_pkt.
	 */
	if (tgt == NULL) {
		return (ERR_INJ_DISABLE);
	}

	if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
		arq->sts_status = tgt->emul64_einj_scsi_status;
		pkt->pkt_state = tgt->emul64_einj_pkt_state;
		pkt->pkt_reason = tgt->emul64_einj_pkt_reason;

		/*
		 * Calculate available sense buffer length.  We could just
		 * assume sizeof(struct scsi_extended_sense) but hopefully
		 * that limitation will go away soon.
		 */
		max_sense_len = sp->cmd_scblen -
		    (sizeof (struct scsi_arq_status) -
		    sizeof (struct scsi_extended_sense));
		if (max_sense_len > tgt->emul64_einj_sense_length) {
			max_sense_len = tgt->emul64_einj_sense_length;
		}

		/* for ARQ */
		arq->sts_rqpkt_reason = CMD_CMPLT;
		arq->sts_rqpkt_resid = 0;
		arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Copy sense data */
		if (tgt->emul64_einj_sense_data != 0) {
			bcopy(tgt->emul64_einj_sense_data,
			    (uint8_t *)&arq->sts_sensedata,
			    max_sense_len);
		}
	}

	/* Return current error injection state */
	return (tgt->emul64_einj_state);
}

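/*
 * Handle an EMUL64_ERROR_INJECT ioctl request.  The user buffer is
 * expected to contain a struct emul64_error_inj_data immediately
 * followed by eccd_sns_dlen bytes of sense data, which is why the
 * second ddi_copyin below starts at arg + sizeof (error_inj_req).
 */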
int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
	emul64_tgt_t *tgt;
	struct emul64_error_inj_data error_inj_req;

	/* Check args */
	if (arg == NULL) {
		return (EINVAL);
	}

	if (ddi_copyin((void *)arg, &error_inj_req,
	    sizeof (error_inj_req), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
		return (EFAULT);
	}

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, error_inj_req.eccd_target,
	    error_inj_req.eccd_lun);
	EMUL64_MUTEX_EXIT(emul64);

	/* Make sure device exists */
	if (tgt == NULL) {
		return (ENODEV);
	}

	/* Free old sense buffer if we have one */
	if (tgt->emul64_einj_sense_data != NULL) {
		ASSERT(tgt->emul64_einj_sense_length != 0);
		kmem_free(tgt->emul64_einj_sense_data,
		    tgt->emul64_einj_sense_length);
		tgt->emul64_einj_sense_data = NULL;
		tgt->emul64_einj_sense_length = 0;
	}

	/*
	 * Now handle error injection request.  If error injection
	 * is requested we will return the sense data provided for
	 * any I/O to this target until told to stop.
	 */
	tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
	tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
	tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
	tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
	tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
	switch (error_inj_req.eccd_inj_state) {
	case ERR_INJ_ENABLE:
	case ERR_INJ_ENABLE_NODATA:
		if (error_inj_req.eccd_sns_dlen) {
			tgt->emul64_einj_sense_data =
			    kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
			/* Copy sense data */
			if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
			    tgt->emul64_einj_sense_data,
			    error_inj_req.eccd_sns_dlen, 0) != 0) {
				cmn_err(CE_WARN,
				    "emul64: sense data copy in failed\n");
				return (EFAULT);
			}
		}
		break;
	case ERR_INJ_DISABLE:
	default:
		break;
	}

	return (0);
}

int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);

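/*
 * The bsd_scsi_* routines declared above implement the individual SCSI
 * commands against the in-memory disk image; they live in the companion
 * emul64_bsd.c source file.
 */
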
static void
emul64_handle_cmd(struct scsi_pkt *pkt)
{
	if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
		/*
		 * If error injection is configured to return with
		 * no data return now without handling the command.
		 * This is how normal check conditions work.
		 *
		 * If the error injection state is ERR_INJ_ENABLE
		 * (or if error injection is disabled) continue and
		 * handle the command.  This would be used for
		 * KEY_RECOVERABLE_ERROR type conditions.
		 */
		return;
	}

	switch (pkt->pkt_cdbp[0]) {
	case SCMD_START_STOP:
		(void) bsd_scsi_start_stop_unit(pkt);
		break;
	case SCMD_TEST_UNIT_READY:
		(void) bsd_scsi_test_unit_ready(pkt);
		break;
	case SCMD_REQUEST_SENSE:
		(void) bsd_scsi_request_sense(pkt);
		break;
	case SCMD_INQUIRY:
		(void) bsd_scsi_inquiry(pkt);
		break;
	case SCMD_FORMAT:
		(void) bsd_scsi_format(pkt);
		break;
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		(void) bsd_scsi_io(pkt);
		break;
	case SCMD_LOG_SENSE_G1:
		(void) bsd_scsi_log_sense(pkt);
		break;
	case SCMD_MODE_SENSE:
	case SCMD_MODE_SENSE_G1:
		(void) bsd_scsi_mode_sense(pkt);
		break;
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		(void) bsd_scsi_mode_select(pkt);
		break;
	case SCMD_READ_CAPACITY:
		(void) bsd_scsi_read_capacity(pkt);
		break;
	case SCMD_SVC_ACTION_IN_G4:
		if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
			(void) bsd_scsi_read_capacity_16(pkt);
		} else {
			cmn_err(CE_WARN, "emul64: unrecognized G4 service "
			    "action 0x%x", pkt->pkt_cdbp[1]);
		}
		break;
	case SCMD_RESERVE:
	case SCMD_RESERVE_G1:
		(void) bsd_scsi_reserve(pkt);
		break;
	case SCMD_RELEASE:
	case SCMD_RELEASE_G1:
		(void) bsd_scsi_release(pkt);
		break;
	case SCMD_REASSIGN_BLOCK:
		(void) bsd_scsi_reassign_block(pkt);
		break;
	case SCMD_READ_DEFECT_LIST:
		(void) bsd_scsi_read_defect_list(pkt);
		break;
	case SCMD_PRIN:
	case SCMD_PROUT:
	case SCMD_REPORT_LUNS:
		/* ASC 0x24 INVALID FIELD IN CDB */
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	case SCMD_GET_CONFIGURATION:
	case 0x35:	/* SCMD_SYNCHRONIZE_CACHE */
		/* Don't complain */
		break;
	default:
		cmn_err(CE_WARN, "emul64: unrecognized "
		    "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	}
}

static void
emul64_pkt_comp(void *arg)
{
	struct scsi_pkt *pkt = (struct scsi_pkt *)arg;
	struct emul64_cmd *sp = PKT2CMD(pkt);
	emul64_tgt_t *tgt;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (!tgt) {
		pkt->pkt_reason = CMD_TIMEOUT;
		pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
		pkt->pkt_statistics = STAT_TIMEOUT;
	} else {
		pkt->pkt_reason = CMD_CMPLT;
		*pkt->pkt_scbp = STATUS_GOOD;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
		pkt->pkt_statistics = 0;
		emul64_handle_cmd(pkt);
	}
	scsi_hba_pkt_comp(pkt);
}

/* ARGSUSED */
static int
emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	return (1);
}

/* ARGSUSED */
static int
emul64_scsi_reset(struct scsi_address *ap, int level)
{
	return (1);
}

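/*
 * ioctl support.  The write-protect and zero-range ioctls each pass an
 * emul64_tgt_range_t describing a target/lun and a block range.  A
 * hypothetical user-level caller (fd and error handling illustrative)
 * might look like:
 *
 *	emul64_tgt_range_t tr;
 *
 *	tr.emul64_target = 0;
 *	tr.emul64_lun = 0;
 *	tr.emul64_blkrange.emul64_sb = 0;	   starting block
 *	tr.emul64_blkrange.emul64_blkcnt = 1024;   block count
 *	if (ioctl(fd, EMUL64_WRITE_OFF, &tr) < 0)
 *		...
 */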
static int
emul64_get_tgtrange(struct emul64 *emul64,
    intptr_t arg,
    emul64_tgt_t **tgtp,
    emul64_tgt_range_t *tgtr)
{
	if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
		return (EFAULT);
	}
	EMUL64_MUTEX_ENTER(emul64);
	*tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (*tgtp == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
		    tgtr->emul64_target, tgtr->emul64_lun,
		    ddi_get_instance(emul64->emul64_dip));
		return (ENXIO);
	}
	return (0);
}

static int
emul64_ioctl(dev_t dev,
    int cmd,
    intptr_t arg,
    int mode,
    cred_t *credp,
    int *rvalp)
{
	struct emul64 *emul64;
	int instance;
	int rv = 0;
	emul64_tgt_range_t tgtr;
	emul64_tgt_t *tgt;

	instance = MINOR2INST(getminor(dev));
	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
		    getminor(dev));
		return (ENXIO);
	}

	switch (cmd) {
	case EMUL64_WRITE_OFF:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_off(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_WRITE_ON:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_on(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_ZERO_RANGE:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			mutex_enter(&tgt->emul64_tgt_blk_lock);
			rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
			mutex_exit(&tgt->emul64_tgt_blk_lock);
		}
		break;
	case EMUL64_ERROR_INJECT:
		rv = emul64_error_inject_req(emul64, arg);
		break;
	default:
		rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
		break;
	}
	return (rv);
}

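/*
 * "nowrite" range management.  EMUL64_WRITE_OFF adds a block range to a
 * per-target list of ranges for which writes are to be suppressed, and
 * EMUL64_WRITE_ON removes a range from that list.  A new range may not
 * overlap an existing one, and only an exact match may be removed; the
 * list is protected by the emul64_tgt_nw_lock reader/writer lock.
 */
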
/* ARGSUSED */
static int
emul64_write_off(struct emul64 *emul64,
    emul64_tgt_t *tgt,
    emul64_tgt_range_t *tgtr)
{
	size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t *cur;
	emul64_nowrite_t *nowrite;
	emul64_rng_overlap_t overlap = O_NONE;
	emul64_nowrite_t **prev = NULL;
	diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb;

	nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange);

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_NONE) {
		/* Insert into list */
		*prev = nowrite;
		nowrite->emul64_nwnext = cur;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);
	if (overlap == O_NONE) {
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count++;
			mutex_exit(&emul64_stats_mutex);
		}
	} else {
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%"
		    PRIx64 " overlaps 0x%llx,0x%" PRIx64 "\n",
		    nowrite->emul64_blocked.emul64_sb,
		    nowrite->emul64_blocked.emul64_blkcnt,
		    cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		emul64_nowrite_free(nowrite);
		return (EINVAL);
	}
	return (0);
}

/* ARGSUSED */
static int
emul64_write_on(struct emul64 *emul64,
    emul64_tgt_t *tgt,
    emul64_tgt_range_t *tgtr)
{
	size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t *cur;
	emul64_rng_overlap_t overlap = O_NONE;
	emul64_nowrite_t **prev = NULL;
	int rv = 0;
	diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb;

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_SAME) {
		/* Remove from list */
		*prev = cur->emul64_nwnext;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

	switch (overlap) {
	case O_NONE:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "range not found\n", sb, blkcnt);
		rv = ENXIO;
		break;
	case O_SAME:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count--;
			mutex_exit(&emul64_stats_mutex);
		}
		emul64_nowrite_free(cur);
		break;
	case O_OVERLAP:
	case O_SUBSET:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "overlaps 0x%llx,0x%" PRIx64 "\n",
		    sb, blkcnt, cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		rv = EINVAL;
		break;
	}
	return (rv);
}

static emul64_nowrite_t *
emul64_find_nowrite(emul64_tgt_t *tgt,
    diskaddr_t sb,
    size_t blkcnt,
    emul64_rng_overlap_t *overlap,
    emul64_nowrite_t ***prevp)
{
	emul64_nowrite_t *cur;
	emul64_nowrite_t **prev;

	/* Find spot in list */
	*overlap = O_NONE;
	prev = &tgt->emul64_tgt_nowrite;
	cur = tgt->emul64_tgt_nowrite;
	while (cur != NULL) {
		*overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
		if (*overlap != O_NONE)
			break;
		prev = &cur->emul64_nwnext;
		cur = cur->emul64_nwnext;
	}

	*prevp = prev;
	return (cur);
}

static emul64_nowrite_t *
emul64_nowrite_alloc(emul64_range_t *range)
{
	emul64_nowrite_t *nw;

	nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
	bcopy((void *) range,
	    (void *) &nw->emul64_blocked,
	    sizeof (nw->emul64_blocked));
	return (nw);
}

static void
emul64_nowrite_free(emul64_nowrite_t *nw)
{
	kmem_free((void *) nw, sizeof (*nw));
}

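/*
 * Classify how the query range [sb, sb + cnt) relates to the existing
 * range in *rng.  With an existing range of blocks 100-199 (sb 100,
 * cnt 100), for example:
 *
 *	query 200,50  -> O_NONE    (disjoint)
 *	query 100,100 -> O_SAME    (exact match)
 *	query 120,10  -> O_SUBSET  (wholly contained)
 *	query 150,100 -> O_OVERLAP (partial overlap)
 */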
emul64_rng_overlap_t
emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
{

	if (rng->emul64_sb >= sb + cnt)
		return (O_NONE);
	if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
		return (O_NONE);
	if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt))
		return (O_SAME);
	if ((sb >= rng->emul64_sb) &&
	    ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) {
		return (O_SUBSET);
	}
	return (O_OVERLAP);
}

#include <sys/varargs.h>

/*
 * Error logging, printing, and debug print routines
 */

/*VARARGS3*/
static void
emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	scsi_log(emul64 ? emul64->emul64_dip : NULL,
	    "emul64", level, "%s\n", buf);
}


#ifdef EMUL64DEBUG

static void
emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	static char hex[] = "0123456789abcdef";
	struct emul64 *emul64 = ADDR2EMUL64(ap);
	struct emul64_cmd *sp = PKT2CMD(pkt);
	uint8_t *cdb = pkt->pkt_cdbp;
	char buf[256];
	char *p;
	int i;

	(void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ",
	    ddi_get_instance(emul64->emul64_dip),
	    ap->a_target, ap->a_lun);

	p = buf + strlen(buf);

	*p++ = '[';
	for (i = 0; i < sp->cmd_cdblen; i++, cdb++) {
		if (i != 0)
			*p++ = ' ';
		*p++ = hex[(*cdb >> 4) & 0x0f];
		*p++ = hex[*cdb & 0x0f];
	}
	*p++ = ']';
	*p++ = '\n';
	*p = 0;

	cmn_err(CE_CONT, "%s", buf);
}
#endif	/* EMUL64DEBUG */