1*b1dd958fScth /* 2*b1dd958fScth * CDDL HEADER START 3*b1dd958fScth * 4*b1dd958fScth * The contents of this file are subject to the terms of the 5*b1dd958fScth * Common Development and Distribution License, Version 1.0 only 6*b1dd958fScth * (the "License"). You may not use this file except in compliance 7*b1dd958fScth * with the License. 8*b1dd958fScth * 9*b1dd958fScth * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10*b1dd958fScth * or http://www.opensolaris.org/os/licensing. 11*b1dd958fScth * See the License for the specific language governing permissions 12*b1dd958fScth * and limitations under the License. 13*b1dd958fScth * 14*b1dd958fScth * When distributing Covered Code, include this CDDL HEADER in each 15*b1dd958fScth * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16*b1dd958fScth * If applicable, add the following below this CDDL HEADER, with the 17*b1dd958fScth * fields enclosed by brackets "[]" replaced with your own identifying 18*b1dd958fScth * information: Portions Copyright [yyyy] [name of copyright owner] 19*b1dd958fScth * 20*b1dd958fScth * CDDL HEADER END 21*b1dd958fScth */ 22*b1dd958fScth /* 23*b1dd958fScth * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24*b1dd958fScth * Use is subject to license terms. 25*b1dd958fScth */ 26*b1dd958fScth 27*b1dd958fScth #pragma ident "%Z%%M% %I% %E% SMI" 28*b1dd958fScth 29*b1dd958fScth /* 30*b1dd958fScth * SCSA HBA nexus driver that emulates an HBA connected to SCSI target 31*b1dd958fScth * devices (large disks). 
32*b1dd958fScth */ 33*b1dd958fScth 34*b1dd958fScth #ifdef DEBUG 35*b1dd958fScth #define EMUL64DEBUG 36*b1dd958fScth #endif 37*b1dd958fScth 38*b1dd958fScth #include <sys/scsi/scsi.h> 39*b1dd958fScth #include <sys/ddi.h> 40*b1dd958fScth #include <sys/sunddi.h> 41*b1dd958fScth #include <sys/taskq.h> 42*b1dd958fScth #include <sys/disp.h> 43*b1dd958fScth #include <sys/types.h> 44*b1dd958fScth #include <sys/buf.h> 45*b1dd958fScth #include <sys/cpuvar.h> 46*b1dd958fScth #include <sys/dklabel.h> 47*b1dd958fScth 48*b1dd958fScth #include <sys/emul64.h> 49*b1dd958fScth #include <sys/emul64cmd.h> 50*b1dd958fScth #include <sys/emul64var.h> 51*b1dd958fScth 52*b1dd958fScth int emul64_usetaskq = 1; /* set to zero for debugging */ 53*b1dd958fScth int emul64debug = 0; 54*b1dd958fScth #ifdef EMUL64DEBUG 55*b1dd958fScth static int emul64_cdb_debug = 0; 56*b1dd958fScth #include <sys/debug.h> 57*b1dd958fScth #endif 58*b1dd958fScth 59*b1dd958fScth /* 60*b1dd958fScth * cb_ops function prototypes 61*b1dd958fScth */ 62*b1dd958fScth static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode, 63*b1dd958fScth cred_t *credp, int *rvalp); 64*b1dd958fScth 65*b1dd958fScth /* 66*b1dd958fScth * dev_ops functions prototypes 67*b1dd958fScth */ 68*b1dd958fScth static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 69*b1dd958fScth void *arg, void **result); 70*b1dd958fScth static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 71*b1dd958fScth static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 72*b1dd958fScth 73*b1dd958fScth /* 74*b1dd958fScth * Function prototypes 75*b1dd958fScth * 76*b1dd958fScth * SCSA functions exported by means of the transport table 77*b1dd958fScth */ 78*b1dd958fScth static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 79*b1dd958fScth scsi_hba_tran_t *tran, struct scsi_device *sd); 80*b1dd958fScth static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt); 81*b1dd958fScth static void emul64_pkt_comp(void *); 
82*b1dd958fScth static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt); 83*b1dd958fScth static int emul64_scsi_reset(struct scsi_address *ap, int level); 84*b1dd958fScth static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom); 85*b1dd958fScth static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, 86*b1dd958fScth int whom); 87*b1dd958fScth static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap, 88*b1dd958fScth struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen, 89*b1dd958fScth int tgtlen, int flags, int (*callback)(), caddr_t arg); 90*b1dd958fScth static void emul64_scsi_destroy_pkt(struct scsi_address *ap, 91*b1dd958fScth struct scsi_pkt *pkt); 92*b1dd958fScth static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt); 93*b1dd958fScth static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt); 94*b1dd958fScth static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag, 95*b1dd958fScth void (*callback)(caddr_t), caddr_t arg); 96*b1dd958fScth 97*b1dd958fScth /* 98*b1dd958fScth * internal functions 99*b1dd958fScth */ 100*b1dd958fScth static void emul64_i_initcap(struct emul64 *emul64); 101*b1dd958fScth 102*b1dd958fScth static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...); 103*b1dd958fScth static int emul64_get_tgtrange(struct emul64 *, 104*b1dd958fScth intptr_t, 105*b1dd958fScth emul64_tgt_t **, 106*b1dd958fScth emul64_tgt_range_t *); 107*b1dd958fScth static int emul64_write_off(struct emul64 *, 108*b1dd958fScth emul64_tgt_t *, 109*b1dd958fScth emul64_tgt_range_t *); 110*b1dd958fScth static int emul64_write_on(struct emul64 *, 111*b1dd958fScth emul64_tgt_t *, 112*b1dd958fScth emul64_tgt_range_t *); 113*b1dd958fScth static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *); 114*b1dd958fScth static void emul64_nowrite_free(emul64_nowrite_t *); 115*b1dd958fScth static emul64_nowrite_t 
*emul64_find_nowrite(emul64_tgt_t *, 116*b1dd958fScth diskaddr_t start_block, 117*b1dd958fScth size_t blkcnt, 118*b1dd958fScth emul64_rng_overlap_t *overlapp, 119*b1dd958fScth emul64_nowrite_t ***prevp); 120*b1dd958fScth 121*b1dd958fScth extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t); 122*b1dd958fScth 123*b1dd958fScth #ifdef EMUL64DEBUG 124*b1dd958fScth static void emul64_debug_dump_cdb(struct scsi_address *ap, 125*b1dd958fScth struct scsi_pkt *pkt); 126*b1dd958fScth #endif 127*b1dd958fScth 128*b1dd958fScth 129*b1dd958fScth #ifdef _DDICT 130*b1dd958fScth static int ddi_in_panic(void); 131*b1dd958fScth static int ddi_in_panic() { return (0); } 132*b1dd958fScth #ifndef SCSI_CAP_RESET_NOTIFICATION 133*b1dd958fScth #define SCSI_CAP_RESET_NOTIFICATION 14 134*b1dd958fScth #endif 135*b1dd958fScth #ifndef SCSI_RESET_NOTIFY 136*b1dd958fScth #define SCSI_RESET_NOTIFY 0x01 137*b1dd958fScth #endif 138*b1dd958fScth #ifndef SCSI_RESET_CANCEL 139*b1dd958fScth #define SCSI_RESET_CANCEL 0x02 140*b1dd958fScth #endif 141*b1dd958fScth #endif 142*b1dd958fScth 143*b1dd958fScth /* 144*b1dd958fScth * Tunables: 145*b1dd958fScth * 146*b1dd958fScth * emul64_max_task 147*b1dd958fScth * The taskq facility is used to queue up SCSI start requests on a per 148*b1dd958fScth * controller basis. If the maximum number of queued tasks is hit, 149*b1dd958fScth * taskq_ent_alloc() delays for a second, which adversely impacts our 150*b1dd958fScth * performance. This value establishes the maximum number of task 151*b1dd958fScth * queue entries when taskq_create is called. 152*b1dd958fScth * 153*b1dd958fScth * emul64_task_nthreads 154*b1dd958fScth * Specifies the number of threads that should be used to process a 155*b1dd958fScth * controller's task queue. Our init function sets this to the number 156*b1dd958fScth * of CPUs on the system, but this can be overridden in emul64.conf. 
157*b1dd958fScth */ 158*b1dd958fScth int emul64_max_task = 16; 159*b1dd958fScth int emul64_task_nthreads = 1; 160*b1dd958fScth 161*b1dd958fScth /* 162*b1dd958fScth * Local static data 163*b1dd958fScth */ 164*b1dd958fScth static void *emul64_state = NULL; 165*b1dd958fScth 166*b1dd958fScth /* 167*b1dd958fScth * Character/block operations. 168*b1dd958fScth */ 169*b1dd958fScth static struct cb_ops emul64_cbops = { 170*b1dd958fScth scsi_hba_open, /* cb_open */ 171*b1dd958fScth scsi_hba_close, /* cb_close */ 172*b1dd958fScth nodev, /* cb_strategy */ 173*b1dd958fScth nodev, /* cb_print */ 174*b1dd958fScth nodev, /* cb_dump */ 175*b1dd958fScth nodev, /* cb_read */ 176*b1dd958fScth nodev, /* cb_write */ 177*b1dd958fScth emul64_ioctl, /* cb_ioctl */ 178*b1dd958fScth nodev, /* cb_devmap */ 179*b1dd958fScth nodev, /* cb_mmap */ 180*b1dd958fScth nodev, /* cb_segmap */ 181*b1dd958fScth nochpoll, /* cb_chpoll */ 182*b1dd958fScth ddi_prop_op, /* cb_prop_op */ 183*b1dd958fScth NULL, /* cb_str */ 184*b1dd958fScth D_MP | D_64BIT | D_HOTPLUG, /* cb_flag */ 185*b1dd958fScth CB_REV, /* cb_rev */ 186*b1dd958fScth nodev, /* cb_aread */ 187*b1dd958fScth nodev /* cb_awrite */ 188*b1dd958fScth }; 189*b1dd958fScth 190*b1dd958fScth /* 191*b1dd958fScth * autoconfiguration routines. 
192*b1dd958fScth */ 193*b1dd958fScth 194*b1dd958fScth static struct dev_ops emul64_ops = { 195*b1dd958fScth DEVO_REV, /* rev, */ 196*b1dd958fScth 0, /* refcnt */ 197*b1dd958fScth emul64_info, /* getinfo */ 198*b1dd958fScth nulldev, /* identify */ 199*b1dd958fScth nulldev, /* probe */ 200*b1dd958fScth emul64_attach, /* attach */ 201*b1dd958fScth emul64_detach, /* detach */ 202*b1dd958fScth nodev, /* reset */ 203*b1dd958fScth &emul64_cbops, /* char/block ops */ 204*b1dd958fScth NULL /* bus ops */ 205*b1dd958fScth }; 206*b1dd958fScth 207*b1dd958fScth char _depends_on[] = "misc/scsi"; 208*b1dd958fScth 209*b1dd958fScth static struct modldrv modldrv = { 210*b1dd958fScth &mod_driverops, /* module type - driver */ 211*b1dd958fScth "emul64 SCSI Host Bus Adapter", /* module name */ 212*b1dd958fScth &emul64_ops, /* driver ops */ 213*b1dd958fScth }; 214*b1dd958fScth 215*b1dd958fScth static struct modlinkage modlinkage = { 216*b1dd958fScth MODREV_1, /* ml_rev - must be MODREV_1 */ 217*b1dd958fScth &modldrv, /* ml_linkage */ 218*b1dd958fScth NULL /* end of driver linkage */ 219*b1dd958fScth }; 220*b1dd958fScth 221*b1dd958fScth int 222*b1dd958fScth _init(void) 223*b1dd958fScth { 224*b1dd958fScth int ret; 225*b1dd958fScth 226*b1dd958fScth ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64), 227*b1dd958fScth EMUL64_INITIAL_SOFT_SPACE); 228*b1dd958fScth if (ret != 0) 229*b1dd958fScth return (ret); 230*b1dd958fScth 231*b1dd958fScth if ((ret = scsi_hba_init(&modlinkage)) != 0) { 232*b1dd958fScth ddi_soft_state_fini(&emul64_state); 233*b1dd958fScth return (ret); 234*b1dd958fScth } 235*b1dd958fScth 236*b1dd958fScth /* Set the number of task threads to the number of CPUs */ 237*b1dd958fScth if (boot_max_ncpus == -1) { 238*b1dd958fScth emul64_task_nthreads = max_ncpus; 239*b1dd958fScth } else { 240*b1dd958fScth emul64_task_nthreads = boot_max_ncpus; 241*b1dd958fScth } 242*b1dd958fScth 243*b1dd958fScth emul64_bsd_init(); 244*b1dd958fScth 245*b1dd958fScth ret = 
mod_install(&modlinkage); 246*b1dd958fScth if (ret != 0) { 247*b1dd958fScth emul64_bsd_fini(); 248*b1dd958fScth scsi_hba_fini(&modlinkage); 249*b1dd958fScth ddi_soft_state_fini(&emul64_state); 250*b1dd958fScth } 251*b1dd958fScth 252*b1dd958fScth return (ret); 253*b1dd958fScth } 254*b1dd958fScth 255*b1dd958fScth int 256*b1dd958fScth _fini(void) 257*b1dd958fScth { 258*b1dd958fScth int ret; 259*b1dd958fScth 260*b1dd958fScth if ((ret = mod_remove(&modlinkage)) != 0) 261*b1dd958fScth return (ret); 262*b1dd958fScth 263*b1dd958fScth emul64_bsd_fini(); 264*b1dd958fScth 265*b1dd958fScth scsi_hba_fini(&modlinkage); 266*b1dd958fScth 267*b1dd958fScth ddi_soft_state_fini(&emul64_state); 268*b1dd958fScth 269*b1dd958fScth return (ret); 270*b1dd958fScth } 271*b1dd958fScth 272*b1dd958fScth int 273*b1dd958fScth _info(struct modinfo *modinfop) 274*b1dd958fScth { 275*b1dd958fScth return (mod_info(&modlinkage, modinfop)); 276*b1dd958fScth } 277*b1dd958fScth 278*b1dd958fScth /* 279*b1dd958fScth * Given the device number return the devinfo pointer 280*b1dd958fScth * from the scsi_device structure. 
281*b1dd958fScth */ 282*b1dd958fScth /*ARGSUSED*/ 283*b1dd958fScth static int 284*b1dd958fScth emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 285*b1dd958fScth { 286*b1dd958fScth struct emul64 *foo; 287*b1dd958fScth int instance = getminor((dev_t)arg); 288*b1dd958fScth 289*b1dd958fScth switch (cmd) { 290*b1dd958fScth case DDI_INFO_DEVT2DEVINFO: 291*b1dd958fScth foo = ddi_get_soft_state(emul64_state, instance); 292*b1dd958fScth if (foo != NULL) 293*b1dd958fScth *result = (void *)foo->emul64_dip; 294*b1dd958fScth else { 295*b1dd958fScth *result = NULL; 296*b1dd958fScth return (DDI_FAILURE); 297*b1dd958fScth } 298*b1dd958fScth break; 299*b1dd958fScth 300*b1dd958fScth case DDI_INFO_DEVT2INSTANCE: 301*b1dd958fScth *result = (void *)(uintptr_t)instance; 302*b1dd958fScth break; 303*b1dd958fScth 304*b1dd958fScth default: 305*b1dd958fScth return (DDI_FAILURE); 306*b1dd958fScth } 307*b1dd958fScth 308*b1dd958fScth return (DDI_SUCCESS); 309*b1dd958fScth } 310*b1dd958fScth 311*b1dd958fScth /* 312*b1dd958fScth * Attach an instance of an emul64 host adapter. Allocate data structures, 313*b1dd958fScth * initialize the emul64 and we're on the air. 
314*b1dd958fScth */ 315*b1dd958fScth /*ARGSUSED*/ 316*b1dd958fScth static int 317*b1dd958fScth emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 318*b1dd958fScth { 319*b1dd958fScth int mutex_initted = 0; 320*b1dd958fScth struct emul64 *emul64; 321*b1dd958fScth int instance; 322*b1dd958fScth scsi_hba_tran_t *tran = NULL; 323*b1dd958fScth ddi_dma_attr_t tmp_dma_attr; 324*b1dd958fScth 325*b1dd958fScth emul64_bsd_get_props(dip); 326*b1dd958fScth 327*b1dd958fScth bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr)); 328*b1dd958fScth instance = ddi_get_instance(dip); 329*b1dd958fScth 330*b1dd958fScth switch (cmd) { 331*b1dd958fScth case DDI_ATTACH: 332*b1dd958fScth break; 333*b1dd958fScth 334*b1dd958fScth case DDI_RESUME: 335*b1dd958fScth tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 336*b1dd958fScth if (!tran) { 337*b1dd958fScth return (DDI_FAILURE); 338*b1dd958fScth } 339*b1dd958fScth emul64 = TRAN2EMUL64(tran); 340*b1dd958fScth 341*b1dd958fScth return (DDI_SUCCESS); 342*b1dd958fScth 343*b1dd958fScth default: 344*b1dd958fScth emul64_i_log(NULL, CE_WARN, 345*b1dd958fScth "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance); 346*b1dd958fScth return (DDI_FAILURE); 347*b1dd958fScth } 348*b1dd958fScth 349*b1dd958fScth /* 350*b1dd958fScth * Allocate emul64 data structure. 
351*b1dd958fScth */ 352*b1dd958fScth if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) { 353*b1dd958fScth emul64_i_log(NULL, CE_WARN, 354*b1dd958fScth "emul64%d: Failed to alloc soft state", 355*b1dd958fScth instance); 356*b1dd958fScth return (DDI_FAILURE); 357*b1dd958fScth } 358*b1dd958fScth 359*b1dd958fScth emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance); 360*b1dd958fScth if (emul64 == (struct emul64 *)NULL) { 361*b1dd958fScth emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state", 362*b1dd958fScth instance); 363*b1dd958fScth ddi_soft_state_free(emul64_state, instance); 364*b1dd958fScth return (DDI_FAILURE); 365*b1dd958fScth } 366*b1dd958fScth 367*b1dd958fScth 368*b1dd958fScth /* 369*b1dd958fScth * Allocate a transport structure 370*b1dd958fScth */ 371*b1dd958fScth tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 372*b1dd958fScth if (tran == NULL) { 373*b1dd958fScth cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n"); 374*b1dd958fScth goto fail; 375*b1dd958fScth } 376*b1dd958fScth 377*b1dd958fScth emul64->emul64_tran = tran; 378*b1dd958fScth emul64->emul64_dip = dip; 379*b1dd958fScth 380*b1dd958fScth tran->tran_hba_private = emul64; 381*b1dd958fScth tran->tran_tgt_private = NULL; 382*b1dd958fScth tran->tran_tgt_init = emul64_tran_tgt_init; 383*b1dd958fScth tran->tran_tgt_probe = scsi_hba_probe; 384*b1dd958fScth tran->tran_tgt_free = NULL; 385*b1dd958fScth 386*b1dd958fScth tran->tran_start = emul64_scsi_start; 387*b1dd958fScth tran->tran_abort = emul64_scsi_abort; 388*b1dd958fScth tran->tran_reset = emul64_scsi_reset; 389*b1dd958fScth tran->tran_getcap = emul64_scsi_getcap; 390*b1dd958fScth tran->tran_setcap = emul64_scsi_setcap; 391*b1dd958fScth tran->tran_init_pkt = emul64_scsi_init_pkt; 392*b1dd958fScth tran->tran_destroy_pkt = emul64_scsi_destroy_pkt; 393*b1dd958fScth tran->tran_dmafree = emul64_scsi_dmafree; 394*b1dd958fScth tran->tran_sync_pkt = emul64_scsi_sync_pkt; 395*b1dd958fScth tran->tran_reset_notify = 
emul64_scsi_reset_notify; 396*b1dd958fScth 397*b1dd958fScth tmp_dma_attr.dma_attr_minxfer = 0x1; 398*b1dd958fScth tmp_dma_attr.dma_attr_burstsizes = 0x7f; 399*b1dd958fScth 400*b1dd958fScth /* 401*b1dd958fScth * Attach this instance of the hba 402*b1dd958fScth */ 403*b1dd958fScth if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran, 404*b1dd958fScth 0) != DDI_SUCCESS) { 405*b1dd958fScth cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n"); 406*b1dd958fScth goto fail; 407*b1dd958fScth } 408*b1dd958fScth 409*b1dd958fScth emul64->emul64_initiator_id = 2; 410*b1dd958fScth 411*b1dd958fScth /* 412*b1dd958fScth * Look up the scsi-options property 413*b1dd958fScth */ 414*b1dd958fScth emul64->emul64_scsi_options = 415*b1dd958fScth ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options", 416*b1dd958fScth EMUL64_DEFAULT_SCSI_OPTIONS); 417*b1dd958fScth EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x", 418*b1dd958fScth emul64->emul64_scsi_options); 419*b1dd958fScth 420*b1dd958fScth 421*b1dd958fScth /* mutexes to protect the emul64 request and response queue */ 422*b1dd958fScth mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER, 423*b1dd958fScth emul64->emul64_iblock); 424*b1dd958fScth mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER, 425*b1dd958fScth emul64->emul64_iblock); 426*b1dd958fScth 427*b1dd958fScth mutex_initted = 1; 428*b1dd958fScth 429*b1dd958fScth EMUL64_MUTEX_ENTER(emul64); 430*b1dd958fScth 431*b1dd958fScth /* 432*b1dd958fScth * Initialize the default Target Capabilities and Sync Rates 433*b1dd958fScth */ 434*b1dd958fScth emul64_i_initcap(emul64); 435*b1dd958fScth 436*b1dd958fScth EMUL64_MUTEX_EXIT(emul64); 437*b1dd958fScth 438*b1dd958fScth 439*b1dd958fScth ddi_report_dev(dip); 440*b1dd958fScth emul64->emul64_taskq = taskq_create("emul64_comp", 441*b1dd958fScth emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0); 442*b1dd958fScth 443*b1dd958fScth return (DDI_SUCCESS); 444*b1dd958fScth 445*b1dd958fScth fail: 446*b1dd958fScth 
emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance); 447*b1dd958fScth 448*b1dd958fScth if (mutex_initted) { 449*b1dd958fScth mutex_destroy(EMUL64_REQ_MUTEX(emul64)); 450*b1dd958fScth mutex_destroy(EMUL64_RESP_MUTEX(emul64)); 451*b1dd958fScth } 452*b1dd958fScth if (tran) { 453*b1dd958fScth scsi_hba_tran_free(tran); 454*b1dd958fScth } 455*b1dd958fScth ddi_soft_state_free(emul64_state, instance); 456*b1dd958fScth return (DDI_FAILURE); 457*b1dd958fScth } 458*b1dd958fScth 459*b1dd958fScth /*ARGSUSED*/ 460*b1dd958fScth static int 461*b1dd958fScth emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 462*b1dd958fScth { 463*b1dd958fScth struct emul64 *emul64; 464*b1dd958fScth scsi_hba_tran_t *tran; 465*b1dd958fScth int instance = ddi_get_instance(dip); 466*b1dd958fScth 467*b1dd958fScth 468*b1dd958fScth /* get transport structure pointer from the dip */ 469*b1dd958fScth if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) { 470*b1dd958fScth return (DDI_FAILURE); 471*b1dd958fScth } 472*b1dd958fScth 473*b1dd958fScth /* get soft state from transport structure */ 474*b1dd958fScth emul64 = TRAN2EMUL64(tran); 475*b1dd958fScth 476*b1dd958fScth if (!emul64) { 477*b1dd958fScth return (DDI_FAILURE); 478*b1dd958fScth } 479*b1dd958fScth 480*b1dd958fScth EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd); 481*b1dd958fScth 482*b1dd958fScth switch (cmd) { 483*b1dd958fScth case DDI_DETACH: 484*b1dd958fScth EMUL64_MUTEX_ENTER(emul64); 485*b1dd958fScth 486*b1dd958fScth taskq_destroy(emul64->emul64_taskq); 487*b1dd958fScth (void) scsi_hba_detach(dip); 488*b1dd958fScth 489*b1dd958fScth scsi_hba_tran_free(emul64->emul64_tran); 490*b1dd958fScth 491*b1dd958fScth 492*b1dd958fScth EMUL64_MUTEX_EXIT(emul64); 493*b1dd958fScth 494*b1dd958fScth mutex_destroy(EMUL64_REQ_MUTEX(emul64)); 495*b1dd958fScth mutex_destroy(EMUL64_RESP_MUTEX(emul64)); 496*b1dd958fScth 497*b1dd958fScth 498*b1dd958fScth EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done"); 499*b1dd958fScth 
ddi_soft_state_free(emul64_state, instance); 500*b1dd958fScth 501*b1dd958fScth return (DDI_SUCCESS); 502*b1dd958fScth 503*b1dd958fScth case DDI_SUSPEND: 504*b1dd958fScth return (DDI_SUCCESS); 505*b1dd958fScth 506*b1dd958fScth default: 507*b1dd958fScth return (DDI_FAILURE); 508*b1dd958fScth } 509*b1dd958fScth } 510*b1dd958fScth 511*b1dd958fScth /* 512*b1dd958fScth * Function name : emul64_tran_tgt_init 513*b1dd958fScth * 514*b1dd958fScth * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise 515*b1dd958fScth * 516*b1dd958fScth */ 517*b1dd958fScth /*ARGSUSED*/ 518*b1dd958fScth static int 519*b1dd958fScth emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 520*b1dd958fScth scsi_hba_tran_t *tran, struct scsi_device *sd) 521*b1dd958fScth { 522*b1dd958fScth struct emul64 *emul64; 523*b1dd958fScth emul64_tgt_t *tgt; 524*b1dd958fScth char **geo_vidpid = NULL; 525*b1dd958fScth char *geo, *vidpid; 526*b1dd958fScth uint32_t *geoip = NULL; 527*b1dd958fScth uint_t length; 528*b1dd958fScth uint_t length2; 529*b1dd958fScth lldaddr_t sector_count; 530*b1dd958fScth char prop_name[15]; 531*b1dd958fScth int ret = DDI_FAILURE; 532*b1dd958fScth 533*b1dd958fScth emul64 = TRAN2EMUL64(tran); 534*b1dd958fScth EMUL64_MUTEX_ENTER(emul64); 535*b1dd958fScth 536*b1dd958fScth /* 537*b1dd958fScth * We get called for each target driver.conf node, multiple 538*b1dd958fScth * nodes may map to the same tgt,lun (sd.conf, st.conf, etc). 539*b1dd958fScth * Check to see if transport to tgt,lun already established. 
540*b1dd958fScth */ 541*b1dd958fScth tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun); 542*b1dd958fScth if (tgt) { 543*b1dd958fScth ret = DDI_SUCCESS; 544*b1dd958fScth goto out; 545*b1dd958fScth } 546*b1dd958fScth 547*b1dd958fScth /* see if we have driver.conf specified device for this target,lun */ 548*b1dd958fScth (void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d", 549*b1dd958fScth sd->sd_address.a_target, sd->sd_address.a_lun); 550*b1dd958fScth if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip, 551*b1dd958fScth DDI_PROP_DONTPASS, prop_name, 552*b1dd958fScth &geo_vidpid, &length) != DDI_PROP_SUCCESS) 553*b1dd958fScth goto out; 554*b1dd958fScth if (length < 2) { 555*b1dd958fScth cmn_err(CE_WARN, "emul64: %s property does not have 2 " 556*b1dd958fScth "elements", prop_name); 557*b1dd958fScth goto out; 558*b1dd958fScth } 559*b1dd958fScth 560*b1dd958fScth /* pick geometry name and vidpid string from string array */ 561*b1dd958fScth geo = *geo_vidpid; 562*b1dd958fScth vidpid = *(geo_vidpid + 1); 563*b1dd958fScth 564*b1dd958fScth /* lookup geometry property integer array */ 565*b1dd958fScth if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS, 566*b1dd958fScth geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) { 567*b1dd958fScth cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo); 568*b1dd958fScth goto out; 569*b1dd958fScth } 570*b1dd958fScth if (length2 < 6) { 571*b1dd958fScth cmn_err(CE_WARN, "emul64: property %s does not have 6 " 572*b1dd958fScth "elements", *geo_vidpid); 573*b1dd958fScth goto out; 574*b1dd958fScth } 575*b1dd958fScth 576*b1dd958fScth /* allocate and initialize tgt structure for tgt,lun */ 577*b1dd958fScth tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP); 578*b1dd958fScth rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL); 579*b1dd958fScth mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL); 580*b1dd958fScth 581*b1dd958fScth /* create avl for data block storage */ 
582*b1dd958fScth avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare, 583*b1dd958fScth sizeof (blklist_t), offsetof(blklist_t, bl_node)); 584*b1dd958fScth 585*b1dd958fScth /* save scsi_address and vidpid */ 586*b1dd958fScth bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address)); 587*b1dd958fScth (void) strncpy(tgt->emul64_tgt_inq, vidpid, 588*b1dd958fScth sizeof (emul64->emul64_tgt->emul64_tgt_inq)); 589*b1dd958fScth 590*b1dd958fScth /* 591*b1dd958fScth * The high order 4 bytes of the sector count always come first in 592*b1dd958fScth * emul64.conf. They are followed by the low order 4 bytes. Not 593*b1dd958fScth * all CPU types want them in this order, but laddr_t takes care of 594*b1dd958fScth * this for us. We then pick up geometry (ncyl X nheads X nsect). 595*b1dd958fScth */ 596*b1dd958fScth sector_count._p._u = *(geoip + 0); 597*b1dd958fScth sector_count._p._l = *(geoip + 1); 598*b1dd958fScth /* 599*b1dd958fScth * On 32-bit platforms, fix block size if it's greater than the 600*b1dd958fScth * allowable maximum. 
601*b1dd958fScth */ 602*b1dd958fScth #if !defined(_LP64) 603*b1dd958fScth if (sector_count._f > DK_MAX_BLOCKS) 604*b1dd958fScth sector_count._f = DK_MAX_BLOCKS; 605*b1dd958fScth #endif 606*b1dd958fScth tgt->emul64_tgt_sectors = sector_count._f; 607*b1dd958fScth tgt->emul64_tgt_dtype = *(geoip + 2); 608*b1dd958fScth tgt->emul64_tgt_ncyls = *(geoip + 3); 609*b1dd958fScth tgt->emul64_tgt_nheads = *(geoip + 4); 610*b1dd958fScth tgt->emul64_tgt_nsect = *(geoip + 5); 611*b1dd958fScth 612*b1dd958fScth /* insert target structure into list */ 613*b1dd958fScth tgt->emul64_tgt_next = emul64->emul64_tgt; 614*b1dd958fScth emul64->emul64_tgt = tgt; 615*b1dd958fScth ret = DDI_SUCCESS; 616*b1dd958fScth 617*b1dd958fScth out: EMUL64_MUTEX_EXIT(emul64); 618*b1dd958fScth if (geoip) 619*b1dd958fScth ddi_prop_free(geoip); 620*b1dd958fScth if (geo_vidpid) 621*b1dd958fScth ddi_prop_free(geo_vidpid); 622*b1dd958fScth return (ret); 623*b1dd958fScth } 624*b1dd958fScth 625*b1dd958fScth /* 626*b1dd958fScth * Function name : emul64_i_initcap 627*b1dd958fScth * 628*b1dd958fScth * Return Values : NONE 629*b1dd958fScth * Description : Initializes the default target capabilities and 630*b1dd958fScth * Sync Rates. 631*b1dd958fScth * 632*b1dd958fScth * Context : Called from the user thread through attach. 
633*b1dd958fScth * 634*b1dd958fScth */ 635*b1dd958fScth static void 636*b1dd958fScth emul64_i_initcap(struct emul64 *emul64) 637*b1dd958fScth { 638*b1dd958fScth uint16_t cap, synch; 639*b1dd958fScth int i; 640*b1dd958fScth 641*b1dd958fScth cap = 0; 642*b1dd958fScth synch = 0; 643*b1dd958fScth for (i = 0; i < NTARGETS_WIDE; i++) { 644*b1dd958fScth emul64->emul64_cap[i] = cap; 645*b1dd958fScth emul64->emul64_synch[i] = synch; 646*b1dd958fScth } 647*b1dd958fScth EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap); 648*b1dd958fScth } 649*b1dd958fScth 650*b1dd958fScth /* 651*b1dd958fScth * Function name : emul64_scsi_getcap() 652*b1dd958fScth * 653*b1dd958fScth * Return Values : current value of capability, if defined 654*b1dd958fScth * -1 if capability is not defined 655*b1dd958fScth * Description : returns current capability value 656*b1dd958fScth * 657*b1dd958fScth * Context : Can be called from different kernel process threads. 658*b1dd958fScth * Can be called by interrupt thread. 
659*b1dd958fScth */ 660*b1dd958fScth static int 661*b1dd958fScth emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom) 662*b1dd958fScth { 663*b1dd958fScth struct emul64 *emul64 = ADDR2EMUL64(ap); 664*b1dd958fScth int rval = 0; 665*b1dd958fScth 666*b1dd958fScth /* 667*b1dd958fScth * We don't allow inquiring about capabilities for other targets 668*b1dd958fScth */ 669*b1dd958fScth if (cap == NULL || whom == 0) { 670*b1dd958fScth return (-1); 671*b1dd958fScth } 672*b1dd958fScth 673*b1dd958fScth EMUL64_MUTEX_ENTER(emul64); 674*b1dd958fScth 675*b1dd958fScth switch (scsi_hba_lookup_capstr(cap)) { 676*b1dd958fScth case SCSI_CAP_DMA_MAX: 677*b1dd958fScth rval = 1 << 24; /* Limit to 16MB max transfer */ 678*b1dd958fScth break; 679*b1dd958fScth case SCSI_CAP_MSG_OUT: 680*b1dd958fScth rval = 1; 681*b1dd958fScth break; 682*b1dd958fScth case SCSI_CAP_DISCONNECT: 683*b1dd958fScth rval = 1; 684*b1dd958fScth break; 685*b1dd958fScth case SCSI_CAP_SYNCHRONOUS: 686*b1dd958fScth rval = 1; 687*b1dd958fScth break; 688*b1dd958fScth case SCSI_CAP_WIDE_XFER: 689*b1dd958fScth rval = 1; 690*b1dd958fScth break; 691*b1dd958fScth case SCSI_CAP_TAGGED_QING: 692*b1dd958fScth rval = 1; 693*b1dd958fScth break; 694*b1dd958fScth case SCSI_CAP_UNTAGGED_QING: 695*b1dd958fScth rval = 1; 696*b1dd958fScth break; 697*b1dd958fScth case SCSI_CAP_PARITY: 698*b1dd958fScth rval = 1; 699*b1dd958fScth break; 700*b1dd958fScth case SCSI_CAP_INITIATOR_ID: 701*b1dd958fScth rval = emul64->emul64_initiator_id; 702*b1dd958fScth break; 703*b1dd958fScth case SCSI_CAP_ARQ: 704*b1dd958fScth rval = 1; 705*b1dd958fScth break; 706*b1dd958fScth case SCSI_CAP_LINKED_CMDS: 707*b1dd958fScth break; 708*b1dd958fScth case SCSI_CAP_RESET_NOTIFICATION: 709*b1dd958fScth rval = 1; 710*b1dd958fScth break; 711*b1dd958fScth 712*b1dd958fScth default: 713*b1dd958fScth rval = -1; 714*b1dd958fScth break; 715*b1dd958fScth } 716*b1dd958fScth 717*b1dd958fScth EMUL64_MUTEX_EXIT(emul64); 718*b1dd958fScth 719*b1dd958fScth return (rval); 
720*b1dd958fScth } 721*b1dd958fScth 722*b1dd958fScth /* 723*b1dd958fScth * Function name : emul64_scsi_setcap() 724*b1dd958fScth * 725*b1dd958fScth * Return Values : 1 - capability exists and can be set to new value 726*b1dd958fScth * 0 - capability could not be set to new value 727*b1dd958fScth * -1 - no such capability 728*b1dd958fScth * 729*b1dd958fScth * Description : sets a capability for a target 730*b1dd958fScth * 731*b1dd958fScth * Context : Can be called from different kernel process threads. 732*b1dd958fScth * Can be called by interrupt thread. 733*b1dd958fScth */ 734*b1dd958fScth static int 735*b1dd958fScth emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom) 736*b1dd958fScth { 737*b1dd958fScth struct emul64 *emul64 = ADDR2EMUL64(ap); 738*b1dd958fScth int rval = 0; 739*b1dd958fScth 740*b1dd958fScth /* 741*b1dd958fScth * We don't allow setting capabilities for other targets 742*b1dd958fScth */ 743*b1dd958fScth if (cap == NULL || whom == 0) { 744*b1dd958fScth return (-1); 745*b1dd958fScth } 746*b1dd958fScth 747*b1dd958fScth EMUL64_MUTEX_ENTER(emul64); 748*b1dd958fScth 749*b1dd958fScth switch (scsi_hba_lookup_capstr(cap)) { 750*b1dd958fScth case SCSI_CAP_DMA_MAX: 751*b1dd958fScth case SCSI_CAP_MSG_OUT: 752*b1dd958fScth case SCSI_CAP_PARITY: 753*b1dd958fScth case SCSI_CAP_UNTAGGED_QING: 754*b1dd958fScth case SCSI_CAP_LINKED_CMDS: 755*b1dd958fScth case SCSI_CAP_RESET_NOTIFICATION: 756*b1dd958fScth /* 757*b1dd958fScth * None of these are settable via 758*b1dd958fScth * the capability interface. 
759*b1dd958fScth */ 760*b1dd958fScth break; 761*b1dd958fScth case SCSI_CAP_DISCONNECT: 762*b1dd958fScth rval = 1; 763*b1dd958fScth break; 764*b1dd958fScth case SCSI_CAP_SYNCHRONOUS: 765*b1dd958fScth rval = 1; 766*b1dd958fScth break; 767*b1dd958fScth case SCSI_CAP_TAGGED_QING: 768*b1dd958fScth rval = 1; 769*b1dd958fScth break; 770*b1dd958fScth case SCSI_CAP_WIDE_XFER: 771*b1dd958fScth rval = 1; 772*b1dd958fScth break; 773*b1dd958fScth case SCSI_CAP_INITIATOR_ID: 774*b1dd958fScth rval = -1; 775*b1dd958fScth break; 776*b1dd958fScth case SCSI_CAP_ARQ: 777*b1dd958fScth rval = 1; 778*b1dd958fScth break; 779*b1dd958fScth case SCSI_CAP_TOTAL_SECTORS: 780*b1dd958fScth emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value; 781*b1dd958fScth rval = TRUE; 782*b1dd958fScth break; 783*b1dd958fScth case SCSI_CAP_SECTOR_SIZE: 784*b1dd958fScth rval = TRUE; 785*b1dd958fScth break; 786*b1dd958fScth default: 787*b1dd958fScth rval = -1; 788*b1dd958fScth break; 789*b1dd958fScth } 790*b1dd958fScth 791*b1dd958fScth 792*b1dd958fScth EMUL64_MUTEX_EXIT(emul64); 793*b1dd958fScth 794*b1dd958fScth return (rval); 795*b1dd958fScth } 796*b1dd958fScth 797*b1dd958fScth /* 798*b1dd958fScth * Function name : emul64_scsi_init_pkt 799*b1dd958fScth * 800*b1dd958fScth * Return Values : pointer to scsi_pkt, or NULL 801*b1dd958fScth * Description : Called by kernel on behalf of a target driver 802*b1dd958fScth * calling scsi_init_pkt(9F). 803*b1dd958fScth * Refer to tran_init_pkt(9E) man page 804*b1dd958fScth * 805*b1dd958fScth * Context : Can be called from different kernel process threads. 806*b1dd958fScth * Can be called by interrupt thread. 
807*b1dd958fScth */ 808*b1dd958fScth /* ARGSUSED */ 809*b1dd958fScth static struct scsi_pkt * 810*b1dd958fScth emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 811*b1dd958fScth struct buf *bp, int cmdlen, int statuslen, int tgtlen, 812*b1dd958fScth int flags, int (*callback)(), caddr_t arg) 813*b1dd958fScth { 814*b1dd958fScth struct emul64 *emul64 = ADDR2EMUL64(ap); 815*b1dd958fScth struct emul64_cmd *sp; 816*b1dd958fScth 817*b1dd958fScth ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC); 818*b1dd958fScth 819*b1dd958fScth /* 820*b1dd958fScth * First step of emul64_scsi_init_pkt: pkt allocation 821*b1dd958fScth */ 822*b1dd958fScth if (pkt == NULL) { 823*b1dd958fScth pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen, 824*b1dd958fScth statuslen, 825*b1dd958fScth tgtlen, sizeof (struct emul64_cmd), callback, arg); 826*b1dd958fScth if (pkt == NULL) { 827*b1dd958fScth cmn_err(CE_WARN, "emul64_scsi_init_pkt: " 828*b1dd958fScth "scsi_hba_pkt_alloc failed"); 829*b1dd958fScth return (NULL); 830*b1dd958fScth } 831*b1dd958fScth 832*b1dd958fScth sp = PKT2CMD(pkt); 833*b1dd958fScth 834*b1dd958fScth /* 835*b1dd958fScth * Initialize the new pkt - we redundantly initialize 836*b1dd958fScth * all the fields for illustrative purposes. 
837*b1dd958fScth */ 838*b1dd958fScth sp->cmd_pkt = pkt; 839*b1dd958fScth sp->cmd_flags = 0; 840*b1dd958fScth sp->cmd_scblen = statuslen; 841*b1dd958fScth sp->cmd_cdblen = cmdlen; 842*b1dd958fScth sp->cmd_emul64 = emul64; 843*b1dd958fScth pkt->pkt_address = *ap; 844*b1dd958fScth pkt->pkt_comp = (void (*)())NULL; 845*b1dd958fScth pkt->pkt_flags = 0; 846*b1dd958fScth pkt->pkt_time = 0; 847*b1dd958fScth pkt->pkt_resid = 0; 848*b1dd958fScth pkt->pkt_statistics = 0; 849*b1dd958fScth pkt->pkt_reason = 0; 850*b1dd958fScth 851*b1dd958fScth } else { 852*b1dd958fScth sp = PKT2CMD(pkt); 853*b1dd958fScth } 854*b1dd958fScth 855*b1dd958fScth /* 856*b1dd958fScth * Second step of emul64_scsi_init_pkt: dma allocation/move 857*b1dd958fScth */ 858*b1dd958fScth if (bp && bp->b_bcount != 0) { 859*b1dd958fScth if (bp->b_flags & B_READ) { 860*b1dd958fScth sp->cmd_flags &= ~CFLAG_DMASEND; 861*b1dd958fScth } else { 862*b1dd958fScth sp->cmd_flags |= CFLAG_DMASEND; 863*b1dd958fScth } 864*b1dd958fScth bp_mapin(bp); 865*b1dd958fScth sp->cmd_addr = (unsigned char *) bp->b_un.b_addr; 866*b1dd958fScth sp->cmd_count = bp->b_bcount; 867*b1dd958fScth pkt->pkt_resid = 0; 868*b1dd958fScth } 869*b1dd958fScth 870*b1dd958fScth return (pkt); 871*b1dd958fScth } 872*b1dd958fScth 873*b1dd958fScth 874*b1dd958fScth /* 875*b1dd958fScth * Function name : emul64_scsi_destroy_pkt 876*b1dd958fScth * 877*b1dd958fScth * Return Values : none 878*b1dd958fScth * Description : Called by kernel on behalf of a target driver 879*b1dd958fScth * calling scsi_destroy_pkt(9F). 880*b1dd958fScth * Refer to tran_destroy_pkt(9E) man page 881*b1dd958fScth * 882*b1dd958fScth * Context : Can be called from different kernel process threads. 883*b1dd958fScth * Can be called by interrupt thread. 
884*b1dd958fScth */ 885*b1dd958fScth static void 886*b1dd958fScth emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 887*b1dd958fScth { 888*b1dd958fScth struct emul64_cmd *sp = PKT2CMD(pkt); 889*b1dd958fScth 890*b1dd958fScth /* 891*b1dd958fScth * emul64_scsi_dmafree inline to make things faster 892*b1dd958fScth */ 893*b1dd958fScth if (sp->cmd_flags & CFLAG_DMAVALID) { 894*b1dd958fScth /* 895*b1dd958fScth * Free the mapping. 896*b1dd958fScth */ 897*b1dd958fScth sp->cmd_flags &= ~CFLAG_DMAVALID; 898*b1dd958fScth } 899*b1dd958fScth 900*b1dd958fScth /* 901*b1dd958fScth * Free the pkt 902*b1dd958fScth */ 903*b1dd958fScth scsi_hba_pkt_free(ap, pkt); 904*b1dd958fScth } 905*b1dd958fScth 906*b1dd958fScth 907*b1dd958fScth /* 908*b1dd958fScth * Function name : emul64_scsi_dmafree() 909*b1dd958fScth * 910*b1dd958fScth * Return Values : none 911*b1dd958fScth * Description : free dvma resources 912*b1dd958fScth * 913*b1dd958fScth * Context : Can be called from different kernel process threads. 914*b1dd958fScth * Can be called by interrupt thread. 915*b1dd958fScth */ 916*b1dd958fScth /*ARGSUSED*/ 917*b1dd958fScth static void 918*b1dd958fScth emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 919*b1dd958fScth { 920*b1dd958fScth } 921*b1dd958fScth 922*b1dd958fScth /* 923*b1dd958fScth * Function name : emul64_scsi_sync_pkt() 924*b1dd958fScth * 925*b1dd958fScth * Return Values : none 926*b1dd958fScth * Description : sync dma 927*b1dd958fScth * 928*b1dd958fScth * Context : Can be called from different kernel process threads. 929*b1dd958fScth * Can be called by interrupt thread. 930*b1dd958fScth */ 931*b1dd958fScth /*ARGSUSED*/ 932*b1dd958fScth static void 933*b1dd958fScth emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 934*b1dd958fScth { 935*b1dd958fScth } 936*b1dd958fScth 937*b1dd958fScth /* 938*b1dd958fScth * routine for reset notification setup, to register or cancel. 
939*b1dd958fScth */ 940*b1dd958fScth static int 941*b1dd958fScth emul64_scsi_reset_notify(struct scsi_address *ap, int flag, 942*b1dd958fScth void (*callback)(caddr_t), caddr_t arg) 943*b1dd958fScth { 944*b1dd958fScth struct emul64 *emul64 = ADDR2EMUL64(ap); 945*b1dd958fScth struct emul64_reset_notify_entry *p, *beforep; 946*b1dd958fScth int rval = DDI_FAILURE; 947*b1dd958fScth 948*b1dd958fScth mutex_enter(EMUL64_REQ_MUTEX(emul64)); 949*b1dd958fScth 950*b1dd958fScth p = emul64->emul64_reset_notify_listf; 951*b1dd958fScth beforep = NULL; 952*b1dd958fScth 953*b1dd958fScth while (p) { 954*b1dd958fScth if (p->ap == ap) 955*b1dd958fScth break; /* An entry exists for this target */ 956*b1dd958fScth beforep = p; 957*b1dd958fScth p = p->next; 958*b1dd958fScth } 959*b1dd958fScth 960*b1dd958fScth if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) { 961*b1dd958fScth if (beforep == NULL) { 962*b1dd958fScth emul64->emul64_reset_notify_listf = p->next; 963*b1dd958fScth } else { 964*b1dd958fScth beforep->next = p->next; 965*b1dd958fScth } 966*b1dd958fScth kmem_free((caddr_t)p, 967*b1dd958fScth sizeof (struct emul64_reset_notify_entry)); 968*b1dd958fScth rval = DDI_SUCCESS; 969*b1dd958fScth 970*b1dd958fScth } else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) { 971*b1dd958fScth p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry), 972*b1dd958fScth KM_SLEEP); 973*b1dd958fScth p->ap = ap; 974*b1dd958fScth p->callback = callback; 975*b1dd958fScth p->arg = arg; 976*b1dd958fScth p->next = emul64->emul64_reset_notify_listf; 977*b1dd958fScth emul64->emul64_reset_notify_listf = p; 978*b1dd958fScth rval = DDI_SUCCESS; 979*b1dd958fScth } 980*b1dd958fScth 981*b1dd958fScth mutex_exit(EMUL64_REQ_MUTEX(emul64)); 982*b1dd958fScth 983*b1dd958fScth return (rval); 984*b1dd958fScth } 985*b1dd958fScth 986*b1dd958fScth /* 987*b1dd958fScth * Function name : emul64_scsi_start() 988*b1dd958fScth * 989*b1dd958fScth * Return Values : TRAN_FATAL_ERROR - emul64 has been shutdown 990*b1dd958fScth * 
TRAN_BUSY - request queue is full 991*b1dd958fScth * TRAN_ACCEPT - pkt has been submitted to emul64 992*b1dd958fScth * 993*b1dd958fScth * Description : init pkt, start the request 994*b1dd958fScth * 995*b1dd958fScth * Context : Can be called from different kernel process threads. 996*b1dd958fScth * Can be called by interrupt thread. 997*b1dd958fScth */ 998*b1dd958fScth static int 999*b1dd958fScth emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt) 1000*b1dd958fScth { 1001*b1dd958fScth struct emul64_cmd *sp = PKT2CMD(pkt); 1002*b1dd958fScth int rval = TRAN_ACCEPT; 1003*b1dd958fScth struct emul64 *emul64 = ADDR2EMUL64(ap); 1004*b1dd958fScth clock_t cur_lbolt; 1005*b1dd958fScth taskqid_t dispatched; 1006*b1dd958fScth 1007*b1dd958fScth ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic()); 1008*b1dd958fScth ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic()); 1009*b1dd958fScth 1010*b1dd958fScth EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp); 1011*b1dd958fScth 1012*b1dd958fScth pkt->pkt_reason = CMD_CMPLT; 1013*b1dd958fScth 1014*b1dd958fScth #ifdef EMUL64DEBUG 1015*b1dd958fScth if (emul64_cdb_debug) { 1016*b1dd958fScth emul64_debug_dump_cdb(ap, pkt); 1017*b1dd958fScth } 1018*b1dd958fScth #endif /* EMUL64DEBUG */ 1019*b1dd958fScth 1020*b1dd958fScth /* 1021*b1dd958fScth * calculate deadline from pkt_time 1022*b1dd958fScth * Instead of multiplying by 100 (ie. 
HZ), we multiply by 128 so 1023*b1dd958fScth * we can shift and at the same time have a 28% grace period 1024*b1dd958fScth * we ignore the rare case of pkt_time == 0 and deal with it 1025*b1dd958fScth * in emul64_i_watch() 1026*b1dd958fScth */ 1027*b1dd958fScth cur_lbolt = ddi_get_lbolt(); 1028*b1dd958fScth sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128); 1029*b1dd958fScth 1030*b1dd958fScth if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) { 1031*b1dd958fScth emul64_pkt_comp((caddr_t)pkt); 1032*b1dd958fScth } else { 1033*b1dd958fScth dispatched = NULL; 1034*b1dd958fScth if (emul64_collect_stats) { 1035*b1dd958fScth /* 1036*b1dd958fScth * If we are collecting statistics, call 1037*b1dd958fScth * taskq_dispatch in no sleep mode, so that we can 1038*b1dd958fScth * detect if we are exceeding the queue length that 1039*b1dd958fScth * was established in the call to taskq_create in 1040*b1dd958fScth * emul64_attach. If the no sleep call fails 1041*b1dd958fScth * (returns NULL), the task will be dispatched in 1042*b1dd958fScth * sleep mode below. 1043*b1dd958fScth */ 1044*b1dd958fScth dispatched = taskq_dispatch(emul64->emul64_taskq, 1045*b1dd958fScth emul64_pkt_comp, 1046*b1dd958fScth (void *)pkt, TQ_NOSLEEP); 1047*b1dd958fScth if (dispatched == NULL) { 1048*b1dd958fScth /* Queue was full. dispatch failed. 
*/ 1049*b1dd958fScth mutex_enter(&emul64_stats_mutex); 1050*b1dd958fScth emul64_taskq_max++; 1051*b1dd958fScth mutex_exit(&emul64_stats_mutex); 1052*b1dd958fScth } 1053*b1dd958fScth } 1054*b1dd958fScth if (dispatched == NULL) { 1055*b1dd958fScth (void) taskq_dispatch(emul64->emul64_taskq, 1056*b1dd958fScth emul64_pkt_comp, (void *)pkt, TQ_SLEEP); 1057*b1dd958fScth } 1058*b1dd958fScth } 1059*b1dd958fScth 1060*b1dd958fScth done: 1061*b1dd958fScth ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic()); 1062*b1dd958fScth ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic()); 1063*b1dd958fScth 1064*b1dd958fScth return (rval); 1065*b1dd958fScth } 1066*b1dd958fScth 1067*b1dd958fScth void 1068*b1dd958fScth emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq) 1069*b1dd958fScth { 1070*b1dd958fScth struct scsi_arq_status *arq = 1071*b1dd958fScth (struct scsi_arq_status *)pkt->pkt_scbp; 1072*b1dd958fScth 1073*b1dd958fScth /* got check, no data transferred and ARQ done */ 1074*b1dd958fScth arq->sts_status.sts_chk = 1; 1075*b1dd958fScth pkt->pkt_state |= STATE_ARQ_DONE; 1076*b1dd958fScth pkt->pkt_state &= ~STATE_XFERRED_DATA; 1077*b1dd958fScth 1078*b1dd958fScth /* for ARQ */ 1079*b1dd958fScth arq->sts_rqpkt_reason = CMD_CMPLT; 1080*b1dd958fScth arq->sts_rqpkt_resid = 0; 1081*b1dd958fScth arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | 1082*b1dd958fScth STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS; 1083*b1dd958fScth arq->sts_sensedata.es_valid = 1; 1084*b1dd958fScth arq->sts_sensedata.es_class = 0x7; 1085*b1dd958fScth arq->sts_sensedata.es_key = key; 1086*b1dd958fScth arq->sts_sensedata.es_add_code = asc; 1087*b1dd958fScth arq->sts_sensedata.es_qual_code = ascq; 1088*b1dd958fScth } 1089*b1dd958fScth 1090*b1dd958fScth int bsd_scsi_start_stop_unit(struct scsi_pkt *); 1091*b1dd958fScth int bsd_scsi_test_unit_ready(struct scsi_pkt *); 1092*b1dd958fScth int bsd_scsi_request_sense(struct scsi_pkt *); 
1093*b1dd958fScth int bsd_scsi_inquiry(struct scsi_pkt *); 1094*b1dd958fScth int bsd_scsi_format(struct scsi_pkt *); 1095*b1dd958fScth int bsd_scsi_io(struct scsi_pkt *); 1096*b1dd958fScth int bsd_scsi_log_sense(struct scsi_pkt *); 1097*b1dd958fScth int bsd_scsi_mode_sense(struct scsi_pkt *); 1098*b1dd958fScth int bsd_scsi_mode_select(struct scsi_pkt *); 1099*b1dd958fScth int bsd_scsi_read_capacity(struct scsi_pkt *); 1100*b1dd958fScth int bsd_scsi_read_capacity_16(struct scsi_pkt *); 1101*b1dd958fScth int bsd_scsi_reserve(struct scsi_pkt *); 1102*b1dd958fScth int bsd_scsi_format(struct scsi_pkt *); 1103*b1dd958fScth int bsd_scsi_release(struct scsi_pkt *); 1104*b1dd958fScth int bsd_scsi_read_defect_list(struct scsi_pkt *); 1105*b1dd958fScth int bsd_scsi_reassign_block(struct scsi_pkt *); 1106*b1dd958fScth int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *); 1107*b1dd958fScth 1108*b1dd958fScth static void 1109*b1dd958fScth emul64_handle_cmd(struct scsi_pkt *pkt) 1110*b1dd958fScth { 1111*b1dd958fScth switch (pkt->pkt_cdbp[0]) { 1112*b1dd958fScth case SCMD_START_STOP: 1113*b1dd958fScth (void) bsd_scsi_start_stop_unit(pkt); 1114*b1dd958fScth break; 1115*b1dd958fScth case SCMD_TEST_UNIT_READY: 1116*b1dd958fScth (void) bsd_scsi_test_unit_ready(pkt); 1117*b1dd958fScth break; 1118*b1dd958fScth case SCMD_REQUEST_SENSE: 1119*b1dd958fScth (void) bsd_scsi_request_sense(pkt); 1120*b1dd958fScth break; 1121*b1dd958fScth case SCMD_INQUIRY: 1122*b1dd958fScth (void) bsd_scsi_inquiry(pkt); 1123*b1dd958fScth break; 1124*b1dd958fScth case SCMD_FORMAT: 1125*b1dd958fScth (void) bsd_scsi_format(pkt); 1126*b1dd958fScth break; 1127*b1dd958fScth case SCMD_READ: 1128*b1dd958fScth case SCMD_WRITE: 1129*b1dd958fScth case SCMD_READ_G1: 1130*b1dd958fScth case SCMD_WRITE_G1: 1131*b1dd958fScth case SCMD_READ_G4: 1132*b1dd958fScth case SCMD_WRITE_G4: 1133*b1dd958fScth (void) bsd_scsi_io(pkt); 1134*b1dd958fScth break; 1135*b1dd958fScth case SCMD_LOG_SENSE_G1: 1136*b1dd958fScth (void) 
bsd_scsi_log_sense(pkt); 1137*b1dd958fScth break; 1138*b1dd958fScth case SCMD_MODE_SENSE: 1139*b1dd958fScth case SCMD_MODE_SENSE_G1: 1140*b1dd958fScth (void) bsd_scsi_mode_sense(pkt); 1141*b1dd958fScth break; 1142*b1dd958fScth case SCMD_MODE_SELECT: 1143*b1dd958fScth case SCMD_MODE_SELECT_G1: 1144*b1dd958fScth (void) bsd_scsi_mode_select(pkt); 1145*b1dd958fScth break; 1146*b1dd958fScth case SCMD_READ_CAPACITY: 1147*b1dd958fScth (void) bsd_scsi_read_capacity(pkt); 1148*b1dd958fScth break; 1149*b1dd958fScth case SCMD_SVC_ACTION_IN_G4: 1150*b1dd958fScth if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) { 1151*b1dd958fScth (void) bsd_scsi_read_capacity_16(pkt); 1152*b1dd958fScth } else { 1153*b1dd958fScth cmn_err(CE_WARN, "emul64: unrecognized G4 service " 1154*b1dd958fScth "action 0x%x", pkt->pkt_cdbp[1]); 1155*b1dd958fScth } 1156*b1dd958fScth break; 1157*b1dd958fScth case SCMD_RESERVE: 1158*b1dd958fScth case SCMD_RESERVE_G1: 1159*b1dd958fScth (void) bsd_scsi_reserve(pkt); 1160*b1dd958fScth break; 1161*b1dd958fScth case SCMD_RELEASE: 1162*b1dd958fScth case SCMD_RELEASE_G1: 1163*b1dd958fScth (void) bsd_scsi_release(pkt); 1164*b1dd958fScth break; 1165*b1dd958fScth case SCMD_REASSIGN_BLOCK: 1166*b1dd958fScth (void) bsd_scsi_reassign_block(pkt); 1167*b1dd958fScth break; 1168*b1dd958fScth case SCMD_READ_DEFECT_LIST: 1169*b1dd958fScth (void) bsd_scsi_read_defect_list(pkt); 1170*b1dd958fScth break; 1171*b1dd958fScth case SCMD_PRIN: 1172*b1dd958fScth case SCMD_PROUT: 1173*b1dd958fScth case SCMD_REPORT_LUNS: 1174*b1dd958fScth /* ASC 0x24 INVALID FIELD IN CDB */ 1175*b1dd958fScth emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0); 1176*b1dd958fScth break; 1177*b1dd958fScth default: 1178*b1dd958fScth cmn_err(CE_WARN, "emul64: unrecognized " 1179*b1dd958fScth "SCSI cmd 0x%x", pkt->pkt_cdbp[0]); 1180*b1dd958fScth emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0); 1181*b1dd958fScth break; 1182*b1dd958fScth case SCMD_GET_CONFIGURATION: 1183*b1dd958fScth case 0x35: /* 
SCMD_SYNCHRONIZE_CACHE */ 1184*b1dd958fScth /* Don't complain */ 1185*b1dd958fScth break; 1186*b1dd958fScth } 1187*b1dd958fScth } 1188*b1dd958fScth 1189*b1dd958fScth static void 1190*b1dd958fScth emul64_pkt_comp(void * arg) 1191*b1dd958fScth { 1192*b1dd958fScth struct scsi_pkt *pkt = (struct scsi_pkt *)arg; 1193*b1dd958fScth struct emul64_cmd *sp = PKT2CMD(pkt); 1194*b1dd958fScth emul64_tgt_t *tgt; 1195*b1dd958fScth 1196*b1dd958fScth EMUL64_MUTEX_ENTER(sp->cmd_emul64); 1197*b1dd958fScth tgt = find_tgt(sp->cmd_emul64, 1198*b1dd958fScth pkt->pkt_address.a_target, pkt->pkt_address.a_lun); 1199*b1dd958fScth EMUL64_MUTEX_EXIT(sp->cmd_emul64); 1200*b1dd958fScth if (!tgt) { 1201*b1dd958fScth pkt->pkt_reason = CMD_TIMEOUT; 1202*b1dd958fScth pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD; 1203*b1dd958fScth pkt->pkt_statistics = STAT_TIMEOUT; 1204*b1dd958fScth } else { 1205*b1dd958fScth pkt->pkt_reason = CMD_CMPLT; 1206*b1dd958fScth *pkt->pkt_scbp = STATUS_GOOD; 1207*b1dd958fScth pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | 1208*b1dd958fScth STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS; 1209*b1dd958fScth pkt->pkt_statistics = 0; 1210*b1dd958fScth emul64_handle_cmd(pkt); 1211*b1dd958fScth } 1212*b1dd958fScth (*pkt->pkt_comp)(pkt); 1213*b1dd958fScth } 1214*b1dd958fScth 1215*b1dd958fScth /* ARGSUSED */ 1216*b1dd958fScth static int 1217*b1dd958fScth emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 1218*b1dd958fScth { 1219*b1dd958fScth return (1); 1220*b1dd958fScth } 1221*b1dd958fScth 1222*b1dd958fScth /* ARGSUSED */ 1223*b1dd958fScth static int 1224*b1dd958fScth emul64_scsi_reset(struct scsi_address *ap, int level) 1225*b1dd958fScth { 1226*b1dd958fScth return (1); 1227*b1dd958fScth } 1228*b1dd958fScth 1229*b1dd958fScth static int 1230*b1dd958fScth emul64_get_tgtrange(struct emul64 *emul64, 1231*b1dd958fScth intptr_t arg, 1232*b1dd958fScth emul64_tgt_t **tgtp, 1233*b1dd958fScth emul64_tgt_range_t *tgtr) 1234*b1dd958fScth { 1235*b1dd958fScth if 
(ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
		return (EFAULT);
	}
	/* Target list is protected by the per-instance mutex. */
	EMUL64_MUTEX_ENTER(emul64);
	*tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (*tgtp == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
		    tgtr->emul64_target, tgtr->emul64_lun,
		    ddi_get_instance(emul64->emul64_dip));
		return (ENXIO);
	}
	return (0);
}

/*
 * cb_ops ioctl entry point: implements the EMUL64_* debug ioctls
 * (write-protect / re-enable / zero a block range) and forwards
 * everything else to the framework via scsi_hba_ioctl(9F).
 */
static int
emul64_ioctl(dev_t dev,
	int cmd,
	intptr_t arg,
	int mode,
	cred_t *credp,
	int *rvalp)
{
	struct emul64	*emul64;
	int		instance;
	int		rv = 0;
	emul64_tgt_range_t	tgtr;
	emul64_tgt_t	*tgt;

	/* Map the minor number back to the per-instance soft state. */
	instance = MINOR2INST(getminor(dev));
	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
		    getminor(dev));
		return (ENXIO);
	}

	switch (cmd) {
	case EMUL64_WRITE_OFF:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_off(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_WRITE_ON:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_on(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_ZERO_RANGE:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			mutex_enter(&tgt->emul64_tgt_blk_lock);
			rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
			mutex_exit(&tgt->emul64_tgt_blk_lock);
		}
		break;
	default:
		/* Not ours - let the SCSA framework have a look. */
		rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
		break;
	}
	return (rv);
}

/*
 * EMUL64_WRITE_OFF ioctl: write-protect a block range by inserting it
 * into the target's "nowrite" list, provided it overlaps no existing
 * entry.  Returns 0 on success, EINVAL on overlap.
 */
/* ARGSUSED */
static int
emul64_write_off(struct emul64 *emul64,
	emul64_tgt_t *tgt,
	emul64_tgt_range_t *tgtr)
{
	size_t	blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	*nowrite;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	diskaddr_t	sb = tgtr->emul64_blkrange.emul64_sb;

	nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange);

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_NONE) {
		/* Insert into list */
		*prev = nowrite;
		nowrite->emul64_nwnext = cur;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);
	if (overlap == O_NONE) {
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count++;
			mutex_exit(&emul64_stats_mutex);
		}
	} else {
		/*
		 * NOTE(review): overlap != O_NONE guarantees cur != NULL
		 * here, since emul64_find_nowrite() only reports an
		 * overlap for an entry it found.
		 */
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%"
		    PRIx64 "overlaps 0x%llx,0x%" PRIx64 "\n",
		    nowrite->emul64_blocked.emul64_sb,
		    nowrite->emul64_blocked.emul64_blkcnt,
		    cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		emul64_nowrite_free(nowrite);
		return (EINVAL);
	}
	return (0);
}

/*
 * EMUL64_WRITE_ON ioctl: re-enable writes to a block range by removing
 * the exactly-matching entry from the target's "nowrite" list.
 * Returns 0 on success, ENXIO if no entry matches, EINVAL if the range
 * only partially overlaps an existing entry.
 */
/* ARGSUSED */
static int
emul64_write_on(struct emul64 *emul64,
	emul64_tgt_t *tgt,
	emul64_tgt_range_t *tgtr)
{
	size_t	blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	int	rv = 0;
	diskaddr_t	sb = tgtr->emul64_blkrange.emul64_sb;

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_SAME) {
		/* Remove from list */
		*prev = cur->emul64_nwnext;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

	switch (overlap) {
	case O_NONE:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "range not found\n", sb, blkcnt);
		rv = ENXIO;
		break;
	case O_SAME:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count--;
			mutex_exit(&emul64_stats_mutex);
		}
		/* Entry was unlinked above; safe to free outside the lock. */
		emul64_nowrite_free(cur);
		break;
	case O_OVERLAP:
	case O_SUBSET:
		/* Partial overlap: refuse rather than split the entry. */
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "overlaps 0x%llx,0x%" PRIx64 "\n",
		    sb, blkcnt, cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		rv = EINVAL;
		break;
	}
	return (rv);
}

/*
 * Search tgt's nowrite list for the first entry overlapping the range
 * [sb, sb + blkcnt).  Returns the entry (or NULL), stores the overlap
 * classification in *overlap, and stores in *prevp the address of the
 * link pointing at the returned entry so the caller can insert/unlink.
 * Caller must hold emul64_tgt_nw_lock.
 */
static emul64_nowrite_t *
emul64_find_nowrite(emul64_tgt_t *tgt,
	diskaddr_t sb,
	size_t blkcnt,
	emul64_rng_overlap_t *overlap,
	emul64_nowrite_t ***prevp)
{
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	**prev;

	/* Find spot in list */
	*overlap = O_NONE;
	prev = &tgt->emul64_tgt_nowrite;
	cur = tgt->emul64_tgt_nowrite;
	while (cur != NULL) {
		*overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
		if (*overlap != O_NONE)
			break;
		prev = &cur->emul64_nwnext;
		cur = cur->emul64_nwnext;
	}

	*prevp = prev;
	return (cur);
}

/*
 * Allocate a nowrite list entry holding a copy of *range.
 * Caller frees with emul64_nowrite_free().
 */
static emul64_nowrite_t *
emul64_nowrite_alloc(emul64_range_t *range)
{
	emul64_nowrite_t	*nw;

	nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
	bcopy((void *) range,
	    (void *) &nw->emul64_blocked,
	    sizeof (nw->emul64_blocked));
	return (nw);
}

/* Free one nowrite list entry. */
static void
emul64_nowrite_free(emul64_nowrite_t *nw) 1432*b1dd958fScth { 1433*b1dd958fScth kmem_free((void *) nw, sizeof (*nw)); 1434*b1dd958fScth } 1435*b1dd958fScth 1436*b1dd958fScth emul64_rng_overlap_t 1437*b1dd958fScth emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt) 1438*b1dd958fScth { 1439*b1dd958fScth 1440*b1dd958fScth if (rng->emul64_sb >= sb + cnt) 1441*b1dd958fScth return (O_NONE); 1442*b1dd958fScth if (rng->emul64_sb + rng->emul64_blkcnt <= sb) 1443*b1dd958fScth return (O_NONE); 1444*b1dd958fScth if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt)) 1445*b1dd958fScth return (O_SAME); 1446*b1dd958fScth if ((sb >= rng->emul64_sb) && 1447*b1dd958fScth ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) { 1448*b1dd958fScth return (O_SUBSET); 1449*b1dd958fScth } 1450*b1dd958fScth return (O_OVERLAP); 1451*b1dd958fScth } 1452*b1dd958fScth 1453*b1dd958fScth #include <sys/varargs.h> 1454*b1dd958fScth 1455*b1dd958fScth /* 1456*b1dd958fScth * Error logging, printing, and debug print routines 1457*b1dd958fScth */ 1458*b1dd958fScth 1459*b1dd958fScth /*VARARGS3*/ 1460*b1dd958fScth static void 1461*b1dd958fScth emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...) 1462*b1dd958fScth { 1463*b1dd958fScth char buf[256]; 1464*b1dd958fScth va_list ap; 1465*b1dd958fScth 1466*b1dd958fScth va_start(ap, fmt); 1467*b1dd958fScth (void) vsnprintf(buf, sizeof (buf), fmt, ap); 1468*b1dd958fScth va_end(ap); 1469*b1dd958fScth 1470*b1dd958fScth scsi_log(emul64 ? 
emul64->emul64_dip : NULL, 1471*b1dd958fScth "emul64", level, "%s\n", buf); 1472*b1dd958fScth } 1473*b1dd958fScth 1474*b1dd958fScth 1475*b1dd958fScth #ifdef EMUL64DEBUG 1476*b1dd958fScth 1477*b1dd958fScth static void 1478*b1dd958fScth emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt) 1479*b1dd958fScth { 1480*b1dd958fScth static char hex[] = "0123456789abcdef"; 1481*b1dd958fScth struct emul64 *emul64 = ADDR2EMUL64(ap); 1482*b1dd958fScth struct emul64_cmd *sp = PKT2CMD(pkt); 1483*b1dd958fScth uint8_t *cdb = pkt->pkt_cdbp; 1484*b1dd958fScth char buf [256]; 1485*b1dd958fScth char *p; 1486*b1dd958fScth int i; 1487*b1dd958fScth 1488*b1dd958fScth (void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ", 1489*b1dd958fScth ddi_get_instance(emul64->emul64_dip), 1490*b1dd958fScth ap->a_target, ap->a_lun); 1491*b1dd958fScth 1492*b1dd958fScth p = buf + strlen(buf); 1493*b1dd958fScth 1494*b1dd958fScth *p++ = '['; 1495*b1dd958fScth for (i = 0; i < sp->cmd_cdblen; i++, cdb++) { 1496*b1dd958fScth if (i != 0) 1497*b1dd958fScth *p++ = ' '; 1498*b1dd958fScth *p++ = hex[(*cdb >> 4) & 0x0f]; 1499*b1dd958fScth *p++ = hex[*cdb & 0x0f]; 1500*b1dd958fScth } 1501*b1dd958fScth *p++ = ']'; 1502*b1dd958fScth *p++ = '\n'; 1503*b1dd958fScth *p = 0; 1504*b1dd958fScth 1505*b1dd958fScth cmn_err(CE_CONT, buf); 1506*b1dd958fScth } 1507*b1dd958fScth #endif /* EMUL64DEBUG */ 1508