/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target
 * devices (large disks).
 */

#ifdef DEBUG
#define	EMUL64DEBUG
#endif

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/dklabel.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

int emul64_usetaskq	= 1;	/* set to zero for debugging */
int emul64debug		= 0;
#ifdef	EMUL64DEBUG
static int emul64_cdb_debug	= 0;
#include <sys/debug.h>
#endif

/*
 * cb_ops function prototypes
 */
static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode,
			cred_t *credp, int *rvalp);

/*
 * dev_ops function prototypes
 */
static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd);
static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_pkt_comp(void *);
static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset(struct scsi_address *ap, int level);
static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void emul64_scsi_destroy_pkt(struct scsi_address *ap,
					struct scsi_pkt *pkt);
static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);

/*
 * internal functions
 */
static void emul64_i_initcap(struct emul64 *emul64);

static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...);
static int emul64_get_tgtrange(struct emul64 *,
				intptr_t,
				emul64_tgt_t **,
				emul64_tgt_range_t *);
static int emul64_write_off(struct emul64 *,
			    emul64_tgt_t *,
			    emul64_tgt_range_t *);
static int emul64_write_on(struct emul64 *,
				emul64_tgt_t *,
				emul64_tgt_range_t *);
static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *);
static void emul64_nowrite_free(emul64_nowrite_t *);
static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
					diskaddr_t start_block,
					size_t blkcnt,
					emul64_rng_overlap_t *overlapp,
					emul64_nowrite_t ***prevp);

extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);

#ifdef EMUL64DEBUG
static void emul64_debug_dump_cdb(struct scsi_address *ap,
		struct scsi_pkt *pkt);
#endif


#ifdef	_DDICT
static int	ddi_in_panic(void);
static int	ddi_in_panic() { return (0); }
#ifndef	SCSI_CAP_RESET_NOTIFICATION
#define	SCSI_CAP_RESET_NOTIFICATION		14
#endif
#ifndef	SCSI_RESET_NOTIFY
#define	SCSI_RESET_NOTIFY			0x01
#endif
#ifndef	SCSI_RESET_CANCEL
#define	SCSI_RESET_CANCEL			0x02
#endif
#endif

/*
 * Tunables:
 *
 * emul64_max_task
 *	The taskq facility is used to queue up SCSI start requests on a per
 *	controller basis.  If the maximum number of queued tasks is hit,
 *	taskq_ent_alloc() delays for a second, which adversely impacts our
 *	performance.  This value establishes the maximum number of task
 *	queue entries when taskq_create is called.
 *
 * emul64_task_nthreads
 *	Specifies the number of threads that should be used to process a
 *	controller's task queue.  Our init function sets this to the number
 *	of CPUs on the system, but this can be overridden in emul64.conf.
 */
int emul64_max_task = 16;
int emul64_task_nthreads = 1;
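
/*
 * As an illustration (a hypothetical setting, not shipped with this
 * driver), module globals like the two above can also be tuned from
 * /etc/system using the standard module:variable syntax, e.g.:
 *
 *	set emul64:emul64_task_nthreads = 4
 */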

/*
 * Local static data
 */
static void		*emul64_state = NULL;

/*
 * Character/block operations.
 */
static struct cb_ops emul64_cbops = {
	scsi_hba_open,		/* cb_open */
	scsi_hba_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	emul64_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_MP | D_64BIT | D_HOTPLUG, /* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * autoconfiguration routines.
 */

static struct dev_ops emul64_ops = {
	DEVO_REV,			/* rev */
	0,				/* refcnt */
	emul64_info,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	emul64_attach,			/* attach */
	emul64_detach,			/* detach */
	nodev,				/* reset */
	&emul64_cbops,			/* char/block ops */
	NULL,				/* bus ops */
	NULL,				/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
	&mod_driverops,			/* module type - driver */
	"emul64 SCSI Host Bus Adapter",	/* module name */
	&emul64_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev - must be MODREV_1 */
	&modldrv,			/* ml_linkage */
	NULL				/* end of driver linkage */
};

int
_init(void)
{
	int	ret;

	ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
	    EMUL64_INITIAL_SOFT_SPACE);
	if (ret != 0)
		return (ret);

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&emul64_state);
		return (ret);
	}

	/* Set the number of task threads to the number of CPUs */
	if (boot_max_ncpus == -1) {
		emul64_task_nthreads = max_ncpus;
	} else {
		emul64_task_nthreads = boot_max_ncpus;
	}

	emul64_bsd_init();

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		emul64_bsd_fini();
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&emul64_state);
	}

	return (ret);
}

int
_fini(void)
{
	int	ret;

	if ((ret = mod_remove(&modlinkage)) != 0)
		return (ret);

	emul64_bsd_fini();

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&emul64_state);

	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number, return the devinfo pointer
 * from the scsi_device structure.
 */
/*ARGSUSED*/
static int
emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct emul64	*emul64;
	int		instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		emul64 = ddi_get_soft_state(emul64_state, instance);
		if (emul64 != NULL)
			*result = (void *)emul64->emul64_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Attach an instance of an emul64 host adapter.  Allocate data structures,
 * initialize the emul64 and we're on the air.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		mutex_initted = 0;
	struct emul64	*emul64;
	int		instance;
	scsi_hba_tran_t	*tran = NULL;
	ddi_dma_attr_t	tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		emul64 = TRAN2EMUL64(tran);

		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state",
		    instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == (struct emul64 *)NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran			= tran;
	emul64->emul64_dip			= dip;

	tran->tran_hba_private		= emul64;
	tran->tran_tgt_private		= NULL;
	tran->tran_tgt_init		= emul64_tran_tgt_init;
	tran->tran_tgt_probe		= scsi_hba_probe;
	tran->tran_tgt_free		= NULL;

	tran->tran_start		= emul64_scsi_start;
	tran->tran_abort		= emul64_scsi_abort;
	tran->tran_reset		= emul64_scsi_reset;
	tran->tran_getcap		= emul64_scsi_getcap;
	tran->tran_setcap		= emul64_scsi_setcap;
	tran->tran_init_pkt		= emul64_scsi_init_pkt;
	tran->tran_destroy_pkt		= emul64_scsi_destroy_pkt;
	tran->tran_dmafree		= emul64_scsi_dmafree;
	tran->tran_sync_pkt		= emul64_scsi_sync_pkt;
	tran->tran_reset_notify		= emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);

	/* mutexes to protect the emul64 request and response queues */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);

	ddi_report_dev(dip);
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}

/*ARGSUSED*/
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct emul64	*emul64;
	scsi_hba_tran_t	*tran;
	int		instance = ddi_get_instance(dip);

	/* get transport structure pointer from the dip */
	if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
		return (DDI_FAILURE);
	}

	/* get soft state from transport structure */
	emul64 = TRAN2EMUL64(tran);

	if (!emul64) {
		return (DDI_FAILURE);
	}

	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

	switch (cmd) {
	case DDI_DETACH:
		EMUL64_MUTEX_ENTER(emul64);

		taskq_destroy(emul64->emul64_taskq);
		(void) scsi_hba_detach(dip);

		scsi_hba_tran_free(emul64->emul64_tran);

		EMUL64_MUTEX_EXIT(emul64);

		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));

		EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
		ddi_soft_state_free(emul64_state, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Function name : emul64_tran_tgt_init
 *
 * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
 *
 */
/*ARGSUSED*/
static int
emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct emul64	*emul64;
	emul64_tgt_t	*tgt;
	char		**geo_vidpid = NULL;
	char		*geo, *vidpid;
	uint32_t	*geoip = NULL;
	uint_t		length;
	uint_t		length2;
	lldaddr_t	sector_count;
	char		prop_name[15];
	int		ret = DDI_FAILURE;

	emul64 = TRAN2EMUL64(tran);
	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * We get called for each target driver.conf node; multiple
	 * nodes may map to the same tgt,lun (sd.conf, st.conf, etc).
	 * Check to see if transport to tgt,lun has already been established.
	 */
	tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
	if (tgt) {
		ret = DDI_SUCCESS;
		goto out;
	}

	/* see if we have a driver.conf specified device for this target,lun */
	(void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
	    sd->sd_address.a_target, sd->sd_address.a_lun);
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
	    DDI_PROP_DONTPASS, prop_name,
	    &geo_vidpid, &length) != DDI_PROP_SUCCESS)
		goto out;
	if (length < 2) {
		cmn_err(CE_WARN, "emul64: %s property does not have 2 "
		    "elements", prop_name);
		goto out;
	}

	/* pick geometry name and vidpid string from string array */
	geo = *geo_vidpid;
	vidpid = *(geo_vidpid + 1);

	/* lookup geometry property integer array */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
	    geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
		goto out;
	}
	if (length2 < 6) {
		cmn_err(CE_WARN, "emul64: property %s does not have 6 "
		    "elements", *geo_vidpid);
		goto out;
	}

	/* allocate and initialize tgt structure for tgt,lun */
	tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
	rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
	mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);

	/* create avl for data block storage */
	avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
	    sizeof (blklist_t), offsetof(blklist_t, bl_node));

	/* save scsi_address and vidpid */
	bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
	(void) strncpy(tgt->emul64_tgt_inq, vidpid,
	    sizeof (tgt->emul64_tgt_inq));

	/*
	 * The high order 4 bytes of the sector count always come first in
	 * emul64.conf.  They are followed by the low order 4 bytes.  Not
	 * all CPU types want them in this order, but lldaddr_t takes care
	 * of this for us.  We then pick up geometry (ncyl X nheads X nsect).
	 */
	sector_count._p._u	= *(geoip + 0);
	sector_count._p._l	= *(geoip + 1);
	/*
	 * On 32-bit platforms, cap the sector count at the maximum
	 * addressable value (DK_MAX_BLOCKS).
	 */
#if !defined(_LP64)
	if (sector_count._f > DK_MAX_BLOCKS)
		sector_count._f = DK_MAX_BLOCKS;
#endif
	tgt->emul64_tgt_sectors = sector_count._f;
	tgt->emul64_tgt_dtype	= *(geoip + 2);
	tgt->emul64_tgt_ncyls	= *(geoip + 3);
	tgt->emul64_tgt_nheads	= *(geoip + 4);
	tgt->emul64_tgt_nsect	= *(geoip + 5);
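
	/*
	 * To illustrate the properties parsed above, a hypothetical
	 * emul64.conf entry for target 0, lun 0 could look like:
	 *
	 *	targ_0_0="geo_1gb", "EMULATED SUN1G";
	 *	geo_1gb=0, 2097152, 0, 1024, 64, 32;
	 *
	 * i.e. a sector count of (0 << 32) + 2097152, device type 0
	 * (direct access), 1024 cylinders, 64 heads and 32 sectors per
	 * track (1024 * 64 * 32 = 2097152).
	 */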

	/* insert target structure into list */
	tgt->emul64_tgt_next = emul64->emul64_tgt;
	emul64->emul64_tgt = tgt;
	ret = DDI_SUCCESS;

out:	EMUL64_MUTEX_EXIT(emul64);
	if (geoip)
		ddi_prop_free(geoip);
	if (geo_vidpid)
		ddi_prop_free(geo_vidpid);
	return (ret);
}

/*
 * Function name : emul64_i_initcap
 *
 * Return Values : NONE
 * Description	 : Initializes the default target capabilities and
 *		   Sync Rates.
 *
 * Context	 : Called from the user thread through attach.
 *
 */
static void
emul64_i_initcap(struct emul64 *emul64)
{
	uint16_t	cap, synch;
	int		i;

	cap = 0;
	synch = 0;
	for (i = 0; i < NTARGETS_WIDE; i++) {
		emul64->emul64_cap[i] = cap;
		emul64->emul64_synch[i] = synch;
	}
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap);
}

/*
 * Function name : emul64_scsi_getcap()
 *
 * Return Values : current value of capability, if defined
 *		   -1 if capability is not defined
 * Description	 : returns current capability value
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	int		rval = 0;

	/*
	 * We don't allow inquiring about capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		rval = 1 << 24; /* Limit to 16MB max transfer */
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 1;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 1;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = emul64->emul64_initiator_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;

	default:
		rval = -1;
		break;
	}

	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}

/*
 * Function name : emul64_scsi_setcap()
 *
 * Return Values : 1 - capability exists and can be set to new value
 *		   0 - capability could not be set to new value
 *		  -1 - no such capability
 *
 * Description	 : sets a capability for a target
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	int		rval = 0;

	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 1;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = -1;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_TOTAL_SECTORS:
		emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value;
		rval = TRUE;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = TRUE;
		break;
	default:
		rval = -1;
		break;
	}

	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}

/*
 * Function name : emul64_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_init_pkt(9F).
 *		   Refer to tran_init_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/* ARGSUSED */
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
	int flags, int (*callback)(), caddr_t arg)
{
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	struct emul64_cmd	*sp;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

	/*
	 * First step of emul64_scsi_init_pkt:  pkt allocation
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
		    statuslen, tgtlen, sizeof (struct emul64_cmd),
		    callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
			    "scsi_hba_pkt_alloc failed");
			return (NULL);
		}

		sp = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		sp->cmd_pkt		= pkt;
		sp->cmd_flags		= 0;
		sp->cmd_scblen		= statuslen;
		sp->cmd_cdblen		= cmdlen;
		sp->cmd_emul64		= emul64;
		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;

	} else {
		sp = PKT2CMD(pkt);
	}

	/*
	 * Second step of emul64_scsi_init_pkt:  dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		if (bp->b_flags & B_READ) {
			sp->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			sp->cmd_flags |= CFLAG_DMASEND;
		}
		bp_mapin(bp);
		sp->cmd_addr = (unsigned char *)bp->b_un.b_addr;
		sp->cmd_count = bp->b_bcount;
		pkt->pkt_resid = 0;
	}

	return (pkt);
}


/*
 * Function name : emul64_scsi_destroy_pkt
 *
 * Return Values : none
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_destroy_pkt(9F).
 *		   Refer to tran_destroy_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static void
emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);

	/*
	 * emul64_scsi_dmafree inline to make things faster
	 */
	if (sp->cmd_flags & CFLAG_DMAVALID) {
		/*
		 * Free the mapping.
		 */
		sp->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/*
	 * Free the pkt
	 */
	scsi_hba_pkt_free(ap, pkt);
}


/*
 * Function name : emul64_scsi_dmafree()
 *
 * Return Values : none
 * Description	 : free dvma resources
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * Function name : emul64_scsi_sync_pkt()
 *
 * Return Values : none
 * Description	 : sync dma
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * routine for reset notification setup, to register or cancel.
 */
static int
emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	struct emul64				*emul64 = ADDR2EMUL64(ap);
	struct emul64_reset_notify_entry	*p, *beforep;
	int					rval = DDI_FAILURE;

	mutex_enter(EMUL64_REQ_MUTEX(emul64));

	p = emul64->emul64_reset_notify_listf;
	beforep = NULL;

	while (p) {
		if (p->ap == ap)
			break;	/* An entry exists for this target */
		beforep = p;
		p = p->next;
	}

	if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
		if (beforep == NULL) {
			emul64->emul64_reset_notify_listf = p->next;
		} else {
			beforep->next = p->next;
		}
		kmem_free((caddr_t)p,
		    sizeof (struct emul64_reset_notify_entry));
		rval = DDI_SUCCESS;

	} else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
		p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
		    KM_SLEEP);
		p->ap = ap;
		p->callback = callback;
		p->arg = arg;
		p->next = emul64->emul64_reset_notify_listf;
		emul64->emul64_reset_notify_listf = p;
		rval = DDI_SUCCESS;
	}

	mutex_exit(EMUL64_REQ_MUTEX(emul64));

	return (rval);
}
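
/*
 * A target driver would normally reach the routine above through the
 * scsi_reset_notify(9F) entry point rather than calling it directly;
 * a minimal sketch (my_reset_cb is a caller-supplied callback, not
 * part of this driver):
 *
 *	(void) scsi_reset_notify(ap, SCSI_RESET_NOTIFY, my_reset_cb, arg);
 */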

/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- emul64 has been shutdown
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to emul64
 *
 * Description	 : init pkt, start the request
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	int			rval	= TRAN_ACCEPT;
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	clock_t			cur_lbolt;
	taskqid_t		dispatched;

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

	pkt->pkt_reason = CMD_CMPLT;

#ifdef	EMUL64DEBUG
	if (emul64_cdb_debug) {
		emul64_debug_dump_cdb(ap, pkt);
	}
#endif	/* EMUL64DEBUG */

	/*
	 * Calculate the deadline from pkt_time.  Instead of multiplying
	 * by 100 (i.e. HZ), we multiply by 128 so we can shift and at the
	 * same time have a 28% grace period.  We ignore the rare case of
	 * pkt_time == 0 and deal with it in emul64_i_watch().
	 */
	cur_lbolt = ddi_get_lbolt();
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);

	if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
		emul64_pkt_comp((caddr_t)pkt);
	} else {
		dispatched = NULL;
		if (emul64_collect_stats) {
			/*
			 * If we are collecting statistics, call
			 * taskq_dispatch in no sleep mode, so that we can
			 * detect if we are exceeding the queue length that
			 * was established in the call to taskq_create in
			 * emul64_attach.  If the no sleep call fails
			 * (returns NULL), the task will be dispatched in
			 * sleep mode below.
			 */
			dispatched = taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
			if (dispatched == NULL) {
				/* Queue was full.  dispatch failed. */
				mutex_enter(&emul64_stats_mutex);
				emul64_taskq_max++;
				mutex_exit(&emul64_stats_mutex);
			}
		}
		if (dispatched == NULL) {
			(void) taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
		}
	}

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	return (rval);
}

void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;

	/* got check, no data transferred and ARQ done */
	arq->sts_status.sts_chk = 1;
	pkt->pkt_state |= STATE_ARQ_DONE;
	pkt->pkt_state &= ~STATE_XFERRED_DATA;

	/* for ARQ */
	arq->sts_rqpkt_reason = CMD_CMPLT;
	arq->sts_rqpkt_resid = 0;
	arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
	arq->sts_sensedata.es_valid = 1;
	arq->sts_sensedata.es_class = 0x7;
	arq->sts_sensedata.es_key = key;
	arq->sts_sensedata.es_add_code = asc;
	arq->sts_sensedata.es_qual_code = ascq;
}
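
/*
 * Example use, as in emul64_handle_cmd() below: report an invalid CDB
 * field (sense key ILLEGAL REQUEST, ASC 0x24, ASCQ 0x0):
 *
 *	emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
 */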

ushort_t
emul64_error_inject(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;
	uint_t			max_sense_len;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	/*
	 * If there is no target, skip the error injection and
	 * let the packet be handled normally.  This would normally
	 * never happen since a_target and a_lun are set up in
	 * emul64_scsi_init_pkt.
	 */
	if (tgt == NULL) {
		return (ERR_INJ_DISABLE);
	}

	if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
		arq->sts_status = tgt->emul64_einj_scsi_status;
		pkt->pkt_state = tgt->emul64_einj_pkt_state;
		pkt->pkt_reason = tgt->emul64_einj_pkt_reason;

		/*
		 * Calculate available sense buffer length.  We could just
		 * assume sizeof (struct scsi_extended_sense) but hopefully
		 * that limitation will go away soon.
		 */
		max_sense_len = sp->cmd_scblen -
		    (sizeof (struct scsi_arq_status) -
		    sizeof (struct scsi_extended_sense));
		if (max_sense_len > tgt->emul64_einj_sense_length) {
			max_sense_len = tgt->emul64_einj_sense_length;
		}

		/* for ARQ */
		arq->sts_rqpkt_reason = CMD_CMPLT;
		arq->sts_rqpkt_resid = 0;
		arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Copy sense data */
		if (tgt->emul64_einj_sense_data != NULL) {
			bcopy(tgt->emul64_einj_sense_data,
			    (uint8_t *)&arq->sts_sensedata,
			    max_sense_len);
		}
	}

	/* Return current error injection state */
	return (tgt->emul64_einj_state);
}

int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
	emul64_tgt_t		*tgt;
	struct emul64_error_inj_data error_inj_req;

	/* Check args */
	if (arg == NULL) {
		return (EINVAL);
	}

	if (ddi_copyin((void *)arg, &error_inj_req,
	    sizeof (error_inj_req), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
		return (EFAULT);
	}

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, error_inj_req.eccd_target,
	    error_inj_req.eccd_lun);
	EMUL64_MUTEX_EXIT(emul64);

	/* Make sure device exists */
	if (tgt == NULL) {
		return (ENODEV);
	}

	/* Free old sense buffer if we have one */
	if (tgt->emul64_einj_sense_data != NULL) {
		ASSERT(tgt->emul64_einj_sense_length != 0);
		kmem_free(tgt->emul64_einj_sense_data,
		    tgt->emul64_einj_sense_length);
		tgt->emul64_einj_sense_data = NULL;
		tgt->emul64_einj_sense_length = 0;
	}

	/*
	 * Now handle the error injection request.  If error injection
	 * is requested, we will return the sense data provided for
	 * any I/O to this target until told to stop.
	 */
	tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
	tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
	tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
	tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
	tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
	switch (error_inj_req.eccd_inj_state) {
	case ERR_INJ_ENABLE:
	case ERR_INJ_ENABLE_NODATA:
		if (error_inj_req.eccd_sns_dlen) {
			tgt->emul64_einj_sense_data =
			    kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
			/* Copy sense data */
			if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
			    tgt->emul64_einj_sense_data,
			    error_inj_req.eccd_sns_dlen, 0) != 0) {
				cmn_err(CE_WARN,
				    "emul64: sense data copy in failed\n");
				return (EFAULT);
			}
		}
		break;
	case ERR_INJ_DISABLE:
	default:
		break;
	}

	return (0);
}
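
/*
 * The ioctl argument parsed above is an emul64_error_inj_data structure
 * immediately followed by eccd_sns_dlen bytes of sense data.  A
 * hypothetical user-level sketch (buffer layout and names are the
 * caller's, not part of this driver):
 *
 *	struct {
 *		struct emul64_error_inj_data req;
 *		uint8_t sense[MY_SNS_LEN];
 *	} buf;
 *	...
 *	(void) ioctl(fd, EMUL64_ERROR_INJECT, &buf);
 */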

int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);

static void
emul64_handle_cmd(struct scsi_pkt *pkt)
{
	if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
		/*
		 * If error injection is configured to return with
		 * no data, return now without handling the command.
		 * This is how normal check conditions work.
		 *
		 * If the error injection state is ERR_INJ_ENABLE
		 * (or if error injection is disabled), continue and
		 * handle the command.  This would be used for
		 * KEY_RECOVERABLE_ERROR type conditions.
		 */
		return;
	}

	switch (pkt->pkt_cdbp[0]) {
	case SCMD_START_STOP:
		(void) bsd_scsi_start_stop_unit(pkt);
		break;
	case SCMD_TEST_UNIT_READY:
		(void) bsd_scsi_test_unit_ready(pkt);
		break;
	case SCMD_REQUEST_SENSE:
		(void) bsd_scsi_request_sense(pkt);
		break;
	case SCMD_INQUIRY:
		(void) bsd_scsi_inquiry(pkt);
		break;
	case SCMD_FORMAT:
		(void) bsd_scsi_format(pkt);
		break;
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		(void) bsd_scsi_io(pkt);
		break;
	case SCMD_LOG_SENSE_G1:
		(void) bsd_scsi_log_sense(pkt);
		break;
	case SCMD_MODE_SENSE:
	case SCMD_MODE_SENSE_G1:
		(void) bsd_scsi_mode_sense(pkt);
		break;
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		(void) bsd_scsi_mode_select(pkt);
		break;
	case SCMD_READ_CAPACITY:
		(void) bsd_scsi_read_capacity(pkt);
		break;
	case SCMD_SVC_ACTION_IN_G4:
		if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
			(void) bsd_scsi_read_capacity_16(pkt);
		} else {
			cmn_err(CE_WARN, "emul64: unrecognized G4 service "
			    "action 0x%x", pkt->pkt_cdbp[1]);
		}
		break;
	case SCMD_RESERVE:
	case SCMD_RESERVE_G1:
		(void) bsd_scsi_reserve(pkt);
		break;
	case SCMD_RELEASE:
	case SCMD_RELEASE_G1:
		(void) bsd_scsi_release(pkt);
		break;
	case SCMD_REASSIGN_BLOCK:
		(void) bsd_scsi_reassign_block(pkt);
		break;
	case SCMD_READ_DEFECT_LIST:
		(void) bsd_scsi_read_defect_list(pkt);
		break;
	case SCMD_PRIN:
	case SCMD_PROUT:
	case SCMD_REPORT_LUNS:
		/* ASC 0x24 INVALID FIELD IN CDB */
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	case SCMD_GET_CONFIGURATION:
	case 0x35:			/* SCMD_SYNCHRONIZE_CACHE */
		/* Don't complain */
		break;
	default:
		cmn_err(CE_WARN, "emul64: unrecognized "
		    "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	}
}

static void
emul64_pkt_comp(void *arg)
{
	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (!tgt) {
		pkt->pkt_reason = CMD_TIMEOUT;
		pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
		pkt->pkt_statistics = STAT_TIMEOUT;
	} else {
		pkt->pkt_reason = CMD_CMPLT;
		*pkt->pkt_scbp = STATUS_GOOD;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
		pkt->pkt_statistics = 0;
		emul64_handle_cmd(pkt);
	}
	scsi_hba_pkt_comp(pkt);
}

/* ARGSUSED */
static int
emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	return (1);
}

/* ARGSUSED */
static int
emul64_scsi_reset(struct scsi_address *ap, int level)
{
	return (1);
}

static int
emul64_get_tgtrange(struct emul64 *emul64,
		    intptr_t arg,
		    emul64_tgt_t **tgtp,
		    emul64_tgt_range_t *tgtr)
{
	if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
		return (EFAULT);
	}
	EMUL64_MUTEX_ENTER(emul64);
	*tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (*tgtp == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
		    tgtr->emul64_target, tgtr->emul64_lun,
		    ddi_get_instance(emul64->emul64_dip));
		return (ENXIO);
	}
	return (0);
}

static int
emul64_ioctl(dev_t dev,
	int cmd,
	intptr_t arg,
	int mode,
	cred_t *credp,
	int *rvalp)
{
	struct emul64		*emul64;
	int			instance;
	int			rv = 0;
	emul64_tgt_range_t	tgtr;
	emul64_tgt_t		*tgt;

	instance = MINOR2INST(getminor(dev));
	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
		    getminor(dev));
		return (ENXIO);
	}

	switch (cmd) {
	case EMUL64_WRITE_OFF:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_off(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_WRITE_ON:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_on(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_ZERO_RANGE:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			mutex_enter(&tgt->emul64_tgt_blk_lock);
			rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
			mutex_exit(&tgt->emul64_tgt_blk_lock);
		}
		break;
	case EMUL64_ERROR_INJECT:
		rv = emul64_error_inject_req(emul64, arg);
		break;
	default:
		rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
		break;
	}
	return (rv);
}
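
/*
 * For illustration, a user-level caller would fill in an
 * emul64_tgt_range_t (field names as used in this file) and issue one
 * of the ioctls handled above; e.g. to write-protect 100 blocks
 * starting at block 0 of target 0, lun 0 (a sketch, not part of this
 * driver):
 *
 *	emul64_tgt_range_t tr;
 *
 *	tr.emul64_target = 0;
 *	tr.emul64_lun = 0;
 *	tr.emul64_blkrange.emul64_sb = 0;
 *	tr.emul64_blkrange.emul64_blkcnt = 100;
 *	(void) ioctl(fd, EMUL64_WRITE_OFF, &tr);
 */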

/* ARGSUSED */
static int
emul64_write_off(struct emul64 *emul64,
	emul64_tgt_t *tgt,
	emul64_tgt_range_t *tgtr)
{
	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	*nowrite;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;

	nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange);

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_NONE) {
		/* Insert into list */
		*prev = nowrite;
		nowrite->emul64_nwnext = cur;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);
	if (overlap == O_NONE) {
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count++;
			mutex_exit(&emul64_stats_mutex);
		}
	} else {
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%"
		    PRIx64 " overlaps 0x%llx,0x%" PRIx64 "\n",
		    nowrite->emul64_blocked.emul64_sb,
		    nowrite->emul64_blocked.emul64_blkcnt,
		    cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		emul64_nowrite_free(nowrite);
		return (EINVAL);
	}
	return (0);
}

/* ARGSUSED */
static int
emul64_write_on(struct emul64 *emul64,
		emul64_tgt_t *tgt,
		emul64_tgt_range_t *tgtr)
{
	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	int			rv = 0;
	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_SAME) {
		/* Remove from list */
		*prev = cur->emul64_nwnext;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

	switch (overlap) {
	case O_NONE:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "range not found\n", sb, blkcnt);
		rv = ENXIO;
		break;
	case O_SAME:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count--;
			mutex_exit(&emul64_stats_mutex);
		}
		emul64_nowrite_free(cur);
		break;
	case O_OVERLAP:
	case O_SUBSET:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "overlaps 0x%llx,0x%" PRIx64 "\n",
		    sb, blkcnt, cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		rv = EINVAL;
		break;
	}
	return (rv);
}

static emul64_nowrite_t *
emul64_find_nowrite(emul64_tgt_t *tgt,
		    diskaddr_t sb,
		    size_t blkcnt,
		    emul64_rng_overlap_t *overlap,
		    emul64_nowrite_t ***prevp)
{
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	**prev;

	/* Find spot in list */
	*overlap = O_NONE;
	prev = &tgt->emul64_tgt_nowrite;
	cur = tgt->emul64_tgt_nowrite;
	while (cur != NULL) {
		*overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
		if (*overlap != O_NONE)
			break;
		prev = &cur->emul64_nwnext;
		cur = cur->emul64_nwnext;
	}

	*prevp = prev;
	return (cur);
}

static emul64_nowrite_t *
emul64_nowrite_alloc(emul64_range_t *range)
{
	emul64_nowrite_t	*nw;

	nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
	bcopy((void *)range,
	    (void *)&nw->emul64_blocked,
	    sizeof (nw->emul64_blocked));
	return (nw);
}

static void
emul64_nowrite_free(emul64_nowrite_t *nw)
{
	kmem_free((void *)nw, sizeof (*nw));
}

emul64_rng_overlap_t
emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
{
	if (rng->emul64_sb >= sb + cnt)
		return (O_NONE);
	if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
		return (O_NONE);
	if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt))
		return (O_SAME);
	if ((sb >= rng->emul64_sb) &&
	    ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) {
		return (O_SUBSET);
	}
	return (O_OVERLAP);
}
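
/*
 * For example, with a nowrite range covering blocks [100, 150)
 * (emul64_sb = 100, emul64_blkcnt = 50): a request for [0, 100)
 * returns O_NONE, [100, 150) returns O_SAME, [110, 120) returns
 * O_SUBSET, and [90, 110) returns O_OVERLAP.
 */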

#include <sys/varargs.h>

/*
 * Error logging, printing, and debug print routines
 */

/*VARARGS3*/
static void
emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...)
{
	char	buf[256];
	va_list	ap;

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	scsi_log(emul64 ? emul64->emul64_dip : NULL,
	    "emul64", level, "%s\n", buf);
}


#ifdef EMUL64DEBUG

static void
emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	static char	hex[]	= "0123456789abcdef";
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	uint8_t		*cdb	= pkt->pkt_cdbp;
	char		buf[256];
	char		*p;
	int		i;

	(void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ",
	    ddi_get_instance(emul64->emul64_dip),
	    ap->a_target, ap->a_lun);

	p = buf + strlen(buf);

	*p++ = '[';
	for (i = 0; i < sp->cmd_cdblen; i++, cdb++) {
		if (i != 0)
			*p++ = ' ';
		*p++ = hex[(*cdb >> 4) & 0x0f];
		*p++ = hex[*cdb & 0x0f];
	}
	*p++ = ']';
	*p++ = '\n';
	*p = 0;

	cmn_err(CE_CONT, "%s", buf);
}
#endif	/* EMUL64DEBUG */
1653