/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 */


/*
 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target
 * devices (large disks).
 */

#ifdef DEBUG
#define	EMUL64DEBUG
#endif

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/dklabel.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

int emul64_usetaskq	= 1;	/* set to zero for debugging */
int emul64debug		= 0;
#ifdef	EMUL64DEBUG
static int emul64_cdb_debug	= 0;
#include <sys/debug.h>
#endif

/*
 * cb_ops function prototypes
 */
static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode,
			cred_t *credp, int *rvalp);

/*
 * dev_ops function prototypes
 */
static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd);
static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_pkt_comp(void *);
static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset(struct scsi_address *ap, int level);
static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void emul64_scsi_destroy_pkt(struct scsi_address *ap,
					struct scsi_pkt *pkt);
static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);

/*
 * internal functions
 */
static void emul64_i_initcap(struct emul64 *emul64);

static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...);
static int emul64_get_tgtrange(struct emul64 *,
				intptr_t,
				emul64_tgt_t **,
				emul64_tgt_range_t *);
static int emul64_write_off(struct emul64 *,
			    emul64_tgt_t *,
			    emul64_tgt_range_t *);
static int emul64_write_on(struct emul64 *,
				emul64_tgt_t *,
				emul64_tgt_range_t *);
static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *);
static void emul64_nowrite_free(emul64_nowrite_t *);
static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
					diskaddr_t start_block,
					size_t blkcnt,
					emul64_rng_overlap_t *overlapp,
					emul64_nowrite_t ***prevp);

extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);

#ifdef EMUL64DEBUG
static void emul64_debug_dump_cdb(struct scsi_address *ap,
		struct scsi_pkt *pkt);
#endif


#ifdef	_DDICT
static int	ddi_in_panic(void);
static int	ddi_in_panic() { return (0); }
#ifndef	SCSI_CAP_RESET_NOTIFICATION
#define	SCSI_CAP_RESET_NOTIFICATION		14
#endif
#ifndef	SCSI_RESET_NOTIFY
#define	SCSI_RESET_NOTIFY			0x01
#endif
#ifndef	SCSI_RESET_CANCEL
#define	SCSI_RESET_CANCEL			0x02
#endif
#endif

/*
 * Tunables:
 *
 * emul64_max_task
 *	The taskq facility is used to queue up SCSI start requests on a per
 *	controller basis.  If the maximum number of queued tasks is hit,
 *	taskq_ent_alloc() delays for a second, which adversely impacts our
 *	performance.  This value establishes the maximum number of task
 *	queue entries when taskq_create is called.
 *
 * emul64_task_nthreads
 *	Specifies the number of threads that should be used to process a
 *	controller's task queue.  Our init function sets this to the number
 *	of CPUs on the system, but this can be overridden in emul64.conf.
 */
int emul64_max_task = 16;
int emul64_task_nthreads = 1;
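
/*
 * Illustrative only: the exact names below are assumptions rather than
 * something taken verbatim from a shipped emul64.conf.  The kind of
 * override the comment above refers to would look like
 *
 *	emul64_task_nthreads=4;
 *
 * in emul64.conf, or, for the module's global tunables,
 *
 *	set emul64:emul64_max_task = 32
 *
 * in /etc/system.
 */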

/*
 * Local static data
 */
static void		*emul64_state = NULL;

/*
 * Character/block operations.
 */
static struct cb_ops emul64_cbops = {
	scsi_hba_open,		/* cb_open */
	scsi_hba_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	emul64_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_MP | D_64BIT | D_HOTPLUG, /* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * autoconfiguration routines.
 */

static struct dev_ops emul64_ops = {
	DEVO_REV,			/* rev, */
	0,				/* refcnt */
	emul64_info,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	emul64_attach,			/* attach */
	emul64_detach,			/* detach */
	nodev,				/* reset */
	&emul64_cbops,			/* char/block ops */
	NULL,				/* bus ops */
	NULL,				/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,			/* module type - driver */
	"emul64 SCSI Host Bus Adapter",	/* module name */
	&emul64_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev - must be MODREV_1 */
	&modldrv,			/* ml_linkage */
	NULL				/* end of driver linkage */
};

int
_init(void)
{
	int	ret;

	ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
	    EMUL64_INITIAL_SOFT_SPACE);
	if (ret != 0)
		return (ret);

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&emul64_state);
		return (ret);
	}

	/* Set the number of task threads to the number of CPUs */
	if (boot_max_ncpus == -1) {
		emul64_task_nthreads = max_ncpus;
	} else {
		emul64_task_nthreads = boot_max_ncpus;
	}

	emul64_bsd_init();

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		emul64_bsd_fini();
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&emul64_state);
	}

	return (ret);
}

int
_fini(void)
{
	int	ret;

	if ((ret = mod_remove(&modlinkage)) != 0)
		return (ret);

	emul64_bsd_fini();

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&emul64_state);

	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number, return the devinfo pointer
 * from the driver's soft state.
 */
/*ARGSUSED*/
static int
emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct emul64	*foo;
	int		instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		foo = ddi_get_soft_state(emul64_state, instance);
		if (foo != NULL)
			*result = (void *)foo->emul64_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Attach an instance of an emul64 host adapter.  Allocate data structures,
 * initialize the emul64 and we're on the air.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		mutex_initted = 0;
	struct emul64	*emul64;
	int		instance;
	scsi_hba_tran_t	*tran = NULL;
	ddi_dma_attr_t	tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		emul64 = TRAN2EMUL64(tran);

		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state",
		    instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == (struct emul64 *)NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}


	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran			= tran;
	emul64->emul64_dip			= dip;

	tran->tran_hba_private		= emul64;
	tran->tran_tgt_private		= NULL;
	tran->tran_tgt_init		= emul64_tran_tgt_init;
	tran->tran_tgt_probe		= scsi_hba_probe;
	tran->tran_tgt_free		= NULL;

	tran->tran_start		= emul64_scsi_start;
	tran->tran_abort		= emul64_scsi_abort;
	tran->tran_reset		= emul64_scsi_reset;
	tran->tran_getcap		= emul64_scsi_getcap;
	tran->tran_setcap		= emul64_scsi_setcap;
	tran->tran_init_pkt		= emul64_scsi_init_pkt;
	tran->tran_destroy_pkt		= emul64_scsi_destroy_pkt;
	tran->tran_dmafree		= emul64_scsi_dmafree;
	tran->tran_sync_pkt		= emul64_scsi_sync_pkt;
	tran->tran_reset_notify		= emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);


	/* mutexes to protect the emul64 request and response queue */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);


	ddi_report_dev(dip);
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}

/*ARGSUSED*/
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct emul64	*emul64;
	scsi_hba_tran_t	*tran;
	int		instance = ddi_get_instance(dip);


	/* get transport structure pointer from the dip */
	if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
		return (DDI_FAILURE);
	}

	/* get soft state from transport structure */
	emul64 = TRAN2EMUL64(tran);

	if (!emul64) {
		return (DDI_FAILURE);
	}

	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

	switch (cmd) {
	case DDI_DETACH:
		EMUL64_MUTEX_ENTER(emul64);

		taskq_destroy(emul64->emul64_taskq);
		(void) scsi_hba_detach(dip);

		scsi_hba_tran_free(emul64->emul64_tran);


		EMUL64_MUTEX_EXIT(emul64);

		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));


		EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
		ddi_soft_state_free(emul64_state, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Function name : emul64_tran_tgt_init
 *
 * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
 *
 */
/*ARGSUSED*/
static int
emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct emul64	*emul64;
	emul64_tgt_t	*tgt;
	char		**geo_vidpid = NULL;
	char		*geo, *vidpid;
	uint32_t	*geoip = NULL;
	uint_t		length;
	uint_t		length2;
	lldaddr_t	sector_count;
	char		prop_name[15];
	int		ret = DDI_FAILURE;

	emul64 = TRAN2EMUL64(tran);
	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * We get called for each target driver.conf node; multiple
	 * nodes may map to the same tgt,lun (sd.conf, st.conf, etc.).
	 * Check to see if transport to this tgt,lun is already established.
	 */
	tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
	if (tgt) {
		ret = DDI_SUCCESS;
		goto out;
	}

	/* see if we have driver.conf specified device for this target,lun */
	(void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
	    sd->sd_address.a_target, sd->sd_address.a_lun);
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
	    DDI_PROP_DONTPASS, prop_name,
	    &geo_vidpid, &length) != DDI_PROP_SUCCESS)
		goto out;
	if (length < 2) {
		cmn_err(CE_WARN, "emul64: %s property does not have 2 "
		    "elements", prop_name);
		goto out;
	}

	/* pick geometry name and vidpid string from string array */
	geo = *geo_vidpid;
	vidpid = *(geo_vidpid + 1);

	/* lookup geometry property integer array */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
	    geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
		goto out;
	}
	if (length2 < 6) {
		cmn_err(CE_WARN, "emul64: property %s does not have 6 "
		    "elements", *geo_vidpid);
		goto out;
	}

	/* allocate and initialize tgt structure for tgt,lun */
	tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
	rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
	mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);

	/* create avl for data block storage */
	avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
	    sizeof (blklist_t), offsetof(blklist_t, bl_node));

	/* save scsi_address and vidpid */
	bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
	(void) strncpy(tgt->emul64_tgt_inq, vidpid,
	    sizeof (emul64->emul64_tgt->emul64_tgt_inq));

	/*
	 * The high order 4 bytes of the sector count always come first in
	 * emul64.conf.  They are followed by the low order 4 bytes.  Not
	 * all CPU types want them in this order, but lldaddr_t takes care of
	 * this for us.  We then pick up the device type and geometry
	 * (ncyl X nheads X nsect).
	 */
	sector_count._p._u	= *(geoip + 0);
	sector_count._p._l	= *(geoip + 1);
	/*
	 * On 32-bit platforms, cap the sector count if it exceeds the
	 * allowable maximum (DK_MAX_BLOCKS).
	 */
#if !defined(_LP64)
	if (sector_count._f > DK_MAX_BLOCKS)
		sector_count._f = DK_MAX_BLOCKS;
#endif
	tgt->emul64_tgt_sectors = sector_count._f;
	tgt->emul64_tgt_dtype	= *(geoip + 2);
	tgt->emul64_tgt_ncyls	= *(geoip + 3);
	tgt->emul64_tgt_nheads	= *(geoip + 4);
	tgt->emul64_tgt_nsect	= *(geoip + 5);
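
	/*
	 * For illustration only: the property names and values below are
	 * hypothetical, not copied from a shipped emul64.conf.  A conf
	 * fragment satisfying the lookups above could look like
	 *
	 *	targ_0_0="bigdisk","EMULATED emul64 disk";
	 *	bigdisk=0,0x200000,2,2048,16,64;
	 *
	 * where the integer array supplies <sectors hi32>, <sectors lo32>,
	 * dtype, ncyl, nheads and nsect, in that order (2048 * 16 * 64 =
	 * 0x200000 sectors here).
	 */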

	/* insert target structure into list */
	tgt->emul64_tgt_next = emul64->emul64_tgt;
	emul64->emul64_tgt = tgt;
	ret = DDI_SUCCESS;

out:	EMUL64_MUTEX_EXIT(emul64);
	if (geoip)
		ddi_prop_free(geoip);
	if (geo_vidpid)
		ddi_prop_free(geo_vidpid);
	return (ret);
}

/*
 * Function name : emul64_i_initcap
 *
 * Return Values : NONE
 * Description	 : Initializes the default target capabilities and
 *		   Sync Rates.
 *
 * Context	 : Called from the user thread through attach.
 *
 */
static void
emul64_i_initcap(struct emul64 *emul64)
{
	uint16_t	cap, synch;
	int		i;

	cap = 0;
	synch = 0;
	for (i = 0; i < NTARGETS_WIDE; i++) {
		emul64->emul64_cap[i] = cap;
		emul64->emul64_synch[i] = synch;
	}
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap);
}

/*
 * Function name : emul64_scsi_getcap()
 *
 * Return Values : current value of capability, if defined
 *		   -1 if capability is not defined
 * Description	 : returns current capability value
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	int		rval = 0;

	/*
	 * We don't allow inquiring about capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		rval = 1 << 24; /* Limit to 16MB max transfer */
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 1;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 1;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = emul64->emul64_initiator_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;

	default:
		rval = -1;
		break;
	}

	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}

/*
 * Function name : emul64_scsi_setcap()
 *
 * Return Values : 1 - capability exists and can be set to new value
 *		   0 - capability could not be set to new value
 *		  -1 - no such capability
 *
 * Description	 : sets a capability for a target
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	int		rval = 0;

	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 1;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = -1;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_TOTAL_SECTORS:
		emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value;
		rval = TRUE;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = TRUE;
		break;
	default:
		rval = -1;
		break;
	}


	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}

/*
 * Function name : emul64_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_init_pkt(9F).
 *		   Refer to tran_init_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/* ARGSUSED */
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
	int flags, int (*callback)(), caddr_t arg)
{
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	struct emul64_cmd	*sp;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

	/*
	 * First step of emul64_scsi_init_pkt:  pkt allocation
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
		    statuslen,
		    tgtlen, sizeof (struct emul64_cmd), callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
			    "scsi_hba_pkt_alloc failed");
			return (NULL);
		}

		sp = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		sp->cmd_pkt		= pkt;
		sp->cmd_flags		= 0;
		sp->cmd_scblen		= statuslen;
		sp->cmd_cdblen		= cmdlen;
		sp->cmd_emul64		= emul64;
		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;

	} else {
		sp = PKT2CMD(pkt);
	}

	/*
	 * Second step of emul64_scsi_init_pkt:  dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		if (bp->b_flags & B_READ) {
			sp->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			sp->cmd_flags |= CFLAG_DMASEND;
		}
		bp_mapin(bp);
		sp->cmd_addr = (unsigned char *) bp->b_un.b_addr;
		sp->cmd_count = bp->b_bcount;
		pkt->pkt_resid = 0;
	}

	return (pkt);
}


/*
 * Function name : emul64_scsi_destroy_pkt
 *
 * Return Values : none
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_destroy_pkt(9F).
 *		   Refer to tran_destroy_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static void
emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);

	/*
	 * emul64_scsi_dmafree inline to make things faster
	 */
	if (sp->cmd_flags & CFLAG_DMAVALID) {
		/*
		 * Free the mapping.
		 */
		sp->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/*
	 * Free the pkt
	 */
	scsi_hba_pkt_free(ap, pkt);
}


/*
 * Function name : emul64_scsi_dmafree()
 *
 * Return Values : none
 * Description	 : free dvma resources
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * Function name : emul64_scsi_sync_pkt()
 *
 * Return Values : none
 * Description	 : sync dma
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * routine for reset notification setup, to register or cancel.
 */
static int
emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
void (*callback)(caddr_t), caddr_t arg)
{
	struct emul64				*emul64 = ADDR2EMUL64(ap);
	struct emul64_reset_notify_entry	*p, *beforep;
	int					rval = DDI_FAILURE;

	mutex_enter(EMUL64_REQ_MUTEX(emul64));

	p = emul64->emul64_reset_notify_listf;
	beforep = NULL;

	while (p) {
		if (p->ap == ap)
			break;	/* An entry exists for this target */
		beforep = p;
		p = p->next;
	}

	if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
		if (beforep == NULL) {
			emul64->emul64_reset_notify_listf = p->next;
		} else {
			beforep->next = p->next;
		}
		kmem_free((caddr_t)p,
		    sizeof (struct emul64_reset_notify_entry));
		rval = DDI_SUCCESS;

	} else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
		p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
		    KM_SLEEP);
		p->ap = ap;
		p->callback = callback;
		p->arg = arg;
		p->next = emul64->emul64_reset_notify_listf;
		emul64->emul64_reset_notify_listf = p;
		rval = DDI_SUCCESS;
	}

	mutex_exit(EMUL64_REQ_MUTEX(emul64));

	return (rval);
}

/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- emul64 has been shutdown
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to emul64
 *
 * Description	 : init pkt, start the request
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	int			rval	= TRAN_ACCEPT;
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	clock_t			cur_lbolt;
	taskqid_t		dispatched;

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

	pkt->pkt_reason = CMD_CMPLT;

#ifdef	EMUL64DEBUG
	if (emul64_cdb_debug) {
		emul64_debug_dump_cdb(ap, pkt);
	}
#endif	/* EMUL64DEBUG */

	/*
	 * Calculate the deadline from pkt_time.
	 * Instead of multiplying by 100 (i.e. HZ), we multiply by 128 so
	 * we can shift, and at the same time get a 28% grace period.
	 * We ignore the rare case of pkt_time == 0 and deal with it
	 * in emul64_i_watch().
	 */
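	/*
	 * Worked example (illustrative numbers only): pkt_time = 60 gives a
	 * deadline of 60 * 128 = 7680 ticks, versus 60 * 100 = 6000 ticks
	 * for an exact conversion at hz = 100; the multiply-by-128 is a
	 * cheap shift and the difference is the ~28% grace period.
	 */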
	cur_lbolt = ddi_get_lbolt();
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);

	if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
		emul64_pkt_comp((caddr_t)pkt);
	} else {
		dispatched = NULL;
		if (emul64_collect_stats) {
			/*
			 * If we are collecting statistics, call
			 * taskq_dispatch in no sleep mode, so that we can
			 * detect if we are exceeding the queue length that
			 * was established in the call to taskq_create in
			 * emul64_attach.  If the no sleep call fails
			 * (returns NULL), the task will be dispatched in
			 * sleep mode below.
			 */
			dispatched = taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
			if (dispatched == NULL) {
				/* Queue was full.  dispatch failed. */
				mutex_enter(&emul64_stats_mutex);
				emul64_taskq_max++;
				mutex_exit(&emul64_stats_mutex);
			}
		}
		if (dispatched == NULL) {
			(void) taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
		}
	}

done:
	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	return (rval);
}

void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;

	/* got check, no data transferred and ARQ done */
	arq->sts_status.sts_chk = 1;
	pkt->pkt_state |= STATE_ARQ_DONE;
	pkt->pkt_state &= ~STATE_XFERRED_DATA;

	/* for ARQ */
	arq->sts_rqpkt_reason = CMD_CMPLT;
	arq->sts_rqpkt_resid = 0;
	arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
	arq->sts_sensedata.es_valid = 1;
	arq->sts_sensedata.es_class = 0x7;
	arq->sts_sensedata.es_key = key;
	arq->sts_sensedata.es_add_code = asc;
	arq->sts_sensedata.es_qual_code = ascq;
}

ushort_t
emul64_error_inject(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;
	uint_t			max_sense_len;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	/*
	 * If there is no target, skip the error injection and
	 * let the packet be handled normally.  This would normally
	 * never happen since a_target and a_lun are set up in
	 * emul64_scsi_init_pkt.
	 */
	if (tgt == NULL) {
		return (ERR_INJ_DISABLE);
	}

	if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
		arq->sts_status = tgt->emul64_einj_scsi_status;
		pkt->pkt_state = tgt->emul64_einj_pkt_state;
		pkt->pkt_reason = tgt->emul64_einj_pkt_reason;

		/*
		 * Calculate available sense buffer length.  We could just
		 * assume sizeof(struct scsi_extended_sense) but hopefully
		 * that limitation will go away soon.
		 */
		max_sense_len = sp->cmd_scblen -
		    (sizeof (struct scsi_arq_status) -
		    sizeof (struct scsi_extended_sense));
		if (max_sense_len > tgt->emul64_einj_sense_length) {
			max_sense_len = tgt->emul64_einj_sense_length;
		}

		/* for ARQ */
		arq->sts_rqpkt_reason = CMD_CMPLT;
		arq->sts_rqpkt_resid = 0;
		arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Copy sense data */
		if (tgt->emul64_einj_sense_data != 0) {
			bcopy(tgt->emul64_einj_sense_data,
			    (uint8_t *)&arq->sts_sensedata,
			    max_sense_len);
		}
	}

	/* Return current error injection state */
	return (tgt->emul64_einj_state);
}

int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
	emul64_tgt_t		*tgt;
	struct emul64_error_inj_data error_inj_req;

	/* Check args */
	if (arg == NULL) {
		return (EINVAL);
	}

	if (ddi_copyin((void *)arg, &error_inj_req,
	    sizeof (error_inj_req), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
		return (EFAULT);
	}

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, error_inj_req.eccd_target,
	    error_inj_req.eccd_lun);
	EMUL64_MUTEX_EXIT(emul64);

	/* Make sure device exists */
	if (tgt == NULL) {
		return (ENODEV);
	}

	/* Free old sense buffer if we have one */
	if (tgt->emul64_einj_sense_data != NULL) {
		ASSERT(tgt->emul64_einj_sense_length != 0);
		kmem_free(tgt->emul64_einj_sense_data,
		    tgt->emul64_einj_sense_length);
		tgt->emul64_einj_sense_data = NULL;
		tgt->emul64_einj_sense_length = 0;
	}

	/*
	 * Now handle error injection request.  If error injection
	 * is requested we will return the sense data provided for
	 * any I/O to this target until told to stop.
	 */
	tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
	tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
	tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
	tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
	tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
	switch (error_inj_req.eccd_inj_state) {
	case ERR_INJ_ENABLE:
	case ERR_INJ_ENABLE_NODATA:
		if (error_inj_req.eccd_sns_dlen) {
			tgt->emul64_einj_sense_data =
			    kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
			/* Copy sense data */
			if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
			    tgt->emul64_einj_sense_data,
			    error_inj_req.eccd_sns_dlen, 0) != 0) {
				cmn_err(CE_WARN,
				    "emul64: sense data copy in failed\n");
				return (EFAULT);
			}
		}
		break;
	case ERR_INJ_DISABLE:
	default:
		break;
	}

	return (0);
}
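
/*
 * Note on the ioctl payload (descriptive only, no new interface implied):
 * the two ddi_copyin calls above expect arg to point at a user buffer laid
 * out as a struct emul64_error_inj_data immediately followed by
 * eccd_sns_dlen bytes of sense data; a hypothetical user-level error
 * injection utility would have to build its request in that form.
 */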

int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);

static void
emul64_handle_cmd(struct scsi_pkt *pkt)
{
	if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
		/*
		 * If error injection is configured to return with
		 * no data, return now without handling the command.
		 * This is how normal check conditions work.
		 *
		 * If the error injection state is ERR_INJ_ENABLE
		 * (or if error injection is disabled) continue and
		 * handle the command.  This would be used for
		 * KEY_RECOVERABLE_ERROR type conditions.
		 */
		return;
	}

	switch (pkt->pkt_cdbp[0]) {
	case SCMD_START_STOP:
		(void) bsd_scsi_start_stop_unit(pkt);
		break;
	case SCMD_TEST_UNIT_READY:
		(void) bsd_scsi_test_unit_ready(pkt);
		break;
	case SCMD_REQUEST_SENSE:
		(void) bsd_scsi_request_sense(pkt);
		break;
	case SCMD_INQUIRY:
		(void) bsd_scsi_inquiry(pkt);
		break;
	case SCMD_FORMAT:
		(void) bsd_scsi_format(pkt);
		break;
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		(void) bsd_scsi_io(pkt);
		break;
	case SCMD_LOG_SENSE_G1:
		(void) bsd_scsi_log_sense(pkt);
		break;
	case SCMD_MODE_SENSE:
	case SCMD_MODE_SENSE_G1:
		(void) bsd_scsi_mode_sense(pkt);
		break;
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		(void) bsd_scsi_mode_select(pkt);
		break;
	case SCMD_READ_CAPACITY:
		(void) bsd_scsi_read_capacity(pkt);
		break;
	case SCMD_SVC_ACTION_IN_G4:
		if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
			(void) bsd_scsi_read_capacity_16(pkt);
		} else {
			cmn_err(CE_WARN, "emul64: unrecognized G4 service "
			    "action 0x%x", pkt->pkt_cdbp[1]);
		}
		break;
	case SCMD_RESERVE:
	case SCMD_RESERVE_G1:
		(void) bsd_scsi_reserve(pkt);
		break;
	case SCMD_RELEASE:
	case SCMD_RELEASE_G1:
		(void) bsd_scsi_release(pkt);
		break;
	case SCMD_REASSIGN_BLOCK:
		(void) bsd_scsi_reassign_block(pkt);
		break;
	case SCMD_READ_DEFECT_LIST:
		(void) bsd_scsi_read_defect_list(pkt);
		break;
	case SCMD_PRIN:
	case SCMD_PROUT:
	case SCMD_REPORT_LUNS:
		/* ASC 0x24 INVALID FIELD IN CDB */
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	default:
		cmn_err(CE_WARN, "emul64: unrecognized "
		    "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	case SCMD_GET_CONFIGURATION:
	case 0x35:			/* SCMD_SYNCHRONIZE_CACHE */
		/* Don't complain */
		break;
	}
}
1329b1dd958fScth 
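/*
 * Completion path for an emulated command (run either directly or from a
 * taskq, depending on emul64_usetaskq).  If the addressed target does not
 * exist the packet is failed as a timeout; otherwise the CDB is emulated
 * by emul64_handle_cmd() and the packet completes with GOOD status.
 */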
1330b1dd958fScth static void
1331b1dd958fScth emul64_pkt_comp(void * arg)
1332b1dd958fScth {
1333b1dd958fScth 	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
1334b1dd958fScth 	struct emul64_cmd	*sp = PKT2CMD(pkt);
1335b1dd958fScth 	emul64_tgt_t		*tgt;
1336b1dd958fScth 
1337b1dd958fScth 	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
1338b1dd958fScth 	tgt = find_tgt(sp->cmd_emul64,
1339b1dd958fScth 	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
1340b1dd958fScth 	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
1341b1dd958fScth 	if (!tgt) {
1342b1dd958fScth 		pkt->pkt_reason = CMD_TIMEOUT;
1343b1dd958fScth 		pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
1344b1dd958fScth 		pkt->pkt_statistics = STAT_TIMEOUT;
1345b1dd958fScth 	} else {
1346b1dd958fScth 		pkt->pkt_reason = CMD_CMPLT;
1347b1dd958fScth 		*pkt->pkt_scbp = STATUS_GOOD;
1348b1dd958fScth 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
1349b1dd958fScth 		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
1350b1dd958fScth 		pkt->pkt_statistics = 0;
1351b1dd958fScth 		emul64_handle_cmd(pkt);
1352b1dd958fScth 	}
13539c57abc8Ssrivijitha dugganapalli 	scsi_hba_pkt_comp(pkt);
1354b1dd958fScth }
1355b1dd958fScth 
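/*
 * There is no real hardware behind emul64, so abort and reset have nothing
 * to do; both entry points simply report success.
 */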
1356b1dd958fScth /* ARGSUSED */
1357b1dd958fScth static int
1358b1dd958fScth emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1359b1dd958fScth {
1360b1dd958fScth 	return (1);
1361b1dd958fScth }
1362b1dd958fScth 
1363b1dd958fScth /* ARGSUSED */
1364b1dd958fScth static int
1365b1dd958fScth emul64_scsi_reset(struct scsi_address *ap, int level)
1366b1dd958fScth {
1367b1dd958fScth 	return (1);
1368b1dd958fScth }
1369b1dd958fScth 
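/*
 * Copy an emul64_tgt_range_t in from user space and look up the target it
 * names.  Returns 0 with *tgtp set on success, EFAULT on a failed copyin,
 * or ENXIO if the target/lun pair is not configured.
 */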
1370b1dd958fScth static int
1371b1dd958fScth emul64_get_tgtrange(struct emul64 *emul64,
1372b1dd958fScth 		    intptr_t arg,
1373b1dd958fScth 		    emul64_tgt_t **tgtp,
1374b1dd958fScth 		    emul64_tgt_range_t *tgtr)
1375b1dd958fScth {
1376b1dd958fScth 	if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
1377b1dd958fScth 		cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
1378b1dd958fScth 		return (EFAULT);
1379b1dd958fScth 	}
1380b1dd958fScth 	EMUL64_MUTEX_ENTER(emul64);
1381b1dd958fScth 	*tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
1382b1dd958fScth 	EMUL64_MUTEX_EXIT(emul64);
1383b1dd958fScth 	if (*tgtp == NULL) {
1384b1dd958fScth 		cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
1385b1dd958fScth 		    tgtr->emul64_target, tgtr->emul64_lun,
1386b1dd958fScth 		    ddi_get_instance(emul64->emul64_dip));
1387b1dd958fScth 		return (ENXIO);
1388b1dd958fScth 	}
1389b1dd958fScth 	return (0);
1390b1dd958fScth }
1391b1dd958fScth 
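/*
 * Driver ioctl entry point.  EMUL64_WRITE_OFF and EMUL64_WRITE_ON mark a
 * block range write-protected or writable again, EMUL64_ZERO_RANGE frees
 * the blocks backing a range, and EMUL64_ERROR_INJECT arms error
 * injection; anything else is handed to scsi_hba_ioctl().
 *
 * Illustrative sketch only (not part of the driver): a userland utility
 * that has opened the emul64 minor node (the descriptor fd below is an
 * assumption, as are the target, lun, and block values) might
 * write-protect a range roughly as follows, using the field names that
 * appear in this file:
 *
 *	emul64_tgt_range_t tr;
 *
 *	tr.emul64_target = 0;
 *	tr.emul64_lun = 0;
 *	tr.emul64_blkrange.emul64_sb = 1024;
 *	tr.emul64_blkrange.emul64_blkcnt = 256;
 *	(void) ioctl(fd, EMUL64_WRITE_OFF, &tr);
 */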
1392b1dd958fScth static int
1393b1dd958fScth emul64_ioctl(dev_t dev,
1394b1dd958fScth 	int cmd,
1395b1dd958fScth 	intptr_t arg,
1396b1dd958fScth 	int mode,
1397b1dd958fScth 	cred_t *credp,
1398b1dd958fScth 	int *rvalp)
1399b1dd958fScth {
1400b1dd958fScth 	struct emul64		*emul64;
1401b1dd958fScth 	int			instance;
1402b1dd958fScth 	int			rv = 0;
1403b1dd958fScth 	emul64_tgt_range_t	tgtr;
1404b1dd958fScth 	emul64_tgt_t		*tgt;
1405b1dd958fScth 
1406b1dd958fScth 	instance = MINOR2INST(getminor(dev));
1407b1dd958fScth 	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
1408b1dd958fScth 	if (emul64 == NULL) {
1409b1dd958fScth 		cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
1410b1dd958fScth 		    getminor(dev));
1411b1dd958fScth 		return (ENXIO);
1412b1dd958fScth 	}
1413b1dd958fScth 
1414b1dd958fScth 	switch (cmd) {
1415b1dd958fScth 	case EMUL64_WRITE_OFF:
1416b1dd958fScth 		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
1417b1dd958fScth 		if (rv == 0) {
1418b1dd958fScth 			rv = emul64_write_off(emul64, tgt, &tgtr);
1419b1dd958fScth 		}
1420b1dd958fScth 		break;
1421b1dd958fScth 	case EMUL64_WRITE_ON:
1422b1dd958fScth 		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
1423b1dd958fScth 		if (rv == 0) {
1424b1dd958fScth 			rv = emul64_write_on(emul64, tgt, &tgtr);
1425b1dd958fScth 		}
1426b1dd958fScth 		break;
1427b1dd958fScth 	case EMUL64_ZERO_RANGE:
1428b1dd958fScth 		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
1429b1dd958fScth 		if (rv == 0) {
1430b1dd958fScth 			mutex_enter(&tgt->emul64_tgt_blk_lock);
1431b1dd958fScth 			rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
1432b1dd958fScth 			mutex_exit(&tgt->emul64_tgt_blk_lock);
1433b1dd958fScth 		}
1434b1dd958fScth 		break;
1435cefe316eSpd144616 	case EMUL64_ERROR_INJECT:
1436cefe316eSpd144616 		rv = emul64_error_inject_req(emul64, arg);
1437cefe316eSpd144616 		break;
1438b1dd958fScth 	default:
1439b1dd958fScth 		rv  = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
1440b1dd958fScth 		break;
1441b1dd958fScth 	}
1442b1dd958fScth 	return (rv);
1443b1dd958fScth }
1444b1dd958fScth 
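/*
 * Handler for EMUL64_WRITE_OFF: insert the requested block range into the
 * target's nowrite list, write-protecting it until a matching
 * EMUL64_WRITE_ON.  The range must not overlap an existing entry;
 * otherwise EINVAL is returned.
 */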
1445b1dd958fScth /* ARGSUSED */
1446b1dd958fScth static int
1447b1dd958fScth emul64_write_off(struct emul64 *emul64,
1448b1dd958fScth 	emul64_tgt_t *tgt,
1449b1dd958fScth 	emul64_tgt_range_t *tgtr)
1450b1dd958fScth {
1451b1dd958fScth 	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
1452b1dd958fScth 	emul64_nowrite_t	*cur;
1453b1dd958fScth 	emul64_nowrite_t	*nowrite;
1454b1dd958fScth 	emul64_rng_overlap_t	overlap = O_NONE;
1455b1dd958fScth 	emul64_nowrite_t	**prev = NULL;
1456b1dd958fScth 	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;
1457b1dd958fScth 
1458b1dd958fScth 	nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange);
1459b1dd958fScth 
1460b1dd958fScth 	/* Find spot in list */
1461b1dd958fScth 	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
1462b1dd958fScth 	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
1463b1dd958fScth 	if (overlap == O_NONE) {
1464b1dd958fScth 		/* Insert into list */
1465b1dd958fScth 		*prev = nowrite;
1466b1dd958fScth 		nowrite->emul64_nwnext = cur;
1467b1dd958fScth 	}
1468b1dd958fScth 	rw_exit(&tgt->emul64_tgt_nw_lock);
1469b1dd958fScth 	if (overlap == O_NONE) {
1470b1dd958fScth 		if (emul64_collect_stats) {
1471b1dd958fScth 			mutex_enter(&emul64_stats_mutex);
1472b1dd958fScth 			emul64_nowrite_count++;
1473b1dd958fScth 			mutex_exit(&emul64_stats_mutex);
1474b1dd958fScth 		}
1475b1dd958fScth 	} else {
1476b1dd958fScth 		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%"
1477b1dd958fScth 		    PRIx64 " overlaps 0x%llx,0x%" PRIx64 "\n",
1478b1dd958fScth 		    nowrite->emul64_blocked.emul64_sb,
1479b1dd958fScth 		    nowrite->emul64_blocked.emul64_blkcnt,
1480b1dd958fScth 		    cur->emul64_blocked.emul64_sb,
1481b1dd958fScth 		    cur->emul64_blocked.emul64_blkcnt);
1482b1dd958fScth 		emul64_nowrite_free(nowrite);
1483b1dd958fScth 		return (EINVAL);
1484b1dd958fScth 	}
1485b1dd958fScth 	return (0);
1486b1dd958fScth }
1487b1dd958fScth 
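/*
 * Handler for EMUL64_WRITE_ON: remove a previously registered nowrite
 * range.  The range must match an existing entry exactly (O_SAME); a
 * missing range returns ENXIO and a partial overlap returns EINVAL.
 */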
1488b1dd958fScth /* ARGSUSED */
1489b1dd958fScth static int
1490b1dd958fScth emul64_write_on(struct emul64 *emul64,
1491b1dd958fScth 		emul64_tgt_t *tgt,
1492b1dd958fScth 		emul64_tgt_range_t *tgtr)
1493b1dd958fScth {
1494b1dd958fScth 	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
1495b1dd958fScth 	emul64_nowrite_t	*cur;
1496b1dd958fScth 	emul64_rng_overlap_t	overlap = O_NONE;
1497b1dd958fScth 	emul64_nowrite_t	**prev = NULL;
1498b1dd958fScth 	int			rv = 0;
1499b1dd958fScth 	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;
1500b1dd958fScth 
1501b1dd958fScth 	/* Find spot in list */
1502b1dd958fScth 	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
1503b1dd958fScth 	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
1504b1dd958fScth 	if (overlap == O_SAME) {
1505b1dd958fScth 		/* Remove from list */
1506b1dd958fScth 		*prev = cur->emul64_nwnext;
1507b1dd958fScth 	}
1508b1dd958fScth 	rw_exit(&tgt->emul64_tgt_nw_lock);
1509b1dd958fScth 
1510b1dd958fScth 	switch (overlap) {
1511b1dd958fScth 	case O_NONE:
1512b1dd958fScth 		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
1513b1dd958fScth 		    "range not found\n", sb, blkcnt);
1514b1dd958fScth 		rv = ENXIO;
1515b1dd958fScth 		break;
1516b1dd958fScth 	case O_SAME:
1517b1dd958fScth 		if (emul64_collect_stats) {
1518b1dd958fScth 			mutex_enter(&emul64_stats_mutex);
1519b1dd958fScth 			emul64_nowrite_count--;
1520b1dd958fScth 			mutex_exit(&emul64_stats_mutex);
1521b1dd958fScth 		}
1522b1dd958fScth 		emul64_nowrite_free(cur);
1523b1dd958fScth 		break;
1524b1dd958fScth 	case O_OVERLAP:
1525b1dd958fScth 	case O_SUBSET:
1526b1dd958fScth 		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
1527b1dd958fScth 		    "overlaps 0x%llx,0x%" PRIx64 "\n",
1528b1dd958fScth 		    sb, blkcnt, cur->emul64_blocked.emul64_sb,
1529b1dd958fScth 		    cur->emul64_blocked.emul64_blkcnt);
1530b1dd958fScth 		rv = EINVAL;
1531b1dd958fScth 		break;
1532b1dd958fScth 	}
1533b1dd958fScth 	return (rv);
1534b1dd958fScth }
1535b1dd958fScth 
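/*
 * Walk the target's singly linked nowrite list looking for an entry that
 * overlaps [sb, sb + blkcnt).  Returns the first overlapping entry (or
 * NULL), classifies the overlap in *overlap, and returns in *prevp the
 * link pointer to update when inserting or unlinking at that position.
 */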
1536b1dd958fScth static emul64_nowrite_t *
1537b1dd958fScth emul64_find_nowrite(emul64_tgt_t *tgt,
1538b1dd958fScth 		    diskaddr_t sb,
1539b1dd958fScth 		    size_t blkcnt,
1540b1dd958fScth 		    emul64_rng_overlap_t *overlap,
1541b1dd958fScth 		    emul64_nowrite_t ***prevp)
1542b1dd958fScth {
1543b1dd958fScth 	emul64_nowrite_t	*cur;
1544b1dd958fScth 	emul64_nowrite_t	**prev;
1545b1dd958fScth 
1546b1dd958fScth 	/* Find spot in list */
1547b1dd958fScth 	*overlap = O_NONE;
1548b1dd958fScth 	prev = &tgt->emul64_tgt_nowrite;
1549b1dd958fScth 	cur = tgt->emul64_tgt_nowrite;
1550b1dd958fScth 	while (cur != NULL) {
1551b1dd958fScth 		*overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
1552b1dd958fScth 		if (*overlap != O_NONE)
1553b1dd958fScth 			break;
1554b1dd958fScth 		prev = &cur->emul64_nwnext;
1555b1dd958fScth 		cur = cur->emul64_nwnext;
1556b1dd958fScth 	}
1557b1dd958fScth 
1558b1dd958fScth 	*prevp = prev;
1559b1dd958fScth 	return (cur);
1560b1dd958fScth }
1561b1dd958fScth 
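/*
 * Allocate a nowrite list entry covering *range.  Entries are released
 * with emul64_nowrite_free() when the range is re-enabled for writing.
 */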
1562b1dd958fScth static emul64_nowrite_t *
1563b1dd958fScth emul64_nowrite_alloc(emul64_range_t *range)
1564b1dd958fScth {
1565b1dd958fScth 	emul64_nowrite_t	*nw;
1566b1dd958fScth 
1567b1dd958fScth 	nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
1568b1dd958fScth 	bcopy((void *) range,
1569b1dd958fScth 	    (void *) &nw->emul64_blocked,
1570b1dd958fScth 	    sizeof (nw->emul64_blocked));
1571b1dd958fScth 	return (nw);
1572b1dd958fScth }
1573b1dd958fScth 
1574b1dd958fScth static void
1575b1dd958fScth emul64_nowrite_free(emul64_nowrite_t *nw)
1576b1dd958fScth {
1577b1dd958fScth 	kmem_free((void *) nw, sizeof (*nw));
1578b1dd958fScth }
1579b1dd958fScth 
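/*
 * Classify how the request [sb, sb + cnt) relates to the existing range
 * *rng: O_NONE if they are disjoint, O_SAME if they are identical,
 * O_SUBSET if the request lies entirely inside *rng, and O_OVERLAP
 * otherwise.  For example, with *rng covering blocks [100, 200), a
 * request for [120, 150) is O_SUBSET while [150, 250) is O_OVERLAP.
 */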
1580b1dd958fScth emul64_rng_overlap_t
1581b1dd958fScth emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
1582b1dd958fScth {
1583b1dd958fScth 
1584b1dd958fScth 	if (rng->emul64_sb >= sb + cnt)
1585b1dd958fScth 		return (O_NONE);
1586b1dd958fScth 	if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
1587b1dd958fScth 		return (O_NONE);
1588b1dd958fScth 	if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt))
1589b1dd958fScth 		return (O_SAME);
1590b1dd958fScth 	if ((sb >= rng->emul64_sb) &&
1591b1dd958fScth 	    ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) {
1592b1dd958fScth 		return (O_SUBSET);
1593b1dd958fScth 	}
1594b1dd958fScth 	return (O_OVERLAP);
1595b1dd958fScth }
1596b1dd958fScth 
1597b1dd958fScth #include <sys/varargs.h>
1598b1dd958fScth 
1599b1dd958fScth /*
1600b1dd958fScth  * Error logging, printing, and debug print routines
1601b1dd958fScth  */
1602b1dd958fScth 
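/*
 * printf-style logging helper: format the message into a local buffer and
 * pass it to scsi_log() against the instance's dev_info node (or NULL if
 * no instance is available).
 */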
1603b1dd958fScth /*VARARGS3*/
1604b1dd958fScth static void
1605b1dd958fScth emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...)
1606b1dd958fScth {
1607b1dd958fScth 	char	buf[256];
1608b1dd958fScth 	va_list	ap;
1609b1dd958fScth 
1610b1dd958fScth 	va_start(ap, fmt);
1611b1dd958fScth 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
1612b1dd958fScth 	va_end(ap);
1613b1dd958fScth 
1614b1dd958fScth 	scsi_log(emul64 ? emul64->emul64_dip : NULL,
1615b1dd958fScth 	    "emul64", level, "%s\n", buf);
1616b1dd958fScth }
1617b1dd958fScth 
1618b1dd958fScth 
1619b1dd958fScth #ifdef EMUL64DEBUG
1620b1dd958fScth 
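/*
 * Debug-only helper: format the CDB of a packet as hex bytes, tagged with
 * the instance and <target,lun>, and print it with cmn_err().
 */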
1621b1dd958fScth static void
1622b1dd958fScth emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt)
1623b1dd958fScth {
1624b1dd958fScth 	static char	hex[]	= "0123456789abcdef";
1625b1dd958fScth 	struct emul64	*emul64	= ADDR2EMUL64(ap);
1626b1dd958fScth 	struct emul64_cmd	*sp	= PKT2CMD(pkt);
1627b1dd958fScth 	uint8_t		*cdb	= pkt->pkt_cdbp;
1628b1dd958fScth 	char		buf[256];
1629b1dd958fScth 	char		*p;
1630b1dd958fScth 	int		i;
1631b1dd958fScth 
1632b1dd958fScth 	(void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ",
1633b1dd958fScth 	    ddi_get_instance(emul64->emul64_dip),
1634b1dd958fScth 	    ap->a_target, ap->a_lun);
1635b1dd958fScth 
1636b1dd958fScth 	p = buf + strlen(buf);
1637b1dd958fScth 
1638b1dd958fScth 	*p++ = '[';
1639b1dd958fScth 	for (i = 0; i < sp->cmd_cdblen; i++, cdb++) {
1640b1dd958fScth 		if (i != 0)
1641b1dd958fScth 			*p++ = ' ';
1642b1dd958fScth 		*p++ = hex[(*cdb >> 4) & 0x0f];
1643b1dd958fScth 		*p++ = hex[*cdb & 0x0f];
1644b1dd958fScth 	}
1645b1dd958fScth 	*p++ = ']';
1646b1dd958fScth 	*p++ = '\n';
1647b1dd958fScth 	*p = 0;
1648b1dd958fScth 
1649b1dd958fScth 	cmn_err(CE_CONT, "%s", buf);
1650b1dd958fScth }
1651b1dd958fScth #endif	/* EMUL64DEBUG */
1652