xref: /titanic_44/usr/src/uts/common/io/emul64_bsd.c (revision d3d50737e566cade9a08d73d2af95105ac7cd960)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * pseudo scsi disk driver
 */

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

/*
 * Mode sense/select page control
 */
#define	MODE_SENSE_PC_CURRENT		0
#define	MODE_SENSE_PC_CHANGEABLE	1
#define	MODE_SENSE_PC_DEFAULT		2
#define	MODE_SENSE_PC_SAVED		3

/*
 * Byte conversion macros
 */
#if	defined(_BIG_ENDIAN)
#define	ushort_to_scsi_ushort(n)	(n)
#define	uint32_to_scsi_uint32(n)	(n)
#define	uint64_to_scsi_uint64(n)	(n)
#elif	defined(_LITTLE_ENDIAN)

#define	ushort_to_scsi_ushort(n)			\
		((((n) & 0x00ff) << 8) |		\
		(((n)  & 0xff00) >> 8))

#define	uint32_to_scsi_uint32(n)			\
		((((n) & 0x000000ff) << 24) |		\
		(((n)  & 0x0000ff00) << 8) |		\
		(((n)  & 0x00ff0000) >> 8) |		\
		(((n)  & 0xff000000) >> 24))
#define	uint64_to_scsi_uint64(n)			\
		((((n) & 0x00000000000000ff) << 56) |	\
		(((n)  & 0x000000000000ff00) << 40) |	\
		(((n)  & 0x0000000000ff0000) << 24) |	\
		(((n)  & 0x00000000ff000000) << 8) |	\
		(((n)  & 0x000000ff00000000) >> 8) |	\
		(((n)  & 0x0000ff0000000000) >> 24) |	\
		(((n)  & 0x00ff000000000000) >> 40) |	\
		(((n)  & 0xff00000000000000) >> 56))
#else
error no _BIG_ENDIAN or _LITTLE_ENDIAN
#endif
#define	uint_to_byte0(n)		((n) & 0xff)
#define	uint_to_byte1(n)		(((n)>>8) & 0xff)
#define	uint_to_byte2(n)		(((n)>>16) & 0xff)
#define	uint_to_byte3(n)		(((n)>>24) & 0xff)
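
/*
 * The *_to_scsi_* macros above convert host-order integers to the
 * big-endian byte order used in SCSI data structures; on big-endian
 * hosts they are no-ops.  The uint_to_byteN macros extract byte N of a
 * value (byte 0 is the least significant byte) so that multi-byte
 * fields can be stored into a byte array one byte at a time.
 */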

/*
 * struct prop_map
 *
 * This structure maps a property name to the place to store its value.
 */
struct prop_map {
	char		*pm_name;	/* Name of the property. */
	int		*pm_value;	/* Place to store the value. */
};

static int emul64_debug_blklist = 0;

/*
 * Some interesting statistics.  These are protected by the
 * emul64_stats_mutex.  It would be nice to have an ioctl to print them out,
 * but we don't have the development time for that now.  You can at least
 * look at them with adb.
 */

int		emul64_collect_stats = 1; /* Collect stats if non-zero */
kmutex_t	emul64_stats_mutex;	/* Protect these variables */
long		emul64_nowrite_count = 0; /* # active nowrite ranges */
static uint64_t	emul64_skipped_io = 0;	/* Skipped I/O operations, because of */
					/* EMUL64_WRITE_OFF. */
static uint64_t	emul64_skipped_blk = 0;	/* Skipped blocks because of */
					/* EMUL64_WRITE_OFF. */
static uint64_t	emul64_io_ops = 0;	/* Total number of I/O operations */
					/* including skipped and actual. */
static uint64_t	emul64_io_blocks = 0;	/* Total number of blocks involved */
					/* in I/O operations. */
static uint64_t	emul64_nonzero = 0;	/* Number of non-zero data blocks */
					/* currently held in memory */
static uint64_t	emul64_max_list_length = 0; /* Maximum size of a linked */
					    /* list of non-zero blocks. */
uint64_t emul64_taskq_max = 0;		/* emul64_scsi_start uses the taskq */
					/* mechanism to dispatch work.  If */
					/* the number of entries in the */
					/* queue exceeds the maximum for */
					/* the queue, a 1 second delay is */
					/* encountered in taskq_ent_alloc. */
					/* This counter counts the number */
					/* of times that this happens. */

/*
 * Since emul64 does no physical I/O, operations that would normally be I/O
 * intensive become CPU bound.  An example of this is RAID 5
 * initialization.  When the kernel becomes CPU bound, it looks as if the
 * machine is hung.
 *
 * To avoid this problem, we provide a function, emul64_yield_check, that
 * delays from time to time to yield the CPU.  The following variables
 * are tunables for this algorithm.
 *
 *	emul64_num_delay_called	Number of times we called delay.  This is
 *				not really a tunable.  Rather it is a
 *				counter that provides useful information
 *				for adjusting the tunables.
 *	emul64_yield_length	Number of microseconds to yield the CPU.
 *	emul64_yield_period	Number of I/O operations between yields.
 *	emul64_yield_enable	emul64 will yield the CPU only if this
 *				variable contains a non-zero value.  This
 *				allows the yield functionality to be turned
 *				off for experimentation purposes.
 *
 * The value of 1000 for emul64_yield_period has been determined by
 * experience with running the tests.
 */
static uint64_t		emul64_num_delay_called = 0;
static int		emul64_yield_length = 1000;
static int		emul64_yield_period = 1000;
static int		emul64_yield_enable = 1;
static kmutex_t		emul64_yield_mutex;
static kcondvar_t	emul64_yield_cv;

/*
 * This array establishes a set of tunable variables that can be set by
 * defining properties in the emul64.conf file.
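 * For example, a line such as
 *	emul64_yield_period=2000;
 * in emul64.conf overrides the compiled-in default above.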
 */
struct prop_map emul64_properties[] = {
	"emul64_collect_stats",		&emul64_collect_stats,
	"emul64_yield_length",		&emul64_yield_length,
	"emul64_yield_period",		&emul64_yield_period,
	"emul64_yield_enable",		&emul64_yield_enable,
	"emul64_max_task",		&emul64_max_task,
	"emul64_task_nthreads",		&emul64_task_nthreads
};

static unsigned char *emul64_zeros = NULL; /* Block of 0s for comparison */

extern void emul64_check_cond(struct scsi_pkt *pkt, uchar_t key,
				uchar_t asc, uchar_t ascq);
/* ncyl=250000 acyl=2 nhead=24 nsect=357 */
uint_t dkg_rpm = 3600;

static int bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *);
static int bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_format(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_cache(struct scsi_pkt *);
static int bsd_readblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
				int, unsigned char *);
static int bsd_writeblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
				int, unsigned char *);
emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);
static blklist_t *bsd_findblk(emul64_tgt_t *, diskaddr_t, avl_index_t *);
static void bsd_allocblk(emul64_tgt_t *, diskaddr_t, caddr_t, avl_index_t);
static void bsd_freeblk(emul64_tgt_t *, blklist_t *);
static void emul64_yield_check();
static emul64_rng_overlap_t bsd_tgt_overlap(emul64_tgt_t *, diskaddr_t, int);

char *emul64_name = "emul64";


/*
 * Initialize globals in this file.
 */
void
emul64_bsd_init()
{
	emul64_zeros = (unsigned char *) kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	mutex_init(&emul64_stats_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&emul64_yield_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&emul64_yield_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Clean up globals in this file.
 */
void
emul64_bsd_fini()
{
	cv_destroy(&emul64_yield_cv);
	mutex_destroy(&emul64_yield_mutex);
	mutex_destroy(&emul64_stats_mutex);
	if (emul64_zeros != NULL) {
		kmem_free(emul64_zeros, DEV_BSIZE);
		emul64_zeros = NULL;
	}
}

/*
 * Attempt to get the values of the properties that are specified in the
 * emul64_properties array.  If the property exists, copy its value to the
 * specified location.  All of the properties have been assigned default
 * values in this driver, so it is not a problem if we cannot find a
 * property.
 */
void
emul64_bsd_get_props(dev_info_t *dip)
{
	uint_t		count;
	uint_t		i;
	struct prop_map	*pmp;
	int		*properties;

	for (pmp = emul64_properties, i = 0;
	    i < sizeof (emul64_properties) / sizeof (struct prop_map);
	    i++, pmp++) {
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, pmp->pm_name, &properties,
		    &count) == DDI_PROP_SUCCESS) {
			if (count >= 1) {
				*pmp->pm_value = *properties;
			}
			ddi_prop_free((void *) properties);
		}
	}
}

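/*
 * AVL comparison routine for the per-target tree of non-zero blocks.
 * Blocks are ordered by block number; the -1/0/1 return values are what
 * avl_create() expects from a comparator.
 */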
int
emul64_bsd_blkcompare(const void *a1, const void *b1)
{
	blklist_t	*a = (blklist_t *)a1;
	blklist_t	*b = (blklist_t *)b1;

	if (a->bl_blkno < b->bl_blkno)
		return (-1);
	if (a->bl_blkno == b->bl_blkno)
		return (0);
	return (1);
}

/* ARGSUSED 0 */
int
bsd_scsi_start_stop_unit(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_test_unit_ready(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_request_sense(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_inq_page0(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);

	if (sp->cmd_count < 6) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page0: size %d required\n",
		    emul64_name, 6);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = 6 - 3;	/* length */
	sp->cmd_addr[4] = 0;		/* 1st page */
	sp->cmd_addr[5] = 0x83;		/* 2nd page */

	pkt->pkt_resid = sp->cmd_count - 6;
	return (0);
}

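/*
 * Build an INQUIRY Device Identification VPD page (0x83).  The 14-byte
 * binary identifier is a fixed pattern followed by the driver instance,
 * target, and LUN, which together make the ID unique per emulated LUN.
 */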
int
bsd_scsi_inq_page83(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64		*emul64 = PKT2EMUL64(pkt);
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			instance = ddi_get_instance(emul64->emul64_dip);

	if (sp->cmd_count < 22) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page83: size %d required\n",
		    emul64_name, 22);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0x83;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = (22 - 8) + 4;	/* length */

	sp->cmd_addr[4] = 1;		/* code set - binary */
	sp->cmd_addr[5] = 3;		/* association and device ID type 3 */
	sp->cmd_addr[6] = 0;		/* reserved */
	sp->cmd_addr[7] = 22 - 8;	/* ID length */

	sp->cmd_addr[8] = 0xde;		/* @8: identifier, byte 0 */
	sp->cmd_addr[9] = 0xca;
	sp->cmd_addr[10] = 0xde;
	sp->cmd_addr[11] = 0x80;

	sp->cmd_addr[12] = 0xba;
	sp->cmd_addr[13] = 0xbe;
	sp->cmd_addr[14] = 0xab;
	sp->cmd_addr[15] = 0xba;
					/* @22: */

	/*
	 * Instances seem to be assigned sequentially, so it is unlikely
	 * that we will have more than 65535 of them.
	 */
	sp->cmd_addr[16] = uint_to_byte1(instance);
	sp->cmd_addr[17] = uint_to_byte0(instance);
	sp->cmd_addr[18] = uint_to_byte1(TGT(sp));
	sp->cmd_addr[19] = uint_to_byte0(TGT(sp));
	sp->cmd_addr[20] = uint_to_byte1(LUN(sp));
	sp->cmd_addr[21] = uint_to_byte0(LUN(sp));

	pkt->pkt_resid = sp->cmd_count - 22;
	return (0);
}

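/*
 * Handle the INQUIRY command.  The EVPD bit selects VPD page 0 or 0x83;
 * reserved CDB bits draw a CHECK CONDITION (invalid field in CDB).
 * Otherwise standard inquiry data is built from the target's emulated
 * inquiry information.
 */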
int
bsd_scsi_inquiry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	emul64_tgt_t		*tgt;
	uchar_t			pqdtype;
	struct scsi_inquiry	inq;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	if (sp->cmd_count < sizeof (inq)) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inquiry: size %d required\n",
		    emul64_name, (int)sizeof (inq));
		return (EIO);
	}

	if (cdb->cdb_opaque[1] & 0xfc) {
		cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: 0x%x",
		    emul64_name, cdb->cdb_opaque[1]);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0);	/* inv. fld in cdb */
		return (0);
	}

	pqdtype = tgt->emul64_tgt_dtype;
	if (cdb->cdb_opaque[1] & 0x1) {
		switch (cdb->cdb_opaque[2]) {
		case 0x00:
			return (bsd_scsi_inq_page0(pkt, pqdtype));
		case 0x83:
			return (bsd_scsi_inq_page83(pkt, pqdtype));
		default:
			cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: "
			    "unsupported 0x%x",
			    emul64_name, cdb->cdb_opaque[2]);
			return (0);
		}
	}

	/* set up the inquiry data we return */
	(void) bzero((void *)&inq, sizeof (inq));

	inq.inq_dtype = pqdtype;
	inq.inq_ansi = 2;
	inq.inq_rdf = 2;
	inq.inq_len = sizeof (inq) - 4;
	inq.inq_wbus16 = 1;
	inq.inq_cmdque = 1;

	(void) bcopy(tgt->emul64_tgt_inq, inq.inq_vid,
	    sizeof (tgt->emul64_tgt_inq));
	(void) bcopy("1", inq.inq_revision, 2);
	(void) bcopy((void *)&inq, sp->cmd_addr, sizeof (inq));

	pkt->pkt_resid = sp->cmd_count - sizeof (inq);
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_format(struct scsi_pkt *pkt)
{
	return (0);
}

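/*
 * Decode READ/WRITE CDBs.  Group 0, group 1, and group 4 forms are
 * supported; the LBA and block count are pulled from the CDB and the
 * transfer is handed to bsd_readblks()/bsd_writeblks(), whose return
 * value (bytes not transferred) becomes pkt_resid.
 */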
int
bsd_scsi_io(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	diskaddr_t		lblkno;
	int			nblks;

	switch (cdb->scc_cmd) {
	case SCMD_READ:
			lblkno = (uint32_t)GETG0ADDR(cdb);
			nblks = GETG0COUNT(cdb);
			pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
			    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
			    lblkno, nblks, sp->cmd_addr);
			if (emul64debug) {
				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
				    "read g0 blk=%lld (0x%llx) nblks=%d\n",
				    emul64_name, lblkno, lblkno, nblks);
			}
		break;
	case SCMD_WRITE:
			lblkno = (uint32_t)GETG0ADDR(cdb);
			nblks = GETG0COUNT(cdb);
			pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
			    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
			    lblkno, nblks, sp->cmd_addr);
			if (emul64debug) {
				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
				    "write g0 blk=%lld (0x%llx) nblks=%d\n",
				    emul64_name, lblkno, lblkno, nblks);
			}
		break;
	case SCMD_READ_G1:
			lblkno = (uint32_t)GETG1ADDR(cdb);
			nblks = GETG1COUNT(cdb);
			pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
			    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
			    lblkno, nblks, sp->cmd_addr);
			if (emul64debug) {
				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
				    "read g1 blk=%lld (0x%llx) nblks=%d\n",
				    emul64_name, lblkno, lblkno, nblks);
			}
		break;
	case SCMD_WRITE_G1:
			lblkno = (uint32_t)GETG1ADDR(cdb);
			nblks = GETG1COUNT(cdb);
			pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
			    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
			    lblkno, nblks, sp->cmd_addr);
			if (emul64debug) {
				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
				    "write g1 blk=%lld (0x%llx) nblks=%d\n",
				    emul64_name, lblkno, lblkno, nblks);
			}
		break;
	case SCMD_READ_G4:
			lblkno = GETG4ADDR(cdb);
			lblkno <<= 32;
			lblkno |= (uint32_t)GETG4ADDRTL(cdb);
			nblks = GETG4COUNT(cdb);
			pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
			    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
			    lblkno, nblks, sp->cmd_addr);
			if (emul64debug) {
				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
				    "read g4 blk=%lld (0x%llx) nblks=%d\n",
				    emul64_name, lblkno, lblkno, nblks);
			}
		break;
	case SCMD_WRITE_G4:
			lblkno = GETG4ADDR(cdb);
			lblkno <<= 32;
			lblkno |= (uint32_t)GETG4ADDRTL(cdb);
			nblks = GETG4COUNT(cdb);
			pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
			    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
			    lblkno, nblks, sp->cmd_addr);
			if (emul64debug) {
				cmn_err(CE_CONT, "%s: bsd_scsi_io: "
				    "write g4 blk=%lld (0x%llx) nblks=%d\n",
				    emul64_name, lblkno, lblkno, nblks);
			}
		break;
	default:
		cmn_err(CE_WARN, "%s: bsd_scsi_io: unhandled I/O: 0x%x",
		    emul64_name, cdb->scc_cmd);
		break;
	}

	if (pkt->pkt_resid != 0)
		cmn_err(CE_WARN, "%s: bsd_scsi_io: "
		    "pkt_resid: 0x%lx, lblkno %lld, nblks %d",
		    emul64_name, pkt->pkt_resid, lblkno, nblks);

	return (0);
}

int
bsd_scsi_log_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			page_code;

	if (sp->cmd_count < 9) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense size %d required\n",
		    emul64_name, 9);
		return (EIO);
	}

	page_code = cdb->cdb_opaque[2] & 0x3f;
	if (page_code) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0); /* inv. fld in cdb */
		return (0);
	}

	sp->cmd_addr[0] = 0;		/* page code */
	sp->cmd_addr[1] = 0;		/* reserved */
	sp->cmd_addr[2] = 0;		/* MSB of page length */
	sp->cmd_addr[3] = 8 - 3;	/* LSB of page length */

	sp->cmd_addr[4] = 0;		/* MSB of parameter code */
	sp->cmd_addr[5] = 0;		/* LSB of parameter code */
	sp->cmd_addr[6] = 0;		/* parameter control byte */
	sp->cmd_addr[7] = 4 - 3;	/* parameter length */
	sp->cmd_addr[8] = 0x0;		/* parameter value */

	pkt->pkt_resid = sp->cmd_count - 9;
	return (0);
}

int
bsd_scsi_mode_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb	*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int		page_control;
	int		page_code;
	int		rval = 0;

	switch (cdb->scc_cmd) {
	case SCMD_MODE_SENSE:
			page_code = cdb->cdb_opaque[2] & 0x3f;
			page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
			if (emul64debug) {
				cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
				    "page=0x%x control=0x%x nbytes=%d\n",
				    emul64_name, page_code, page_control,
				    GETG0COUNT(cdb));
			}
		break;
	case SCMD_MODE_SENSE_G1:
			page_code = cdb->cdb_opaque[2] & 0x3f;
			page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
			if (emul64debug) {
				cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
				    "page=0x%x control=0x%x nbytes=%d\n",
				    emul64_name, page_code, page_control,
				    GETG1COUNT(cdb));
			}
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "cmd 0x%x not supported\n", emul64_name, cdb->scc_cmd);
		return (EIO);
	}

	switch (page_code) {
	case DAD_MODE_GEOMETRY:
		rval = bsd_mode_sense_dad_mode_geometry(pkt);
		break;
	case DAD_MODE_ERR_RECOV:
		rval = bsd_mode_sense_dad_mode_err_recov(pkt);
		break;
	case MODEPAGE_DISCO_RECO:
		rval = bsd_mode_sense_modepage_disco_reco(pkt);
		break;
	case DAD_MODE_FORMAT:
		rval = bsd_mode_sense_dad_mode_format(pkt);
		break;
	case DAD_MODE_CACHE:
		rval = bsd_mode_sense_dad_mode_cache(pkt);
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		rval = EIO;
		break;
	}

	return (rval);
}

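/*
 * The bsd_mode_sense_* helpers below all follow the same pattern:
 * build a mode header plus the requested page in local structures,
 * fill in values for the current/default/saved page controls (or a
 * changeable-bits mask where one is reported), then copy the result
 * into the data-in buffer and set pkt_resid to the unused space.
 */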
static int
bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_geometry	page4;
	int			ncyl;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page4))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page4)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page4, sizeof (page4));

	header.length = sizeof (header) + sizeof (page4) - 1;
	header.bdesc_length = 0;

	page4.mode_page.code = DAD_MODE_GEOMETRY;
	page4.mode_page.ps = 1;
	page4.mode_page.length = sizeof (page4) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		ncyl = tgt->emul64_tgt_ncyls;
		page4.cyl_ub = uint_to_byte2(ncyl);
		page4.cyl_mb = uint_to_byte1(ncyl);
		page4.cyl_lb = uint_to_byte0(ncyl);
		page4.heads = uint_to_byte0(tgt->emul64_tgt_nheads);
		page4.rpm = ushort_to_scsi_ushort(dkg_rpm);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		page4.cyl_ub = 0xff;
		page4.cyl_mb = 0xff;
		page4.cyl_lb = 0xff;
		page4.heads = 0xff;
		page4.rpm = 0xffff;
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page4, addr + sizeof (header), sizeof (page4));

	pkt->pkt_resid = sp->cmd_count - sizeof (page4) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_err_recov	page1;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page1))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page1)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page1, sizeof (page1));

	header.length = sizeof (header) + sizeof (page1) - 1;
	header.bdesc_length = 0;

	page1.mode_page.code = DAD_MODE_ERR_RECOV;
	page1.mode_page.ps = 1;
	page1.mode_page.length = sizeof (page1) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page1, addr + sizeof (header), sizeof (page1));

	pkt->pkt_resid = sp->cmd_count - sizeof (page1) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int			rval = 0;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_disco_reco	page2;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page2))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page2)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page2, sizeof (page2));

	header.length = sizeof (header) + sizeof (page2) - 1;
	header.bdesc_length = 0;

	page2.mode_page.code = MODEPAGE_DISCO_RECO;
	page2.mode_page.ps = 1;
	page2.mode_page.length = sizeof (page2) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page2, addr + sizeof (header), sizeof (page2));

	pkt->pkt_resid = sp->cmd_count - sizeof (page2) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_dad_mode_format(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_format	page3;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page3))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page3)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page3, sizeof (page3));

	header.length = sizeof (header) + sizeof (page3) - 1;
	header.bdesc_length = 0;

	page3.mode_page.code = DAD_MODE_FORMAT;
	page3.mode_page.ps = 1;
	page3.mode_page.length = sizeof (page3) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		page3.data_bytes_sect = ushort_to_scsi_ushort(DEV_BSIZE);
		page3.interleave = ushort_to_scsi_ushort(1);
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		page3.sect_track = ushort_to_scsi_ushort(tgt->emul64_tgt_nsect);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page3, addr + sizeof (header), sizeof (page3));

	pkt->pkt_resid = sp->cmd_count - sizeof (page3) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_dad_mode_cache(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_cache	page8;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page8))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page8)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page8, sizeof (page8));

	header.length = sizeof (header) + sizeof (page8) - 1;
	header.bdesc_length = 0;

	page8.mode_page.code = DAD_MODE_CACHE;
	page8.mode_page.ps = 1;
	page8.mode_page.length = sizeof (page8) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page8, addr + sizeof (header), sizeof (page8));

	pkt->pkt_resid = sp->cmd_count - sizeof (page8) - sizeof (header);
	rval = 0;

	return (rval);
}

/* ARGSUSED 0 */
int
bsd_scsi_mode_select(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_read_capacity_8(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity	cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (tgt->emul64_tgt_sectors > 0xffffffff)
		cap.capacity = 0xffffffff;
	else
		cap.capacity =
		    uint32_to_scsi_uint32(tgt->emul64_tgt_sectors);
	cap.lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity));
	return (rval);
}

int
bsd_scsi_read_capacity_16(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity_16 cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	cap.sc_capacity = uint64_to_scsi_uint64(tgt->emul64_tgt_sectors);
	cap.sc_lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);
	cap.sc_rto_en = 0;
	cap.sc_prot_en = 0;
	cap.sc_rsvd0 = 0;
	bzero(&cap.sc_rsvd1[0], sizeof (cap.sc_rsvd1));

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity_16);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity_16));
	return (rval);
}

int
bsd_scsi_read_capacity(struct scsi_pkt *pkt)
{
	return (bsd_scsi_read_capacity_8(pkt));
}


/* ARGSUSED 0 */
int
bsd_scsi_reserve(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_release(struct scsi_pkt *pkt)
{
	return (0);
}


int
bsd_scsi_read_defect_list(struct scsi_pkt *pkt)
{
	pkt->pkt_resid = 0;
	return (0);
}


/* ARGSUSED 0 */
int
bsd_scsi_reassign_block(struct scsi_pkt *pkt)
{
	return (0);
}

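/*
 * Read nblks blocks starting at blkno into bufaddr.  Blocks that have a
 * node in the target's AVL tree return their stored data; blocks with
 * no node read back as zeros.  Reads that touch a nowrite range are
 * rejected.  The return value is the number of bytes not transferred,
 * which the caller uses as pkt_resid.
 */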
static int
bsd_readblks(struct emul64 *emul64, ushort_t target, ushort_t lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t	*tgt;
	blklist_t	*blk;
	emul64_rng_overlap_t overlap;
	int		i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_readblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, target, lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, target, lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_readblks: no target for %d,%d\n",
		    emul64_name, target, lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);

	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
	case O_OVERLAP:
		cmn_err(CE_WARN, "%s: bsd_readblks: "
		    "read to blocked area %lld,%d\n",
		    emul64_name, blkno, nblks);
		rw_exit(&tgt->emul64_tgt_nw_lock);
		goto errout;
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if (emul64_debug_blklist)
			cmn_err(CE_CONT, "%s: bsd_readblks: "
			    "%d of %d: blkno %lld\n",
			    emul64_name, i+1, nblks, blkno);
		if (blkno > tgt->emul64_tgt_sectors)
			break;
		blk = bsd_findblk(tgt, blkno, NULL);
		if (blk) {
			(void) bcopy(blk->bl_data, bufaddr, DEV_BSIZE);
		} else {
			(void) bzero(bufaddr, DEV_BSIZE);
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

errout:
	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}

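/*
 * Write nblks blocks starting at blkno from bufaddr.  Writes that fall
 * entirely within a nowrite range are silently skipped (and counted).
 * An all-zero block frees any existing node, since missing blocks read
 * back as zeros; non-zero data overwrites an existing node or allocates
 * a new one.  Returns the number of bytes not transferred.
 */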
static int
bsd_writeblks(struct emul64 *emul64, ushort_t target, ushort_t lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t	*tgt;
	blklist_t	*blk;
	emul64_rng_overlap_t overlap;
	avl_index_t	where;
	int		i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_writeblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, target, lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, target, lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_writeblks: no target for %d,%d\n",
		    emul64_name, target, lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);
	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_skipped_io++;
			emul64_skipped_blk += nblks;
			mutex_exit(&emul64_stats_mutex);
		}
		rw_exit(&tgt->emul64_tgt_nw_lock);
		mutex_exit(&tgt->emul64_tgt_blk_lock);
		return (0);
	case O_OVERLAP:
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if ((overlap == O_NONE) ||
		    (bsd_tgt_overlap(tgt, blkno, 1) == O_NONE)) {
			/*
			 * If there was no overlap for the entire I/O range
			 * or if there is no overlap for this particular
			 * block, then we need to do the write.
			 */
			if (emul64_debug_blklist)
				cmn_err(CE_CONT, "%s: bsd_writeblks: "
				    "%d of %d: blkno %lld\n",
				    emul64_name, i+1, nblks, blkno);
			if (blkno > tgt->emul64_tgt_sectors) {
				cmn_err(CE_WARN, "%s: bsd_writeblks: "
				    "blkno %lld, tgt_sectors %lld\n",
				    emul64_name, blkno,
				    tgt->emul64_tgt_sectors);
				break;
			}

			blk = bsd_findblk(tgt, blkno, &where);
			if (bcmp(bufaddr, emul64_zeros, DEV_BSIZE) == 0) {
				if (blk) {
					bsd_freeblk(tgt, blk);
				}
			} else {
				if (blk) {
					(void) bcopy(bufaddr, blk->bl_data,
					    DEV_BSIZE);
				} else {
					bsd_allocblk(tgt, blkno,
					    (caddr_t)bufaddr, where);
				}
			}
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}

	/*
	 * Now that we're done with our I/O, allow the ioctls to change the
	 * nowrite list.
	 */
	rw_exit(&tgt->emul64_tgt_nw_lock);

errout:
	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}

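/*
 * Walk the emul64 soft state's target list looking for the given
 * target/LUN pair.  Returns NULL if no such target has been configured.
 * Callers in this file bracket the call with EMUL64_MUTEX_ENTER/EXIT to
 * keep the list stable while it is walked.
 */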
emul64_tgt_t *
find_tgt(struct emul64 *emul64, ushort_t target, ushort_t lun)
{
	emul64_tgt_t	*tgt;

	tgt = emul64->emul64_tgt;
	while (tgt) {
		if (tgt->emul64_tgt_saddr.a_target == target &&
		    tgt->emul64_tgt_saddr.a_lun == lun) {
			break;
		}
		tgt = tgt->emul64_tgt_next;
	}
	return (tgt);

}

/*
 * Free all blocks that are part of the specified range.
 */
int
bsd_freeblkrange(emul64_tgt_t *tgt, emul64_range_t *range)
{
	blklist_t	*blk;
	blklist_t	*nextblk;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));
	for (blk = (blklist_t *)avl_first(&tgt->emul64_tgt_data);
	    blk != NULL;
	    blk = nextblk) {
		/*
		 * We need to get the next block pointer now, because blk
		 * will be freed inside the if statement.
		 */
		nextblk = AVL_NEXT(&tgt->emul64_tgt_data, blk);

		if (emul64_overlap(range, blk->bl_blkno, (size_t)1) != O_NONE) {
			bsd_freeblk(tgt, blk);
		}
	}
	return (0);
}

static blklist_t *
bsd_findblk(emul64_tgt_t *tgt, diskaddr_t blkno, avl_index_t *where)
{
	blklist_t	*blk;
	blklist_t	search;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	search.bl_blkno = blkno;
	blk = (blklist_t *)avl_find(&tgt->emul64_tgt_data, &search, where);
	return (blk);
}

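/*
 * Allocate a block-list node holding one DEV_BSIZE block of data and
 * insert it at 'where', the insertion point returned by the preceding
 * bsd_findblk()/avl_find() lookup.
 */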
static void
bsd_allocblk(emul64_tgt_t *tgt,
		diskaddr_t blkno,
		caddr_t data,
		avl_index_t where)
{
	blklist_t	*blk;

	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_allocblk: %llu\n",
		    emul64_name, blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	blk = (blklist_t *)kmem_zalloc(sizeof (blklist_t), KM_SLEEP);
	blk->bl_data = (uchar_t *)kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	blk->bl_blkno = blkno;
	(void) bcopy(data, blk->bl_data, DEV_BSIZE);
	avl_insert(&tgt->emul64_tgt_data, (void *) blk, where);

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero++;
		tgt->emul64_list_length++;
		if (tgt->emul64_list_length > emul64_max_list_length) {
			emul64_max_list_length = tgt->emul64_list_length;
		}
		mutex_exit(&emul64_stats_mutex);
	}
}

static void
bsd_freeblk(emul64_tgt_t *tgt, blklist_t *blk)
{
	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_freeblk: <%d,%d> blk=%lld\n",
		    emul64_name, tgt->emul64_tgt_saddr.a_target,
		    tgt->emul64_tgt_saddr.a_lun, blk->bl_blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	avl_remove(&tgt->emul64_tgt_data, (void *) blk);
	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero--;
		tgt->emul64_list_length--;
		mutex_exit(&emul64_stats_mutex);
	}
	kmem_free(blk->bl_data, DEV_BSIZE);
	kmem_free(blk, sizeof (blklist_t));
}

/*
 * Look for overlap between a nowrite range and a block range.
 *
 * NOTE:  Callers of this function must hold the tgt->emul64_tgt_nw_lock
 *	  lock.  For the purposes of this function, a reader lock is
 *	  sufficient.
 */
static emul64_rng_overlap_t
bsd_tgt_overlap(emul64_tgt_t *tgt, diskaddr_t blkno, int count)
{
	emul64_nowrite_t	*nw;
	emul64_rng_overlap_t	rv = O_NONE;

	for (nw = tgt->emul64_tgt_nowrite;
	    (nw != NULL) && (rv == O_NONE);
	    nw = nw->emul64_nwnext) {
		rv = emul64_overlap(&nw->emul64_blocked, blkno, (size_t)count);
	}
	return (rv);
}

/*
 * Operations that do a lot of I/O, such as RAID 5 initializations, result
 * in a CPU bound kernel when the device is an emul64 device.  This makes
 * the machine look hung.  To avoid this problem, give up the CPU from time
 * to time.
 */

static void
emul64_yield_check()
{
	static uint_t	emul64_io_count = 0;	/* # I/Os since last wait */
	static uint_t	emul64_waiting = FALSE;	/* TRUE -> a thread is in */
						/*   cv_reltimedwait. */
	clock_t		ticks;

	if (emul64_yield_enable == 0)
		return;

	mutex_enter(&emul64_yield_mutex);

	if (emul64_waiting == TRUE) {
		/*
		 * Another thread has already started the timer.  We'll
		 * just wait here until their time expires, and they
		 * broadcast to us.  When they do that, we'll return and
		 * let our caller do more I/O.
		 */
		cv_wait(&emul64_yield_cv, &emul64_yield_mutex);
	} else if (emul64_io_count++ > emul64_yield_period) {
		/*
		 * Set emul64_waiting to let other threads know that we
		 * have started the timer.
		 */
		emul64_waiting = TRUE;
		emul64_num_delay_called++;
		ticks = drv_usectohz(emul64_yield_length);
		if (ticks == 0)
			ticks = 1;
		(void) cv_reltimedwait(&emul64_yield_cv, &emul64_yield_mutex,
		    ticks, TR_CLOCK_TICK);
		emul64_io_count = 0;
		emul64_waiting = FALSE;

		/* Broadcast in case others are waiting. */
		cv_broadcast(&emul64_yield_cv);
	}

	mutex_exit(&emul64_yield_mutex);
}