/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * pseudo scsi disk driver
 */

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

/*
 * Mode sense/select page control
 */
#define	MODE_SENSE_PC_CURRENT		0
#define	MODE_SENSE_PC_CHANGEABLE	1
#define	MODE_SENSE_PC_DEFAULT		2
#define	MODE_SENSE_PC_SAVED		3
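
/*
 * The page control field is carried in the top two bits of CDB byte 2, so
 * the bsd_mode_sense_* routines below extract it as
 * ((cdb->cdb_opaque[2] >> 6) & 0x03) and compare it against these values.
 */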

/*
 * Byte conversion macros
 */
#if	defined(_BIG_ENDIAN)
#define	ushort_to_scsi_ushort(n)	(n)
#define	uint32_to_scsi_uint32(n)	(n)
#define	uint64_to_scsi_uint64(n)	(n)
#elif	defined(_LITTLE_ENDIAN)

#define	ushort_to_scsi_ushort(n)			\
		((((n) & 0x00ff) << 8) |		\
		(((n)  & 0xff00) >> 8))

#define	uint32_to_scsi_uint32(n)			\
		((((n) & 0x000000ff) << 24) |		\
		(((n)  & 0x0000ff00) << 8) |		\
		(((n)  & 0x00ff0000) >> 8) |		\
		(((n)  & 0xff000000) >> 24))
#define	uint64_to_scsi_uint64(n)				\
		((((n) & 0x00000000000000ff) << 56) |		\
		(((n)  & 0x000000000000ff00) << 40) |		\
		(((n)  & 0x0000000000ff0000) << 24) |		\
		(((n)  & 0x00000000ff000000) << 8) |		\
		(((n)  & 0x000000ff00000000) >> 8) |		\
		(((n)  & 0x0000ff0000000000) >> 24) |		\
		(((n)  & 0x00ff000000000000) >> 40) |		\
		(((n)  & 0xff00000000000000) >> 56))
#else
#error "no _BIG_ENDIAN or _LITTLE_ENDIAN"
#endif
#define	uint_to_byte0(n)		((n) & 0xff)
#define	uint_to_byte1(n)		(((n)>>8) & 0xff)
#define	uint_to_byte2(n)		(((n)>>16) & 0xff)
#define	uint_to_byte3(n)		(((n)>>24) & 0xff)
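
/*
 * For example, on a little-endian host uint32_to_scsi_uint32(0x00012345)
 * evaluates to 0x45230100; stored in memory, its bytes read 00 01 23 45,
 * which is the big-endian order that the SCSI wire format expects.
 */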

/*
 * struct prop_map
 *
 * This structure maps a property name to the place to store its value.
 */
struct prop_map {
	char		*pm_name;	/* Name of the property. */
	int		*pm_value;	/* Place to store the value. */
};

static int emul64_debug_blklist = 0;

/*
 * Some interesting statistics.  These are protected by the
 * emul64_stats_mutex.  It would be nice to have an ioctl to print them out,
 * but we don't have the development time for that now.  You can at least
 * look at them with adb.
 */
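
/*
 * For example (illustrative invocation), a counter such as emul64_io_ops
 * can be read on a live system with mdb:
 *
 *	# echo 'emul64_io_ops/E' | mdb -k
 *
 * where /E prints an unsigned 64-bit decimal value.
 */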

int		emul64_collect_stats = 1; /* Collect stats if non-zero */
kmutex_t	emul64_stats_mutex;	/* Protect these variables */
long		emul64_nowrite_count = 0; /* # active nowrite ranges */
static uint64_t	emul64_skipped_io = 0;	/* Skipped I/O operations because */
					/* of EMUL64_WRITE_OFF. */
static uint64_t	emul64_skipped_blk = 0;	/* Skipped blocks because of */
					/* EMUL64_WRITE_OFF. */
static uint64_t	emul64_io_ops = 0;	/* Total number of I/O operations, */
					/* including skipped and actual. */
static uint64_t	emul64_io_blocks = 0;	/* Total number of blocks involved */
					/* in I/O operations. */
static uint64_t	emul64_nonzero = 0;	/* Number of non-zero data blocks */
					/* currently held in memory. */
static uint64_t	emul64_max_list_length = 0; /* Maximum size of a linked */
					    /* list of non-zero blocks. */
uint64_t emul64_taskq_max = 0;		/* emul64_scsi_start uses the taskq */
					/* mechanism to dispatch work.  If */
					/* the number of entries in the queue */
					/* exceeds the maximum for the queue, */
					/* a 1 second delay is encountered in */
					/* taskq_ent_alloc.  This counter */
					/* counts the number of times that */
					/* this happens. */

/*
 * Since emul64 does no physical I/O, operations that would normally be I/O
 * intensive become CPU bound.  An example of this is RAID 5
 * initialization.  When the kernel becomes CPU bound, it looks as if the
 * machine is hung.
 *
 * To avoid this problem, we provide a function, emul64_yield_check, that does
 * a delay from time to time to yield up the CPU.  The following variables
 * are tunables for this algorithm.
 *
 *	emul64_num_delay_called	Number of times we called delay.  This is
 *				not really a tunable.  Rather it is a
 *				counter that provides useful information
 *				for adjusting the tunables.
 *	emul64_yield_length	Number of microseconds to yield the CPU.
 *	emul64_yield_period	Number of I/O operations between yields.
 *	emul64_yield_enable	emul64 will yield the CPU only if this
 *				variable contains a non-zero value.  This
 *				allows the yield functionality to be turned
 *				off for experimentation purposes.
 *
 * The value of 1000 for emul64_yield_period has been determined by
 * experience with running the tests.
 */
static uint64_t		emul64_num_delay_called = 0;
static int		emul64_yield_length = 1000;
static int		emul64_yield_period = 1000;
static int		emul64_yield_enable = 1;
static kmutex_t		emul64_yield_mutex;
static kcondvar_t	emul64_yield_cv;

/*
 * This array establishes a set of tunable variables that can be set by
 * defining properties in the emul64.conf file.
 */
struct prop_map emul64_properties[] = {
	{ "emul64_collect_stats",	&emul64_collect_stats },
	{ "emul64_yield_length",	&emul64_yield_length },
	{ "emul64_yield_period",	&emul64_yield_period },
	{ "emul64_yield_enable",	&emul64_yield_enable },
	{ "emul64_max_task",		&emul64_max_task },
	{ "emul64_task_nthreads",	&emul64_task_nthreads }
};
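
/*
 * For example (values are illustrative only), an emul64.conf entry that
 * overrides two of these tunables might look like:
 *
 *	emul64_yield_period=500 emul64_collect_stats=0;
 */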

static unsigned char *emul64_zeros = NULL; /* Block of 0s for comparison */

extern void emul64_check_cond(struct scsi_pkt *pkt, uchar_t key,
    uchar_t asc, uchar_t ascq);
/* ncyl=250000 acyl=2 nhead=24 nsect=357 */
uint_t dkg_rpm = 3600;

static int bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *);
static int bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_format(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_cache(struct scsi_pkt *);
static int bsd_readblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
    int, unsigned char *);
static int bsd_writeblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
    int, unsigned char *);
emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);
static blklist_t *bsd_findblk(emul64_tgt_t *, diskaddr_t, avl_index_t *);
static void bsd_allocblk(emul64_tgt_t *, diskaddr_t, caddr_t, avl_index_t);
static void bsd_freeblk(emul64_tgt_t *, blklist_t *);
static void emul64_yield_check(void);
static emul64_rng_overlap_t bsd_tgt_overlap(emul64_tgt_t *, diskaddr_t, int);

char *emul64_name = "emul64";


/*
 * Initialize globals in this file.
 */
void
emul64_bsd_init()
{
	emul64_zeros = (unsigned char *)kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	mutex_init(&emul64_stats_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&emul64_yield_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&emul64_yield_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Clean up globals in this file.
 */
void
emul64_bsd_fini()
{
	cv_destroy(&emul64_yield_cv);
	mutex_destroy(&emul64_yield_mutex);
	mutex_destroy(&emul64_stats_mutex);
	if (emul64_zeros != NULL) {
		kmem_free(emul64_zeros, DEV_BSIZE);
		emul64_zeros = NULL;
	}
}

/*
 * Attempt to get the values of the properties that are specified in the
 * emul64_properties array.  If the property exists, copy its value to the
 * specified location.  All the properties have been assigned default
 * values in this driver, so it is not a problem if we cannot get a
 * property.
 */
void
emul64_bsd_get_props(dev_info_t *dip)
{
	uint_t		count;
	uint_t		i;
	struct prop_map	*pmp;
	int		*properties;

	for (pmp = emul64_properties, i = 0;
	    i < sizeof (emul64_properties) / sizeof (struct prop_map);
	    i++, pmp++) {
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, pmp->pm_name, &properties,
		    &count) == DDI_PROP_SUCCESS) {
			if (count >= 1) {
				*pmp->pm_value = *properties;
			}
			ddi_prop_free((void *)properties);
		}
	}
}

int
emul64_bsd_blkcompare(const void *a1, const void *b1)
{
	blklist_t	*a = (blklist_t *)a1;
	blklist_t	*b = (blklist_t *)b1;

	if (a->bl_blkno < b->bl_blkno)
		return (-1);
	if (a->bl_blkno == b->bl_blkno)
		return (0);
	return (1);
}
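
/*
 * This comparator orders each target's emul64_tgt_data AVL tree by block
 * number.  As a sketch (the real call lives in the target-setup code, and
 * the bl_node member name is an assumption here), the tree would be
 * initialized along these lines:
 *
 *	avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
 *	    sizeof (blklist_t), offsetof(blklist_t, bl_node));
 */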

/* ARGSUSED 0 */
int
bsd_scsi_start_stop_unit(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_test_unit_ready(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_request_sense(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_inq_page0(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);

	if (sp->cmd_count < 6) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page0: size %d required\n",
		    emul64_name, 6);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = 6 - 3;	/* length */
	sp->cmd_addr[4] = 0;		/* 1st page */
	sp->cmd_addr[5] = 0x83;		/* 2nd page */

	pkt->pkt_resid = sp->cmd_count - 6;
	return (0);
}

int
bsd_scsi_inq_page83(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64		*emul64 = PKT2EMUL64(pkt);
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			instance = ddi_get_instance(emul64->emul64_dip);

	if (sp->cmd_count < 22) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page83: size %d required\n",
		    emul64_name, 22);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0x83;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = (22 - 8) + 4;	/* length */

	sp->cmd_addr[4] = 1;		/* code set - binary */
	sp->cmd_addr[5] = 3;		/* association and device ID type 3 */
	sp->cmd_addr[6] = 0;		/* reserved */
	sp->cmd_addr[7] = 22 - 8;	/* ID length */

	sp->cmd_addr[8] = 0xde;		/* @8: identifier, byte 0 */
	sp->cmd_addr[9] = 0xca;
	sp->cmd_addr[10] = 0xde;
	sp->cmd_addr[11] = 0x80;

	sp->cmd_addr[12] = 0xba;
	sp->cmd_addr[13] = 0xbe;
	sp->cmd_addr[14] = 0xab;
	sp->cmd_addr[15] = 0xba;
					/* @22: */

	/*
	 * Instances seem to be assigned sequentially, so it is unlikely
	 * that we will have more than 65535 of them.
	 */
	sp->cmd_addr[16] = uint_to_byte1(instance);
	sp->cmd_addr[17] = uint_to_byte0(instance);
	sp->cmd_addr[18] = uint_to_byte1(TGT(sp));
	sp->cmd_addr[19] = uint_to_byte0(TGT(sp));
	sp->cmd_addr[20] = uint_to_byte1(LUN(sp));
	sp->cmd_addr[21] = uint_to_byte0(LUN(sp));

	pkt->pkt_resid = sp->cmd_count - 22;
	return (0);
}

int
bsd_scsi_inquiry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	emul64_tgt_t		*tgt;
	uchar_t			pqdtype;
	struct scsi_inquiry	inq;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	if (sp->cmd_count < sizeof (inq)) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inquiry: size %d required\n",
		    emul64_name, (int)sizeof (inq));
		return (EIO);
	}

	if (cdb->cdb_opaque[1] & 0xfc) {
		cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: 0x%x",
		    emul64_name, cdb->cdb_opaque[1]);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0);	/* inv. fld in cdb */
		return (0);
	}

	pqdtype = tgt->emul64_tgt_dtype;
	if (cdb->cdb_opaque[1] & 0x1) {
		switch (cdb->cdb_opaque[2]) {
		case 0x00:
			return (bsd_scsi_inq_page0(pkt, pqdtype));
		case 0x83:
			return (bsd_scsi_inq_page83(pkt, pqdtype));
		default:
			cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: "
			    "unsupported 0x%x",
			    emul64_name, cdb->cdb_opaque[2]);
			return (0);
		}
	}

	/* set up the inquiry data we return */
	(void) bzero((void *)&inq, sizeof (inq));

	inq.inq_dtype = pqdtype;
	inq.inq_ansi = 2;
	inq.inq_rdf = 2;
	inq.inq_len = sizeof (inq) - 4;
	inq.inq_wbus16 = 1;
	inq.inq_cmdque = 1;

	(void) bcopy(tgt->emul64_tgt_inq, inq.inq_vid,
	    sizeof (tgt->emul64_tgt_inq));
	(void) bcopy("1", inq.inq_revision, 2);
	(void) bcopy((void *)&inq, sp->cmd_addr, sizeof (inq));

	pkt->pkt_resid = sp->cmd_count - sizeof (inq);
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_format(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_io(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	diskaddr_t		lblkno;
	int			nblks;

	switch (cdb->scc_cmd) {
	case SCMD_READ:
		lblkno = (uint32_t)GETG0ADDR(cdb);
		nblks = GETG0COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g0 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE:
		lblkno = (uint32_t)GETG0ADDR(cdb);
		nblks = GETG0COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g0 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_READ_G1:
		lblkno = (uint32_t)GETG1ADDR(cdb);
		nblks = GETG1COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g1 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE_G1:
		lblkno = (uint32_t)GETG1ADDR(cdb);
		nblks = GETG1COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g1 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_READ_G4:
		lblkno = GETG4ADDR(cdb);
		lblkno <<= 32;
		lblkno |= (uint32_t)GETG4ADDRTL(cdb);
		nblks = GETG4COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g4 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE_G4:
		lblkno = GETG4ADDR(cdb);
		lblkno <<= 32;
		lblkno |= (uint32_t)GETG4ADDRTL(cdb);
		nblks = GETG4COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g4 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	default:
		cmn_err(CE_WARN, "%s: bsd_scsi_io: unhandled I/O: 0x%x",
		    emul64_name, cdb->scc_cmd);
		break;
	}

	if (pkt->pkt_resid != 0)
		cmn_err(CE_WARN, "%s: bsd_scsi_io: "
		    "pkt_resid: 0x%lx, lblkno %lld, nblks %d",
		    emul64_name, pkt->pkt_resid, lblkno, nblks);

	return (0);
}

int
bsd_scsi_log_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			page_code;

	if (sp->cmd_count < 9) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense: size %d required\n",
		    emul64_name, 9);
		return (EIO);
	}

	page_code = cdb->cdb_opaque[2] & 0x3f;
	if (page_code) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0); /* inv. fld in cdb */
		return (0);
	}

	sp->cmd_addr[0] = 0;		/* page code */
	sp->cmd_addr[1] = 0;		/* reserved */
	sp->cmd_addr[2] = 0;		/* MSB of page length */
	sp->cmd_addr[3] = 8 - 3;	/* LSB of page length */

	sp->cmd_addr[4] = 0;		/* MSB of parameter code */
	sp->cmd_addr[5] = 0;		/* LSB of parameter code */
	sp->cmd_addr[6] = 0;		/* parameter control byte */
	sp->cmd_addr[7] = 4 - 3;	/* parameter length */
	sp->cmd_addr[8] = 0x0;		/* parameter value */

	pkt->pkt_resid = sp->cmd_count - 9;
	return (0);
}

int
bsd_scsi_mode_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb	*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int		page_control;
	int		page_code;
	int		rval = 0;

	switch (cdb->scc_cmd) {
	case SCMD_MODE_SENSE:
		page_code = cdb->cdb_opaque[2] & 0x3f;
		page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
			    "page=0x%x control=0x%x nbytes=%d\n",
			    emul64_name, page_code, page_control,
			    GETG0COUNT(cdb));
		}
		break;
	case SCMD_MODE_SENSE_G1:
		page_code = cdb->cdb_opaque[2] & 0x3f;
		page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
			    "page=0x%x control=0x%x nbytes=%d\n",
			    emul64_name, page_code, page_control,
			    GETG1COUNT(cdb));
		}
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "cmd 0x%x not supported\n", emul64_name, cdb->scc_cmd);
		return (EIO);
	}

	switch (page_code) {
	case DAD_MODE_GEOMETRY:
		rval = bsd_mode_sense_dad_mode_geometry(pkt);
		break;
	case DAD_MODE_ERR_RECOV:
		rval = bsd_mode_sense_dad_mode_err_recov(pkt);
		break;
	case MODEPAGE_DISCO_RECO:
		rval = bsd_mode_sense_modepage_disco_reco(pkt);
		break;
	case DAD_MODE_FORMAT:
		rval = bsd_mode_sense_dad_mode_format(pkt);
		break;
	case DAD_MODE_CACHE:
		rval = bsd_mode_sense_dad_mode_cache(pkt);
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		rval = EIO;
		break;
	}

	return (rval);
}


static int
bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_geometry	page4;
	int			ncyl;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page4))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page4)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page4, sizeof (page4));

	header.length = sizeof (header) + sizeof (page4) - 1;
	header.bdesc_length = 0;

	page4.mode_page.code = DAD_MODE_GEOMETRY;
	page4.mode_page.ps = 1;
	page4.mode_page.length = sizeof (page4) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		ncyl = tgt->emul64_tgt_ncyls;
		page4.cyl_ub = uint_to_byte2(ncyl);
		page4.cyl_mb = uint_to_byte1(ncyl);
		page4.cyl_lb = uint_to_byte0(ncyl);
		page4.heads = uint_to_byte0(tgt->emul64_tgt_nheads);
		page4.rpm = ushort_to_scsi_ushort(dkg_rpm);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		page4.cyl_ub = 0xff;
		page4.cyl_mb = 0xff;
		page4.cyl_lb = 0xff;
		page4.heads = 0xff;
		page4.rpm = 0xffff;
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page4, addr + sizeof (header), sizeof (page4));

	pkt->pkt_resid = sp->cmd_count - sizeof (page4) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_err_recov	page1;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page1))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page1)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page1, sizeof (page1));

	header.length = sizeof (header) + sizeof (page1) - 1;
	header.bdesc_length = 0;

	page1.mode_page.code = DAD_MODE_ERR_RECOV;
	page1.mode_page.ps = 1;
	page1.mode_page.length = sizeof (page1) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page1, addr + sizeof (header), sizeof (page1));

	pkt->pkt_resid = sp->cmd_count - sizeof (page1) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int			rval = 0;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_disco_reco	page2;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page2))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page2)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page2, sizeof (page2));

	header.length = sizeof (header) + sizeof (page2) - 1;
	header.bdesc_length = 0;

	page2.mode_page.code = MODEPAGE_DISCO_RECO;
	page2.mode_page.ps = 1;
	page2.mode_page.length = sizeof (page2) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page2, addr + sizeof (header), sizeof (page2));

	pkt->pkt_resid = sp->cmd_count - sizeof (page2) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_dad_mode_format(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_format	page3;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page3))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page3)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page3, sizeof (page3));

	header.length = sizeof (header) + sizeof (page3) - 1;
	header.bdesc_length = 0;

	page3.mode_page.code = DAD_MODE_FORMAT;
	page3.mode_page.ps = 1;
	page3.mode_page.length = sizeof (page3) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		page3.data_bytes_sect = ushort_to_scsi_ushort(DEV_BSIZE);
		page3.interleave = ushort_to_scsi_ushort(1);
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		page3.sect_track = ushort_to_scsi_ushort(tgt->emul64_tgt_nsect);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page3, addr + sizeof (header), sizeof (page3));

	pkt->pkt_resid = sp->cmd_count - sizeof (page3) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_dad_mode_cache(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_cache	page8;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page8))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page8)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page8, sizeof (page8));

	header.length = sizeof (header) + sizeof (page8) - 1;
	header.bdesc_length = 0;

	page8.mode_page.code = DAD_MODE_CACHE;
	page8.mode_page.ps = 1;
	page8.mode_page.length = sizeof (page8) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page8, addr + sizeof (header), sizeof (page8));

	pkt->pkt_resid = sp->cmd_count - sizeof (page8) - sizeof (header);
	rval = 0;

	return (rval);
}

/* ARGSUSED 0 */
int
bsd_scsi_mode_select(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_read_capacity_8(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity	cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (tgt->emul64_tgt_sectors > 0xffffffff)
		cap.capacity = 0xffffffff;
	else
		cap.capacity =
		    uint32_to_scsi_uint32(tgt->emul64_tgt_sectors);
	cap.lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity));
	return (rval);
}

int
bsd_scsi_read_capacity_16(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity_16 cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	cap.sc_capacity = uint64_to_scsi_uint64(tgt->emul64_tgt_sectors);
	cap.sc_lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);
	cap.sc_rto_en = 0;
	cap.sc_prot_en = 0;
	cap.sc_rsvd0 = 0;
	bzero(&cap.sc_rsvd1[0], sizeof (cap.sc_rsvd1));

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity_16);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity_16));
	return (rval);
}

int
bsd_scsi_read_capacity(struct scsi_pkt *pkt)
{
	return (bsd_scsi_read_capacity_8(pkt));
}


/* ARGSUSED 0 */
int
bsd_scsi_reserve(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_release(struct scsi_pkt *pkt)
{
	return (0);
}


int
bsd_scsi_read_defect_list(struct scsi_pkt *pkt)
{
	pkt->pkt_resid = 0;
	return (0);
}


/* ARGSUSED 0 */
int
bsd_scsi_reassign_block(struct scsi_pkt *pkt)
{
	return (0);
}


static int
bsd_readblks(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t	*tgt;
	blklist_t	*blk;
	emul64_rng_overlap_t overlap;
	int		i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_readblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, a_target, a_lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, a_target, a_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_readblks: no target for %d,%d\n",
		    emul64_name, a_target, a_lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);

	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
	case O_OVERLAP:
		cmn_err(CE_WARN, "%s: bsd_readblks: "
		    "read to blocked area %lld,%d\n",
		    emul64_name, blkno, nblks);
		rw_exit(&tgt->emul64_tgt_nw_lock);
		goto errout;
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if (emul64_debug_blklist)
			cmn_err(CE_CONT, "%s: bsd_readblks: "
			    "%d of %d: blkno %lld\n",
			    emul64_name, i+1, nblks, blkno);
		if (blkno > tgt->emul64_tgt_sectors)
			break;
		blk = bsd_findblk(tgt, blkno, NULL);
		if (blk) {
			(void) bcopy(blk->bl_data, bufaddr, DEV_BSIZE);
		} else {
			(void) bzero(bufaddr, DEV_BSIZE);
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

errout:
	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}


static int
bsd_writeblks(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t	*tgt;
	blklist_t	*blk;
	emul64_rng_overlap_t overlap;
	avl_index_t	where;
	int		i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_writeblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, a_target, a_lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, a_target, a_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_writeblks: no target for %d,%d\n",
		    emul64_name, a_target, a_lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);
	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_skipped_io++;
			emul64_skipped_blk += nblks;
			mutex_exit(&emul64_stats_mutex);
		}
		rw_exit(&tgt->emul64_tgt_nw_lock);
		mutex_exit(&tgt->emul64_tgt_blk_lock);
		return (0);
	case O_OVERLAP:
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if ((overlap == O_NONE) ||
		    (bsd_tgt_overlap(tgt, blkno, 1) == O_NONE)) {
			/*
			 * If there was no overlap for the entire I/O range
			 * or if there is no overlap for this particular
			 * block, then we need to do the write.
			 */
			if (emul64_debug_blklist)
				cmn_err(CE_CONT, "%s: bsd_writeblks: "
				    "%d of %d: blkno %lld\n",
				    emul64_name, i+1, nblks, blkno);
			if (blkno > tgt->emul64_tgt_sectors) {
				cmn_err(CE_WARN, "%s: bsd_writeblks: "
				    "blkno %lld, tgt_sectors %lld\n",
				    emul64_name, blkno,
				    tgt->emul64_tgt_sectors);
				break;
			}

			blk = bsd_findblk(tgt, blkno, &where);
			if (bcmp(bufaddr, emul64_zeros, DEV_BSIZE) == 0) {
				if (blk) {
					bsd_freeblk(tgt, blk);
				}
			} else {
				if (blk) {
					(void) bcopy(bufaddr, blk->bl_data,
					    DEV_BSIZE);
				} else {
					bsd_allocblk(tgt, blkno,
					    (caddr_t)bufaddr, where);
				}
			}
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}

	/*
	 * Now that we're done with our I/O, allow the ioctls to change the
	 * nowrite list.
	 */
	rw_exit(&tgt->emul64_tgt_nw_lock);

errout:
	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}

emul64_tgt_t *
find_tgt(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun)
{
	emul64_tgt_t	*tgt;

	tgt = emul64->emul64_tgt;
	while (tgt) {
		if (tgt->emul64_tgt_saddr.a_target == a_target &&
		    tgt->emul64_tgt_saddr.a_lun == a_lun) {
			break;
		}
		tgt = tgt->emul64_tgt_next;
	}
	return (tgt);
}

/*
 * Free all blocks that are part of the specified range.
 */
int
bsd_freeblkrange(emul64_tgt_t *tgt, emul64_range_t *range)
{
	blklist_t	*blk;
	blklist_t	*nextblk;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));
	for (blk = (blklist_t *)avl_first(&tgt->emul64_tgt_data);
	    blk != NULL; blk = nextblk) {
		/*
		 * We need to get the next block pointer now, because blk
		 * will be freed inside the if statement.
		 */
		nextblk = AVL_NEXT(&tgt->emul64_tgt_data, blk);

		if (emul64_overlap(range, blk->bl_blkno, (size_t)1) != O_NONE) {
			bsd_freeblk(tgt, blk);
		}
	}
	return (0);
}

static blklist_t *
bsd_findblk(emul64_tgt_t *tgt, diskaddr_t blkno, avl_index_t *where)
{
	blklist_t	*blk;
	blklist_t	search;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	search.bl_blkno = blkno;
	blk = (blklist_t *)avl_find(&tgt->emul64_tgt_data, &search, where);
	return (blk);
}


static void
bsd_allocblk(emul64_tgt_t *tgt, diskaddr_t blkno, caddr_t data,
    avl_index_t where)
{
	blklist_t	*blk;

	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_allocblk: %llu\n",
		    emul64_name, blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	blk = (blklist_t *)kmem_zalloc(sizeof (blklist_t), KM_SLEEP);
	blk->bl_data = (uchar_t *)kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	blk->bl_blkno = blkno;
	(void) bcopy(data, blk->bl_data, DEV_BSIZE);
	avl_insert(&tgt->emul64_tgt_data, (void *)blk, where);

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero++;
		tgt->emul64_list_length++;
		if (tgt->emul64_list_length > emul64_max_list_length) {
			emul64_max_list_length = tgt->emul64_list_length;
		}
		mutex_exit(&emul64_stats_mutex);
	}
}

static void
bsd_freeblk(emul64_tgt_t *tgt, blklist_t *blk)
{
	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_freeblk: <%d,%d> blk=%lld\n",
		    emul64_name, tgt->emul64_tgt_saddr.a_target,
		    tgt->emul64_tgt_saddr.a_lun, blk->bl_blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	avl_remove(&tgt->emul64_tgt_data, (void *)blk);
	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero--;
		tgt->emul64_list_length--;
		mutex_exit(&emul64_stats_mutex);
	}
	kmem_free(blk->bl_data, DEV_BSIZE);
	kmem_free(blk, sizeof (blklist_t));
}

/*
 * Look for overlap between a nowrite range and a block range.
 *
 * NOTE:  Callers of this function must hold the tgt->emul64_tgt_nw_lock
 *	  lock.  For the purposes of this function, a reader lock is
 *	  sufficient.
 */
static emul64_rng_overlap_t
bsd_tgt_overlap(emul64_tgt_t *tgt, diskaddr_t blkno, int count)
{
	emul64_nowrite_t	*nw;
	emul64_rng_overlap_t	rv = O_NONE;

	for (nw = tgt->emul64_tgt_nowrite;
	    (nw != NULL) && (rv == O_NONE);
	    nw = nw->emul64_nwnext) {
		rv = emul64_overlap(&nw->emul64_blocked, blkno,
		    (size_t)count);
	}
	return (rv);
}

/*
 * Operations that do a lot of I/O, such as RAID 5 initializations, result
 * in a CPU bound kernel when the device is an emul64 device.  This makes
 * the machine look hung.  To avoid this problem, give up the CPU from time
 * to time.
 */

static void
emul64_yield_check(void)
{
	static uint_t	emul64_io_count = 0;	/* # I/Os since last wait */
	static uint_t	emul64_waiting = FALSE;	/* TRUE -> a thread is in */
						/*   cv_timedwait. */
	clock_t		ticks;

	if (emul64_yield_enable == 0)
		return;

	mutex_enter(&emul64_yield_mutex);

	if (emul64_waiting == TRUE) {
		/*
		 * Another thread has already started the timer.  We'll
		 * just wait here until their time expires, and they
		 * broadcast to us.  When they do that, we'll return and
		 * let our caller do more I/O.
		 */
		cv_wait(&emul64_yield_cv, &emul64_yield_mutex);
	} else if (emul64_io_count++ > emul64_yield_period) {
		/*
		 * Set emul64_waiting to let other threads know that we
		 * have started the timer.
		 */
		emul64_waiting = TRUE;
		emul64_num_delay_called++;
		ticks = drv_usectohz(emul64_yield_length);
		if (ticks == 0)
			ticks = 1;
		(void) cv_timedwait(&emul64_yield_cv,
		    &emul64_yield_mutex, ddi_get_lbolt() + ticks);
		emul64_io_count = 0;
		emul64_waiting = FALSE;

		/* Broadcast in case others are waiting. */
		cv_broadcast(&emul64_yield_cv);
	}

	mutex_exit(&emul64_yield_mutex);
}
1396