/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * pseudo scsi disk driver
 */

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

/*
 * Mode sense/select page control
 */
#define	MODE_SENSE_PC_CURRENT		0
#define	MODE_SENSE_PC_CHANGEABLE	1
#define	MODE_SENSE_PC_DEFAULT		2
#define	MODE_SENSE_PC_SAVED		3
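/* These values match the PC field in bits 7-6 of MODE SENSE CDB byte 2. */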

/*
 * Byte conversion macros
 */
#if	defined(_BIG_ENDIAN)
#define	ushort_to_scsi_ushort(n)	(n)
#define	uint32_to_scsi_uint32(n)	(n)
#define	uint64_to_scsi_uint64(n)	(n)
#elif	defined(_LITTLE_ENDIAN)

#define	ushort_to_scsi_ushort(n)			\
		((((n) & 0x00ff) << 8) |		\
		(((n)  & 0xff00) >> 8))

#define	uint32_to_scsi_uint32(n)			\
		((((n) & 0x000000ff) << 24) |		\
		(((n)  & 0x0000ff00) << 8) |		\
		(((n)  & 0x00ff0000) >> 8) |		\
		(((n)  & 0xff000000) >> 24))
#define	uint64_to_scsi_uint64(n)				\
		((((n) & 0x00000000000000ff) << 56) |		\
		(((n)  & 0x000000000000ff00) << 40) |		\
		(((n)  & 0x0000000000ff0000) << 24) |		\
		(((n)  & 0x00000000ff000000) << 8) |		\
		(((n)  & 0x000000ff00000000) >> 8) |		\
		(((n)  & 0x0000ff0000000000) >> 24) |		\
		(((n)  & 0x00ff000000000000) >> 40) |		\
		(((n)  & 0xff00000000000000) >> 56))
#else
#error "no _BIG_ENDIAN or _LITTLE_ENDIAN"
#endif
#define	uint_to_byte0(n)		((n) & 0xff)
#define	uint_to_byte1(n)		(((n)>>8) & 0xff)
#define	uint_to_byte2(n)		(((n)>>16) & 0xff)
#define	uint_to_byte3(n)		(((n)>>24) & 0xff)
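/*
 * Example: on a little-endian host, ushort_to_scsi_ushort(0x1234) yields
 * 0x3412 (big-endian wire order), and uint_to_byte1(0x11223344) extracts
 * 0x33 (byte 1, counting from the least significant byte).
 */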

/*
 * struct prop_map
 *
 * This structure maps a property name to the place to store its value.
 */
struct prop_map {
	char		*pm_name;	/* Name of the property. */
	int		*pm_value;	/* Place to store the value. */
};

static int emul64_debug_blklist = 0;

/*
 * Some interesting statistics.  These are protected by the
 * emul64_stats_mutex.  It would be nice to have an ioctl to print them out,
 * but we don't have the development time for that now.  You can at least
 * look at them with adb.
 */

int		emul64_collect_stats = 1; /* Collect stats if non-zero */
kmutex_t	emul64_stats_mutex;	/* Protect these variables */
long		emul64_nowrite_count = 0; /* # active nowrite ranges */
static uint64_t	emul64_skipped_io = 0;	/* Skipped I/O operations, because of */
					/* EMUL64_WRITE_OFF. */
static uint64_t	emul64_skipped_blk = 0;	/* Skipped blocks because of */
					/* EMUL64_WRITE_OFF. */
static uint64_t	emul64_io_ops = 0;	/* Total number of I/O operations */
					/* including skipped and actual. */
static uint64_t	emul64_io_blocks = 0;	/* Total number of blocks involved */
					/* in I/O operations. */
static uint64_t	emul64_nonzero = 0;	/* Number of non-zero data blocks */
					/* currently held in memory */
static uint64_t	emul64_max_list_length = 0; /* Maximum size of a linked */
					    /* list of non-zero blocks. */
uint64_t emul64_taskq_max = 0;		/* emul64_scsi_start uses the taskq */
					/* mechanism to dispatch work.  If */
					/* the number of entries in the */
					/* queue exceeds the maximum for */
					/* the queue, a 1 second delay is */
					/* encountered in taskq_ent_alloc. */
					/* This counter counts the number */
					/* of times that this happens. */

/*
 * Since emul64 does no physical I/O, operations that would normally be I/O
 * intensive become CPU bound.  An example of this is RAID 5
 * initialization.  When the kernel becomes CPU bound, it looks as if the
 * machine is hung.
 *
 * To avoid this problem, we provide a function, emul64_yield_check, that
 * delays from time to time to yield the CPU.  The following variables
 * are tunables for this algorithm.
 *
 *	emul64_num_delay_called	Number of times we called delay.  This is
 *				not really a tunable.  Rather it is a
 *				counter that provides useful information
 *				for adjusting the tunables.
 *	emul64_yield_length	Number of microseconds to yield the CPU.
 *	emul64_yield_period	Number of I/O operations between yields.
 *	emul64_yield_enable	emul64 will yield the CPU only if this
 *				variable contains a non-zero value.  This
 *				allows the yield functionality to be turned
 *				off for experimentation purposes.
 *
 * The value of 1000 for emul64_yield_period has been determined by
 * experience with running the tests.
 */
static uint64_t		emul64_num_delay_called = 0;
static int		emul64_yield_length = 1000;
static int		emul64_yield_period = 1000;
static int		emul64_yield_enable = 1;
static kmutex_t		emul64_yield_mutex;
static kcondvar_t	emul64_yield_cv;

/*
 * This array establishes a set of tunable variables that can be set by
 * defining properties in the emul64.conf file.
 */
struct prop_map emul64_properties[] = {
	{ "emul64_collect_stats",	&emul64_collect_stats },
	{ "emul64_yield_length",	&emul64_yield_length },
	{ "emul64_yield_period",	&emul64_yield_period },
	{ "emul64_yield_enable",	&emul64_yield_enable },
	{ "emul64_max_task",		&emul64_max_task },
	{ "emul64_task_nthreads",	&emul64_task_nthreads }
};

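/*
 * For example (a hypothetical emul64.conf fragment), a line such as
 *
 *	emul64_yield_period=2000;
 *
 * would override the default yield period when the properties are
 * looked up in emul64_bsd_get_props() below.
 */
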
static unsigned char *emul64_zeros = NULL; /* Block of 0s for comparison */

extern void emul64_check_cond(struct scsi_pkt *pkt, uchar_t key,
				uchar_t asc, uchar_t ascq);
/* ncyl=250000 acyl=2 nhead=24 nsect=357 */
uint_t dkg_rpm = 3600;

static int bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *);
static int bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_format(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_cache(struct scsi_pkt *);
static int bsd_readblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
				int, unsigned char *);
static int bsd_writeblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
				int, unsigned char *);
emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);
static blklist_t *bsd_findblk(emul64_tgt_t *, diskaddr_t, avl_index_t *);
static void bsd_allocblk(emul64_tgt_t *, diskaddr_t, caddr_t, avl_index_t);
static void bsd_freeblk(emul64_tgt_t *, blklist_t *);
static void emul64_yield_check(void);
static emul64_rng_overlap_t bsd_tgt_overlap(emul64_tgt_t *, diskaddr_t, int);

char *emul64_name = "emul64";


/* XXX replace with FORMG0COUNT */
#define	GETG0COUNT(cdb)		((cdb)->g0_count0)

#define	GETG1COUNT(cdb)		(((cdb)->g1_count1 << 8) +	\
				((cdb)->g1_count0))

#define	GETG4COUNT(cdb)	\
			(((uint64_t)(cdb)->g4_count3 << 24) |	\
			((uint64_t)(cdb)->g4_count2 << 16) |	\
			((uint64_t)(cdb)->g4_count1 << 8) |	\
			((uint64_t)(cdb)->g4_count0))
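/*
 * Group 0 CDBs carry an 8-bit transfer count, group 1 a 16-bit count,
 * and group 4 a 32-bit count; the macros above assemble the multi-byte
 * counts most-significant byte first.
 */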


/*
 * Initialize globals in this file.
 */
void
emul64_bsd_init()
{
	emul64_zeros = (unsigned char *) kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	mutex_init(&emul64_stats_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&emul64_yield_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&emul64_yield_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Clean up globals in this file.
 */
void
emul64_bsd_fini()
{
	cv_destroy(&emul64_yield_cv);
	mutex_destroy(&emul64_yield_mutex);
	mutex_destroy(&emul64_stats_mutex);
	if (emul64_zeros != NULL) {
		kmem_free(emul64_zeros, DEV_BSIZE);
		emul64_zeros = NULL;
	}
}

/*
 * Attempt to get the values of the properties that are specified in the
 * emul64_properties array.  If the property exists, copy its value to the
 * specified location.  All the properties have been assigned default
 * values in this driver, so it is not a problem if we cannot look up a
 * property.
 */
void
emul64_bsd_get_props(dev_info_t *dip)
{
	uint_t		count;
	uint_t		i;
	struct prop_map	*pmp;
	int		*properties;

	for (pmp = emul64_properties, i = 0;
	    i < sizeof (emul64_properties) / sizeof (struct prop_map);
	    i++, pmp++) {
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, pmp->pm_name, &properties,
		    &count) == DDI_PROP_SUCCESS) {
			if (count >= 1) {
				*pmp->pm_value = *properties;
			}
			ddi_prop_free((void *)properties);
		}
	}
}
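/*
 * AVL comparator: orders blklist_t nodes by block number in the
 * per-target data tree (see bsd_findblk() and bsd_allocblk() below).
 */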
int
emul64_bsd_blkcompare(const void *a1, const void *b1)
{
	blklist_t	*a = (blklist_t *)a1;
	blklist_t	*b = (blklist_t *)b1;

	if (a->bl_blkno < b->bl_blkno)
		return (-1);
	if (a->bl_blkno == b->bl_blkno)
		return (0);
	return (1);
}

/* ARGSUSED 0 */
int
bsd_scsi_start_stop_unit(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_test_unit_ready(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_request_sense(struct scsi_pkt *pkt)
{
	return (0);
}

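/*
 * INQUIRY VPD page 0x00: report the list of supported VPD pages
 * (0x00 and 0x83).
 */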
int
bsd_scsi_inq_page0(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);

	if (sp->cmd_count < 6) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page0: size %d required\n",
		    emul64_name, 6);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = 6 - 3;	/* length */
	sp->cmd_addr[4] = 0;		/* 1st page */
	sp->cmd_addr[5] = 0x83;		/* 2nd page */

	pkt->pkt_resid = sp->cmd_count - 6;
	return (0);
}

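/*
 * INQUIRY VPD page 0x83: device identification.  The identifier is a
 * fixed byte pattern followed by the driver instance, target and LUN,
 * which together give each emulated LUN a distinct ID.
 */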
int
bsd_scsi_inq_page83(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64		*emul64 = PKT2EMUL64(pkt);
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			instance = ddi_get_instance(emul64->emul64_dip);

	if (sp->cmd_count < 22) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page83: size %d required\n",
		    emul64_name, 22);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0x83;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = (22 - 8) + 4;	/* length */

	sp->cmd_addr[4] = 1;		/* code set - binary */
	sp->cmd_addr[5] = 3;		/* association and device ID type 3 */
	sp->cmd_addr[6] = 0;		/* reserved */
	sp->cmd_addr[7] = 22 - 8;	/* ID length */

	sp->cmd_addr[8] = 0xde;		/* @8: identifier, byte 0 */
	sp->cmd_addr[9] = 0xca;
	sp->cmd_addr[10] = 0xde;
	sp->cmd_addr[11] = 0x80;

	sp->cmd_addr[12] = 0xba;
	sp->cmd_addr[13] = 0xbe;
	sp->cmd_addr[14] = 0xab;
	sp->cmd_addr[15] = 0xba;
					/* @22: */

	/*
	 * Instances seem to be assigned sequentially, so it is unlikely
	 * that we will have more than 65535 of them.
	 */
	sp->cmd_addr[16] = uint_to_byte1(instance);
	sp->cmd_addr[17] = uint_to_byte0(instance);
	sp->cmd_addr[18] = uint_to_byte1(TGT(sp));
	sp->cmd_addr[19] = uint_to_byte0(TGT(sp));
	sp->cmd_addr[20] = uint_to_byte1(LUN(sp));
	sp->cmd_addr[21] = uint_to_byte0(LUN(sp));

	pkt->pkt_resid = sp->cmd_count - 22;
	return (0);
}

int
bsd_scsi_inquiry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	emul64_tgt_t		*tgt;
	uchar_t			pqdtype;
	struct scsi_inquiry	inq;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	if (sp->cmd_count < sizeof (inq)) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inquiry: size %d required\n",
		    emul64_name, (int)sizeof (inq));
		return (EIO);
	}

	if (cdb->cdb_opaque[1] & 0xfc) {
		cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: 0x%x",
		    emul64_name, cdb->cdb_opaque[1]);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0);	/* inv. fld in cdb */
		return (0);
	}

	pqdtype = tgt->emul64_tgt_dtype;
	if (cdb->cdb_opaque[1] & 0x1) {
		switch (cdb->cdb_opaque[2]) {
		case 0x00:
			return (bsd_scsi_inq_page0(pkt, pqdtype));
		case 0x83:
			return (bsd_scsi_inq_page83(pkt, pqdtype));
		default:
			cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: "
			    "unsupported 0x%x",
			    emul64_name, cdb->cdb_opaque[2]);
			return (0);
		}
	}

	/* set up the inquiry data we return */
	(void) bzero((void *)&inq, sizeof (inq));

	inq.inq_dtype = pqdtype;
	inq.inq_ansi = 2;
	inq.inq_rdf = 2;
	inq.inq_len = sizeof (inq) - 4;
	inq.inq_wbus16 = 1;
	inq.inq_cmdque = 1;

	(void) bcopy(tgt->emul64_tgt_inq, inq.inq_vid,
	    sizeof (tgt->emul64_tgt_inq));
	(void) bcopy("1", inq.inq_revision, 2);
	(void) bcopy((void *)&inq, sp->cmd_addr, sizeof (inq));

	pkt->pkt_resid = sp->cmd_count - sizeof (inq);
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_format(struct scsi_pkt *pkt)
{
	return (0);
}

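/*
 * READ/WRITE dispatch: decode the LBA and transfer count from group 0
 * (6-byte), group 1 (10-byte) or group 4 (16-byte) CDBs, then hand off
 * to bsd_readblks()/bsd_writeblks().  The residual those helpers return
 * becomes pkt_resid.
 */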
int
bsd_scsi_io(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	diskaddr_t		lblkno = 0;
	int			nblks = 0;

	switch (cdb->scc_cmd) {
	case SCMD_READ:
		lblkno = (uint32_t)GETG0ADDR(cdb);
		nblks = GETG0COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g0 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE:
		lblkno = (uint32_t)GETG0ADDR(cdb);
		nblks = GETG0COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g0 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_READ_G1:
		lblkno = (uint32_t)GETG1ADDR(cdb);
		nblks = GETG1COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g1 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE_G1:
		lblkno = (uint32_t)GETG1ADDR(cdb);
		nblks = GETG1COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g1 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_READ_G4:
		lblkno = GETG4ADDR(cdb);
		lblkno <<= 32;
		lblkno |= (uint32_t)GETG4ADDRTL(cdb);
		nblks = GETG4COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g4 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE_G4:
		lblkno = GETG4ADDR(cdb);
		lblkno <<= 32;
		lblkno |= (uint32_t)GETG4ADDRTL(cdb);
		nblks = GETG4COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g4 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	default:
		cmn_err(CE_WARN, "%s: bsd_scsi_io: unhandled I/O: 0x%x",
		    emul64_name, cdb->scc_cmd);
		break;
	}

	if (pkt->pkt_resid != 0)
		cmn_err(CE_WARN, "%s: bsd_scsi_io: "
		    "pkt_resid: 0x%lx, lblkno %lld, nblks %d",
		    emul64_name, pkt->pkt_resid, lblkno, nblks);

	return (0);
}

int
bsd_scsi_log_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			page_code;

	if (sp->cmd_count < 9) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense: size %d required\n",
		    emul64_name, 9);
		return (EIO);
	}

	page_code = cdb->cdb_opaque[2] & 0x3f;
	if (page_code) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0); /* inv. fld in cdb */
		return (0);
	}

	sp->cmd_addr[0] = 0;		/* page code */
	sp->cmd_addr[1] = 0;		/* reserved */
	sp->cmd_addr[2] = 0;		/* MSB of page length */
	sp->cmd_addr[3] = 8 - 3;	/* LSB of page length */

	sp->cmd_addr[4] = 0;		/* MSB of parameter code */
	sp->cmd_addr[5] = 0;		/* LSB of parameter code */
	sp->cmd_addr[6] = 0;		/* parameter control byte */
	sp->cmd_addr[7] = 4 - 3;	/* parameter length */
	sp->cmd_addr[8] = 0x0;		/* parameter value */

	pkt->pkt_resid = sp->cmd_count - 9;
	return (0);
}

int
bsd_scsi_mode_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb	*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int		page_control;
	int		page_code;
	int		rval = 0;

	switch (cdb->scc_cmd) {
	case SCMD_MODE_SENSE:
		page_code = cdb->cdb_opaque[2] & 0x3f;
		page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
			    "page=0x%x control=0x%x nbytes=%d\n",
			    emul64_name, page_code, page_control,
			    GETG0COUNT(cdb));
		}
		break;
	case SCMD_MODE_SENSE_G1:
		page_code = cdb->cdb_opaque[2] & 0x3f;
		page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
			    "page=0x%x control=0x%x nbytes=%d\n",
			    emul64_name, page_code, page_control,
			    GETG1COUNT(cdb));
		}
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "cmd 0x%x not supported\n", emul64_name, cdb->scc_cmd);
		return (EIO);
	}

	switch (page_code) {
	case DAD_MODE_GEOMETRY:
		rval = bsd_mode_sense_dad_mode_geometry(pkt);
		break;
	case DAD_MODE_ERR_RECOV:
		rval = bsd_mode_sense_dad_mode_err_recov(pkt);
		break;
	case MODEPAGE_DISCO_RECO:
		rval = bsd_mode_sense_modepage_disco_reco(pkt);
		break;
	case DAD_MODE_FORMAT:
		rval = bsd_mode_sense_dad_mode_format(pkt);
		break;
	case DAD_MODE_CACHE:
		rval = bsd_mode_sense_dad_mode_cache(pkt);
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		rval = EIO;
		break;
	}

	return (rval);
}


static int
bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_geometry	page4;
	int			ncyl;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page4))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page4)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page4, sizeof (page4));

	header.length = sizeof (header) + sizeof (page4) - 1;
	header.bdesc_length = 0;

	page4.mode_page.code = DAD_MODE_GEOMETRY;
	page4.mode_page.ps = 1;
	page4.mode_page.length = sizeof (page4) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		ncyl = tgt->emul64_tgt_ncyls;
		page4.cyl_ub = uint_to_byte2(ncyl);
		page4.cyl_mb = uint_to_byte1(ncyl);
		page4.cyl_lb = uint_to_byte0(ncyl);
		page4.heads = uint_to_byte0(tgt->emul64_tgt_nheads);
		page4.rpm = ushort_to_scsi_ushort(dkg_rpm);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		page4.cyl_ub = 0xff;
		page4.cyl_mb = 0xff;
		page4.cyl_lb = 0xff;
		page4.heads = 0xff;
		page4.rpm = 0xffff;
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page4, addr + sizeof (header), sizeof (page4));

	pkt->pkt_resid = sp->cmd_count - sizeof (page4) - sizeof (header);

	return (rval);
}

static int
bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_err_recov	page1;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page1))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page1)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page1, sizeof (page1));

	header.length = sizeof (header) + sizeof (page1) - 1;
	header.bdesc_length = 0;

	page1.mode_page.code = DAD_MODE_ERR_RECOV;
	page1.mode_page.ps = 1;
	page1.mode_page.length = sizeof (page1) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page1, addr + sizeof (header), sizeof (page1));

	pkt->pkt_resid = sp->cmd_count - sizeof (page1) - sizeof (header);

	return (rval);
}

static int
bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int			rval = 0;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_disco_reco	page2;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page2))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page2)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page2, sizeof (page2));

	header.length = sizeof (header) + sizeof (page2) - 1;
	header.bdesc_length = 0;

	page2.mode_page.code = MODEPAGE_DISCO_RECO;
	page2.mode_page.ps = 1;
	page2.mode_page.length = sizeof (page2) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page2, addr + sizeof (header), sizeof (page2));

	pkt->pkt_resid = sp->cmd_count - sizeof (page2) - sizeof (header);

	return (rval);
}

static int
bsd_mode_sense_dad_mode_format(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_format	page3;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page3))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page3)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page3, sizeof (page3));

	header.length = sizeof (header) + sizeof (page3) - 1;
	header.bdesc_length = 0;

	page3.mode_page.code = DAD_MODE_FORMAT;
	page3.mode_page.ps = 1;
	page3.mode_page.length = sizeof (page3) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		page3.data_bytes_sect = ushort_to_scsi_ushort(DEV_BSIZE);
		page3.interleave = ushort_to_scsi_ushort(1);
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		page3.sect_track = ushort_to_scsi_ushort(tgt->emul64_tgt_nsect);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page3, addr + sizeof (header), sizeof (page3));

	pkt->pkt_resid = sp->cmd_count - sizeof (page3) - sizeof (header);

	return (rval);
}

static int
bsd_mode_sense_dad_mode_cache(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_cache	page8;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page8))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page8)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page8, sizeof (page8));

	header.length = sizeof (header) + sizeof (page8) - 1;
	header.bdesc_length = 0;

	page8.mode_page.code = DAD_MODE_CACHE;
	page8.mode_page.ps = 1;
	page8.mode_page.length = sizeof (page8) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page8, addr + sizeof (header), sizeof (page8));

	pkt->pkt_resid = sp->cmd_count - sizeof (page8) - sizeof (header);

	return (rval);
}

/* ARGSUSED 0 */
int
bsd_scsi_mode_select(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_read_capacity_8(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity	cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (tgt->emul64_tgt_sectors > 0xffffffff)
		cap.capacity = 0xffffffff;
	else
		cap.capacity =
		    uint32_to_scsi_uint32(tgt->emul64_tgt_sectors);
	cap.lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity));
	return (rval);
}

int
bsd_scsi_read_capacity_16(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity_16 cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	cap.sc_capacity = uint64_to_scsi_uint64(tgt->emul64_tgt_sectors);
	cap.sc_lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);
	cap.sc_rto_en = 0;
	cap.sc_prot_en = 0;
	cap.sc_rsvd0 = 0;
	bzero(&cap.sc_rsvd1[0], sizeof (cap.sc_rsvd1));

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity_16);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity_16));
	return (rval);
}

int
bsd_scsi_read_capacity(struct scsi_pkt *pkt)
{
	return (bsd_scsi_read_capacity_8(pkt));
}


/* ARGSUSED 0 */
int
bsd_scsi_reserve(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_release(struct scsi_pkt *pkt)
{
	return (0);
}


int
bsd_scsi_read_defect_list(struct scsi_pkt *pkt)
{
	pkt->pkt_resid = 0;
	return (0);
}


/* ARGSUSED 0 */
int
bsd_scsi_reassign_block(struct scsi_pkt *pkt)
{
	return (0);
}


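/*
 * Read nblks blocks starting at blkno into bufaddr.  Blocks that have
 * never been written (and thus have no node in the AVL tree) read back
 * as zeros.  Returns the number of bytes NOT transferred, which the
 * caller stores in pkt_resid.
 */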
static int
bsd_readblks(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t	*tgt;
	blklist_t	*blk;
	emul64_rng_overlap_t overlap;
	int		i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_readblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, a_target, a_lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, a_target, a_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_readblks: no target for %d,%d\n",
		    emul64_name, a_target, a_lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);

	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
	case O_OVERLAP:
		cmn_err(CE_WARN, "%s: bsd_readblks: "
		    "read to blocked area %lld,%d\n",
		    emul64_name, blkno, nblks);
		rw_exit(&tgt->emul64_tgt_nw_lock);
		goto errout;
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if (emul64_debug_blklist)
			cmn_err(CE_CONT, "%s: bsd_readblks: "
			    "%d of %d: blkno %lld\n",
			    emul64_name, i+1, nblks, blkno);
		if (blkno > tgt->emul64_tgt_sectors)
			break;
		blk = bsd_findblk(tgt, blkno, NULL);
		if (blk) {
			(void) bcopy(blk->bl_data, bufaddr, DEV_BSIZE);
		} else {
			(void) bzero(bufaddr, DEV_BSIZE);
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

errout:
	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}


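/*
 * Write nblks blocks from bufaddr starting at blkno.  The store is
 * sparse: an all-zero block frees any existing node, and a non-zero
 * block either overwrites the existing node or allocates a new one.
 * Writes that fall entirely within a nowrite range are skipped and
 * counted.  Returns the number of bytes NOT transferred (for pkt_resid).
 */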
static int
bsd_writeblks(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t	*tgt;
	blklist_t	*blk;
	emul64_rng_overlap_t overlap;
	avl_index_t	where;
	int		i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_writeblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, a_target, a_lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, a_target, a_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_writeblks: no target for %d,%d\n",
		    emul64_name, a_target, a_lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);
	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_skipped_io++;
			emul64_skipped_blk += nblks;
			mutex_exit(&emul64_stats_mutex);
		}
		rw_exit(&tgt->emul64_tgt_nw_lock);
		mutex_exit(&tgt->emul64_tgt_blk_lock);
		return (0);
	case O_OVERLAP:
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if ((overlap == O_NONE) ||
		    (bsd_tgt_overlap(tgt, blkno, 1) == O_NONE)) {
			/*
			 * If there was no overlap for the entire I/O range
			 * or if there is no overlap for this particular
			 * block, then we need to do the write.
			 */
			if (emul64_debug_blklist)
				cmn_err(CE_CONT, "%s: bsd_writeblks: "
				    "%d of %d: blkno %lld\n",
				    emul64_name, i+1, nblks, blkno);
			if (blkno > tgt->emul64_tgt_sectors) {
				cmn_err(CE_WARN, "%s: bsd_writeblks: "
				    "blkno %lld, tgt_sectors %lld\n",
				    emul64_name, blkno,
				    tgt->emul64_tgt_sectors);
				break;
			}

			blk = bsd_findblk(tgt, blkno, &where);
			if (bcmp(bufaddr, emul64_zeros, DEV_BSIZE) == 0) {
				if (blk) {
					bsd_freeblk(tgt, blk);
				}
			} else {
				if (blk) {
					(void) bcopy(bufaddr, blk->bl_data,
					    DEV_BSIZE);
				} else {
					bsd_allocblk(tgt, blkno,
					    (caddr_t)bufaddr, where);
				}
			}
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}

	/*
	 * Now that we're done with our I/O, allow the ioctls to change the
	 * nowrite list.
	 */
	rw_exit(&tgt->emul64_tgt_nw_lock);

	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}

emul64_tgt_t *
find_tgt(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun)
{
	emul64_tgt_t	*tgt;

	tgt = emul64->emul64_tgt;
	while (tgt) {
		if (tgt->emul64_tgt_saddr.a_target == a_target &&
		    tgt->emul64_tgt_saddr.a_lun == a_lun) {
			break;
		}
		tgt = tgt->emul64_tgt_next;
	}
	return (tgt);
}

/*
 * Free all blocks that are part of the specified range.
 */
int
bsd_freeblkrange(emul64_tgt_t *tgt, emul64_range_t *range)
{
	blklist_t	*blk;
	blklist_t	*nextblk;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));
	for (blk = (blklist_t *)avl_first(&tgt->emul64_tgt_data);
	    blk != NULL;
	    blk = nextblk) {
		/*
		 * We need to get the next block pointer now, because blk
		 * will be freed inside the if statement.
		 */
		nextblk = AVL_NEXT(&tgt->emul64_tgt_data, blk);

		if (emul64_overlap(range, blk->bl_blkno, (size_t)1) != O_NONE) {
			bsd_freeblk(tgt, blk);
		}
	}
	return (0);
}

static blklist_t *
bsd_findblk(emul64_tgt_t *tgt, diskaddr_t blkno, avl_index_t *where)
{
	blklist_t	*blk;
	blklist_t	search;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	search.bl_blkno = blkno;
	blk = (blklist_t *)avl_find(&tgt->emul64_tgt_data, &search, where);
	return (blk);
}


static void
bsd_allocblk(emul64_tgt_t *tgt, diskaddr_t blkno, caddr_t data,
    avl_index_t where)
{
	blklist_t	*blk;

	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_allocblk: %llu\n",
		    emul64_name, blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	blk = (blklist_t *)kmem_zalloc(sizeof (blklist_t), KM_SLEEP);
	blk->bl_data = (uchar_t *)kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	blk->bl_blkno = blkno;
	(void) bcopy(data, blk->bl_data, DEV_BSIZE);
	avl_insert(&tgt->emul64_tgt_data, (void *)blk, where);

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero++;
		tgt->emul64_list_length++;
		if (tgt->emul64_list_length > emul64_max_list_length) {
			emul64_max_list_length = tgt->emul64_list_length;
		}
		mutex_exit(&emul64_stats_mutex);
	}
}

static void
bsd_freeblk(emul64_tgt_t *tgt, blklist_t *blk)
{
	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_freeblk: <%d,%d> blk=%lld\n",
		    emul64_name, tgt->emul64_tgt_saddr.a_target,
		    tgt->emul64_tgt_saddr.a_lun, blk->bl_blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	avl_remove(&tgt->emul64_tgt_data, (void *)blk);
	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero--;
		tgt->emul64_list_length--;
		mutex_exit(&emul64_stats_mutex);
	}
	kmem_free(blk->bl_data, DEV_BSIZE);
	kmem_free(blk, sizeof (blklist_t));
}

/*
 * Look for overlap between a nowrite range and a block range.
 *
 * NOTE:  Callers of this function must hold the tgt->emul64_tgt_nw_lock
 *	  lock.  For the purposes of this function, a reader lock is
 *	  sufficient.
 */
static emul64_rng_overlap_t
bsd_tgt_overlap(emul64_tgt_t *tgt, diskaddr_t blkno, int count)
{
	emul64_nowrite_t	*nw;
	emul64_rng_overlap_t	rv = O_NONE;

	for (nw = tgt->emul64_tgt_nowrite;
	    (nw != NULL) && (rv == O_NONE);
	    nw = nw->emul64_nwnext) {
		rv = emul64_overlap(&nw->emul64_blocked, blkno,
		    (size_t)count);
	}
	return (rv);
}

/*
 * Operations that do a lot of I/O, such as RAID 5 initializations, result
 * in a CPU bound kernel when the device is an emul64 device.  This makes
 * the machine look hung.  To avoid this problem, give up the CPU from time
 * to time.
 */

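/*
 * The first thread to exceed emul64_yield_period I/Os starts a
 * cv_timedwait for emul64_yield_length microseconds; any other thread
 * arriving in the meantime blocks in cv_wait until the timer expires
 * and the first thread broadcasts on emul64_yield_cv.
 */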
static void
emul64_yield_check(void)
{
	static uint_t	emul64_io_count = 0;	/* # I/Os since last wait */
	static uint_t	emul64_waiting = FALSE;	/* TRUE -> a thread is in */
						/*   cv_timedwait. */
	clock_t		ticks;

	if (emul64_yield_enable == 0)
		return;

	mutex_enter(&emul64_yield_mutex);

	if (emul64_waiting == TRUE) {
		/*
		 * Another thread has already started the timer.  We'll
		 * just wait here until their time expires, and they
		 * broadcast to us.  When they do that, we'll return and
		 * let our caller do more I/O.
		 */
		cv_wait(&emul64_yield_cv, &emul64_yield_mutex);
	} else if (emul64_io_count++ > emul64_yield_period) {
		/*
		 * Set emul64_waiting to let other threads know that we
		 * have started the timer.
		 */
		emul64_waiting = TRUE;
		emul64_num_delay_called++;
		ticks = drv_usectohz(emul64_yield_length);
		if (ticks == 0)
			ticks = 1;
		(void) cv_timedwait(&emul64_yield_cv,
		    &emul64_yield_mutex, ddi_get_lbolt() + ticks);
		emul64_io_count = 0;
		emul64_waiting = FALSE;

		/* Broadcast in case others are waiting. */
		cv_broadcast(&emul64_yield_cv);
	}

	mutex_exit(&emul64_yield_mutex);
}