xref: /titanic_41/usr/src/uts/common/io/bofi.c (revision 445f2479fe3d7435daab18bf2cdc310b86cd6738)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sysmacros.h>
31 #include <sys/buf.h>
32 #include <sys/errno.h>
33 #include <sys/modctl.h>
34 #include <sys/conf.h>
35 #include <sys/stat.h>
36 #include <sys/kmem.h>
37 #include <sys/proc.h>
38 #include <sys/cpuvar.h>
39 #include <sys/ddi_impldefs.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/sunndi.h>
43 #include <sys/debug.h>
44 #include <sys/bofi.h>
45 #include <sys/dvma.h>
46 #include <sys/bofi_impl.h>
47 
48 /*
49  * Testing the resilience of a hardened device driver requires a suitably wide
50  * range of "typical" hardware faults to be injected,
51  * preferably in a controlled and repeatable fashion. This is not in general
52  * possible via hardware, so the "fault injection test harness" is provided.
53  * This works by intercepting calls from the driver to various DDI routines,
54  * and then corrupting the result of those DDI routine calls as if the
55  * hardware had caused the corruption.
56  *
57  * Conceptually, the bofi driver consists of two parts:
58  *
59  * A driver interface that supports a number of ioctls which allow error
60  * definitions ("errdefs") to be defined and subsequently managed. The
61  * driver is a clone driver, so each open will create a separate
62  * invocation. Any errdefs created by using ioctls to that invocation
63  * will automatically be deleted when that invocation is closed.
64  *
65  * Intercept routines: When the bofi driver is attached, it edits the
66  * bus_ops structure of the bus nexus specified by the "bofi-nexus"
67  * field in the "bofi.conf" file, thus allowing the
68  * bofi driver to intercept various ddi functions. These intercept
69  * routines primarily carry out fault injections based on the errdefs
70  * created for that device.
71  *
72  * Faults can be injected into:
73  *
74  * DMA (corrupting data for DMA to/from memory areas defined by
75  * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
76  *
77  * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
78  * etc),
79  *
80  * Interrupts (generating spurious interrupts, losing interrupts,
81  * delaying interrupts).
82  *
83  * By default, ddi routines called from all drivers will be intercepted
84  * and faults potentially injected. However, the "bofi-to-test" field in
85  * the "bofi.conf" file can be set to a space-separated list of drivers to
86  * test (or by preceding each driver name in the list with an "!", a list
87  * of drivers not to test).
88  *
89  * In addition to fault injection, the bofi driver does a number of static
90  * checks which are controlled by properties in the "bofi.conf" file.
91  *
92  * "bofi-ddi-check" - if set will validate that there are no PIO access
93  * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
94  *
95  * "bofi-range-check" - if set to values 1 (warning) or 2 (panic), will
96  * validate that calls to ddi_get8(), ddi_put8(), etc are not made
97  * specifying addresses outside the range of the access_handle.
98  *
99  * "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
100  * are being made correctly.
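 *
 * An illustrative bofi.conf fragment (driver and nexus names hypothetical):
 *
 *	name="bofi" parent="pseudo" instance=0;
 *	bofi-nexus="pcipsy";
 *	bofi-to-test="xyzdrv";
 *	bofi-range-check="warn";
 *	bofi-ddi-check="on";
 *	bofi-sync-check="on";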
101  */
102 
103 extern void *bp_mapin_common(struct buf *, int);
104 
105 static int bofi_ddi_check;
106 static int bofi_sync_check;
107 static int bofi_range_check;
108 
109 static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;
110 
111 #define	LLSZMASK (sizeof (uint64_t)-1)
112 
113 #define	HDL_HASH_TBL_SIZE 64
114 static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
115 static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
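/*
 * hash a pointer into the shadow handle tables - the shifts just discard
 * the pointer's low-order alignment bits before masking
 */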
116 #define	HDL_DHASH(x) \
117 	(&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
118 #define	HDL_HHASH(x) \
119 	(&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
120 
121 static struct bofi_shadow shadow_list;
122 static struct bofi_errent *errent_listp;
123 
124 static char driver_list[NAMESIZE];
125 static int driver_list_size;
126 static int driver_list_neg;
127 static char nexus_name[NAMESIZE];
128 
129 static int initialized = 0;
130 
131 #define	NCLONES 256
132 static int clone_tab[NCLONES];
133 
134 static dev_info_t *our_dip;
135 
136 static kmutex_t bofi_mutex;
137 static kmutex_t clone_tab_mutex;
138 static kmutex_t bofi_low_mutex;
139 static ddi_iblock_cookie_t bofi_low_cookie;
140 static uint_t	bofi_signal(caddr_t arg);
141 static int	bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
142 static int	bofi_attach(dev_info_t *, ddi_attach_cmd_t);
143 static int	bofi_detach(dev_info_t *, ddi_detach_cmd_t);
144 static int	bofi_open(dev_t *, int, int, cred_t *);
145 static int	bofi_close(dev_t, int, int, cred_t *);
146 static int	bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
147 static int	bofi_errdef_alloc(struct bofi_errdef *, char *,
148 		    struct bofi_errent *);
149 static int	bofi_errdef_free(struct bofi_errent *);
150 static void	bofi_start(struct bofi_errctl *, char *);
151 static void	bofi_stop(struct bofi_errctl *, char *);
152 static void	bofi_broadcast(struct bofi_errctl *, char *);
153 static void	bofi_clear_acc_chk(struct bofi_errctl *, char *);
154 static void	bofi_clear_errors(struct bofi_errctl *, char *);
155 static void	bofi_clear_errdefs(struct bofi_errctl *, char *);
156 static int	bofi_errdef_check(struct bofi_errstate *,
157 		    struct acc_log_elem **);
158 static int	bofi_errdef_check_w(struct bofi_errstate *,
159 		    struct acc_log_elem **);
160 static int	bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
161 		    off_t, off_t, caddr_t *);
162 static int	bofi_dma_map(dev_info_t *, dev_info_t *,
163 		    struct ddi_dma_req *, ddi_dma_handle_t *);
164 static int	bofi_dma_allochdl(dev_info_t *, dev_info_t *,
165 		    ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
166 		    ddi_dma_handle_t *);
167 static int	bofi_dma_freehdl(dev_info_t *, dev_info_t *,
168 		    ddi_dma_handle_t);
169 static int	bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
170 		    ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
171 		    uint_t *);
172 static int	bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
173 		    ddi_dma_handle_t);
174 static int	bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
175 		    off_t, size_t, uint_t);
176 static int	bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
177 		    enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
178 static int	bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
179 		    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
180 static int	bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
181 		    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
182 		    void *result);
183 
184 #if defined(__sparc)
185 static void	bofi_dvma_kaddr_load(ddi_dma_handle_t, caddr_t, uint_t,
186 		    uint_t, ddi_dma_cookie_t *);
187 static void	bofi_dvma_unload(ddi_dma_handle_t, uint_t, uint_t);
188 static void	bofi_dvma_sync(ddi_dma_handle_t, uint_t, uint_t);
189 static void	bofi_dvma_reserve(dev_info_t *, ddi_dma_handle_t);
190 #endif
191 static int	driver_under_test(dev_info_t *);
192 static int	bofi_check_acc_hdl(ddi_acc_impl_t *);
193 static int	bofi_check_dma_hdl(ddi_dma_impl_t *);
194 static int	bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
195 		    ddi_eventcookie_t eventhdl, void *impl_data);
196 
197 static struct bus_ops bofi_bus_ops = {
198 	BUSO_REV,
199 	bofi_map,
200 	NULL,
201 	NULL,
202 	NULL,
203 	i_ddi_map_fault,
204 	bofi_dma_map,
205 	bofi_dma_allochdl,
206 	bofi_dma_freehdl,
207 	bofi_dma_bindhdl,
208 	bofi_dma_unbindhdl,
209 	bofi_dma_flush,
210 	bofi_dma_win,
211 	bofi_dma_ctl,
212 	NULL,
213 	ddi_bus_prop_op,
214 	ndi_busop_get_eventcookie,
215 	ndi_busop_add_eventcall,
216 	ndi_busop_remove_eventcall,
217 	bofi_post_event,
218 	NULL,
219 	0,
220 	0,
221 	0,
222 	0,
223 	0,
224 	0,
225 	0,
226 	bofi_intr_ops
227 };
228 
229 static struct cb_ops bofi_cb_ops = {
230 	bofi_open,		/* open */
231 	bofi_close,		/* close */
232 	nodev,			/* strategy */
233 	nodev,			/* print */
234 	nodev,			/* dump */
235 	nodev,			/* read */
236 	nodev,			/* write */
237 	bofi_ioctl,		/* ioctl */
238 	nodev,			/* devmap */
239 	nodev,			/* mmap */
240 	nodev,			/* segmap */
241 	nochpoll,		/* chpoll */
242 	ddi_prop_op,		/* prop_op */
243 	NULL,			/* for STREAMS drivers */
244 	D_MP,			/* driver compatibility flag */
245 	CB_REV,			/* cb_ops revision */
246 	nodev,			/* aread */
247 	nodev			/* awrite */
248 };
249 
250 static struct dev_ops bofi_ops = {
251 	DEVO_REV,		/* driver build version */
252 	0,			/* device reference count */
253 	bofi_getinfo,
254 	nulldev,
255 	nulldev,		/* probe */
256 	bofi_attach,
257 	bofi_detach,
258 	nulldev,		/* reset */
259 	&bofi_cb_ops,
260 	(struct bus_ops *)NULL,
261 	nulldev			/* power */
262 };
263 
264 /* module configuration stuff */
265 static void    *statep;
266 
267 static struct modldrv modldrv = {
268 	&mod_driverops,
269 	"bofi driver %I%",
270 	&bofi_ops
271 };
272 
273 static struct modlinkage modlinkage = {
274 	MODREV_1,
275 	&modldrv,
276 	0
277 };
278 
279 static struct bus_ops save_bus_ops;
280 
281 #if defined(__sparc)
282 static struct dvma_ops bofi_dvma_ops = {
283 	DVMAO_REV,
284 	bofi_dvma_kaddr_load,
285 	bofi_dvma_unload,
286 	bofi_dvma_sync
287 };
288 #endif
289 
290 /*
291  * support routine - map user page into kernel virtual
292  */
293 static caddr_t
294 dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
295 {
296 	struct buf buf;
297 	struct proc proc;
298 
299 	/*
300 	 * mock up a buf structure so we can call bp_mapin_common()
301 	 */
302 	buf.b_flags = B_PHYS;
303 	buf.b_un.b_addr = (caddr_t)addr;
304 	buf.b_bcount = (size_t)len;
305 	proc.p_as = as;
306 	buf.b_proc = &proc;
307 	return (bp_mapin_common(&buf, flag));
308 }
309 
310 
311 /*
312  * support routine - map page chain into kernel virtual
313  */
314 static caddr_t
315 dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
316 {
317 	struct buf buf;
318 
319 	/*
320 	 * mock up a buf structure so we can call bp_mapin_common()
321 	 */
322 	buf.b_flags = B_PAGEIO;
323 	buf.b_un.b_addr = (caddr_t)(uintptr_t)offset;
324 	buf.b_bcount = (size_t)len;
325 	buf.b_pages = pp;
326 	return (bp_mapin_common(&buf, flag));
327 }
328 
329 
330 /*
331  * support routine - map page array into kernel virtual
332  */
333 static caddr_t
334 dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
335     int flag)
336 {
337 	struct buf buf;
338 	struct proc proc;
339 
340 	/*
341 	 * mock up a buf structure so we can call bp_mapin_common()
342 	 */
343 	buf.b_flags = B_PHYS|B_SHADOW;
344 	buf.b_un.b_addr = addr;
345 	buf.b_bcount = len;
346 	buf.b_shadow = pplist;
347 	proc.p_as = as;
348 	buf.b_proc = &proc;
349 	return (bp_mapin_common(&buf, flag));
350 }
351 
352 
353 /*
354  * support routine - map dmareq into kernel virtual if not already mapped
355  * fills in *lenp with length
356  * *mapaddrp will be the new kernel virtual address - or NULL if no mapping needed
357  */
358 static caddr_t
359 ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
360 	offset_t *lenp)
361 {
362 	int sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP: VM_NOSLEEP;
363 
364 	*lenp = dmareqp->dmar_object.dmao_size;
365 	if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
366 		*mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
367 		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
368 		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, sleep);
369 		return (*mapaddrp);
370 	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
371 		*mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
372 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
373 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
374 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
375 		return (*mapaddrp);
376 	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas) {
377 		*mapaddrp = NULL;
378 		return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
379 	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
380 		*mapaddrp = NULL;
381 		return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
382 	} else {
383 		*mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
384 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
385 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
386 		return (*mapaddrp);
387 	}
388 }
389 
390 
391 /*
392  * support routine - free off kernel virtual mapping as allocated by
393  * ddi_dmareq_mapin()
394  */
395 static void
396 ddi_dmareq_mapout(caddr_t addr, offset_t len)
397 {
398 	struct buf buf;
399 
400 	if (addr == NULL)
401 		return;
402 	/*
403 	 * mock up a buf structure
404 	 */
405 	buf.b_flags = B_REMAPPED;
406 	buf.b_un.b_addr = addr;
407 	buf.b_bcount = (size_t)len;
408 	bp_mapout(&buf);
409 }
410 
411 static time_t
412 bofi_gettime(void)
413 {
414 	timestruc_t ts;
415 
416 	gethrestime(&ts);
417 	return (ts.tv_sec);
418 }
419 
420 /*
421  * reset the bus_ops structure of the specified nexus to point to
422  * the original values in the save_bus_ops structure.
423  *
424  * Note that both this routine and modify_bus_ops() rely on the current
425  * behavior of the framework in that nexus drivers are not unloadable
426  *
427  */
428 
429 static int
430 reset_bus_ops(char *name, struct bus_ops *bop)
431 {
432 	struct modctl *modp;
433 	struct modldrv *mp;
434 	struct bus_ops *bp;
435 	struct dev_ops *ops;
436 
437 	mutex_enter(&mod_lock);
438 	/*
439 	 * find specified module
440 	 */
441 	modp = &modules;
442 	do {
443 		if (strcmp(name, modp->mod_modname) == 0) {
444 			if (!modp->mod_linkage) {
445 				mutex_exit(&mod_lock);
446 				return (0);
447 			}
448 			mp = modp->mod_linkage->ml_linkage[0];
449 			if (!mp || !mp->drv_dev_ops) {
450 				mutex_exit(&mod_lock);
451 				return (0);
452 			}
453 			ops = mp->drv_dev_ops;
454 			bp = ops->devo_bus_ops;
455 			if (!bp) {
456 				mutex_exit(&mod_lock);
457 				return (0);
458 			}
459 			if (ops->devo_refcnt > 0) {
460 				/*
461 				 * As long as devices are active with modified
462 				 * bus ops bofi must not go away. There may be
463 				 * drivers with modified access or dma handles.
464 				 */
465 				mutex_exit(&mod_lock);
466 				return (0);
467 			}
468 			cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
469 			    mp->drv_linkinfo);
470 			bp->bus_intr_op = bop->bus_intr_op;
471 			bp->bus_post_event = bop->bus_post_event;
472 			bp->bus_map = bop->bus_map;
473 			bp->bus_dma_map = bop->bus_dma_map;
474 			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
475 			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
476 			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
477 			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
478 			bp->bus_dma_flush = bop->bus_dma_flush;
479 			bp->bus_dma_win = bop->bus_dma_win;
480 			bp->bus_dma_ctl = bop->bus_dma_ctl;
481 			mutex_exit(&mod_lock);
482 			return (1);
483 		}
484 	} while ((modp = modp->mod_next) != &modules);
485 	mutex_exit(&mod_lock);
486 	return (0);
487 }
488 
489 /*
490  * modify the bus_ops structure of the specified nexus to point to bofi
491  * routines, saving the original values in the save_bus_ops structure
492  */
493 
494 static int
495 modify_bus_ops(char *name, struct bus_ops *bop)
496 {
497 	struct modctl *modp;
498 	struct modldrv *mp;
499 	struct bus_ops *bp;
500 	struct dev_ops *ops;
501 
502 	if (ddi_name_to_major(name) == -1)
503 		return (0);
504 
505 	mutex_enter(&mod_lock);
506 	/*
507 	 * find specified module
508 	 */
509 	modp = &modules;
510 	do {
511 		if (strcmp(name, modp->mod_modname) == 0) {
512 			if (!modp->mod_linkage) {
513 				mutex_exit(&mod_lock);
514 				return (0);
515 			}
516 			mp = modp->mod_linkage->ml_linkage[0];
517 			if (!mp || !mp->drv_dev_ops) {
518 				mutex_exit(&mod_lock);
519 				return (0);
520 			}
521 			ops = mp->drv_dev_ops;
522 			bp = ops->devo_bus_ops;
523 			if (!bp) {
524 				mutex_exit(&mod_lock);
525 				return (0);
526 			}
527 			if (ops->devo_refcnt == 0) {
528 				/*
529 				 * If there is no device active for this
530 				 * module then there is nothing to do for bofi.
531 				 */
532 				mutex_exit(&mod_lock);
533 				return (0);
534 			}
535 			cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
536 			    mp->drv_linkinfo);
537 			save_bus_ops = *bp;
538 			bp->bus_intr_op = bop->bus_intr_op;
539 			bp->bus_post_event = bop->bus_post_event;
540 			bp->bus_map = bop->bus_map;
541 			bp->bus_dma_map = bop->bus_dma_map;
542 			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
543 			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
544 			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
545 			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
546 			bp->bus_dma_flush = bop->bus_dma_flush;
547 			bp->bus_dma_win = bop->bus_dma_win;
548 			bp->bus_dma_ctl = bop->bus_dma_ctl;
549 			mutex_exit(&mod_lock);
550 			return (1);
551 		}
552 	} while ((modp = modp->mod_next) != &modules);
553 	mutex_exit(&mod_lock);
554 	return (0);
555 }
556 
557 
558 int
559 _init(void)
560 {
561 	int    e;
562 
563 	e = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
564 	if (e != 0)
565 		return (e);
566 	if ((e = mod_install(&modlinkage)) != 0)
567 		ddi_soft_state_fini(&statep);
568 	return (e);
569 }
570 
571 
572 int
573 _fini(void)
574 {
575 	int e;
576 
577 	if ((e = mod_remove(&modlinkage)) != 0)
578 		return (e);
579 	ddi_soft_state_fini(&statep);
580 	return (e);
581 }
582 
583 
584 int
585 _info(struct modinfo *modinfop)
586 {
587 	return (mod_info(&modlinkage, modinfop));
588 }
589 
590 
591 static int
592 bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
593 {
594 	char *name;
595 	char buf[80];
596 	int i;
597 	int s, ss;
598 	int size = NAMESIZE;
599 	int new_string;
600 	char *ptr;
601 
602 	if (cmd != DDI_ATTACH)
603 		return (DDI_FAILURE);
604 	/*
605 	 * only one instance - but we clone using the open routine
606 	 */
607 	if (ddi_get_instance(dip) > 0)
608 		return (DDI_FAILURE);
609 
610 	if (!initialized) {
611 		if ((name = ddi_get_name(dip)) == NULL)
612 			return (DDI_FAILURE);
613 		(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
614 		if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
615 		    DDI_PSEUDO, NULL) == DDI_FAILURE)
616 			return (DDI_FAILURE);
617 
618 		if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
619 		    &bofi_low_cookie) != DDI_SUCCESS) {
620 			ddi_remove_minor_node(dip, buf);
621 			return (DDI_FAILURE); /* fail attach */
622 		}
623 		/*
624 		 * get nexus name (from conf file)
625 		 */
626 		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
627 		    "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
628 			ddi_remove_minor_node(dip, buf);
629 			return (DDI_FAILURE);
630 		}
631 		/*
632 		 * get whether to do dma map kmem private checking
633 		 */
634 		if ((bofi_range_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
635 		    dip, 0, "bofi-range-check", &ptr)) != DDI_PROP_SUCCESS)
636 			bofi_range_check = 0;
637 		else if (strcmp(ptr, "panic") == 0)
638 			bofi_range_check = 2;
639 		else if (strcmp(ptr, "warn") == 0)
640 			bofi_range_check = 1;
641 		else
642 			bofi_range_check = 0;
643 		ddi_prop_free(ptr);
644 
645 		/*
646 		 * get whether to prevent direct access to register
647 		 */
648 		if ((bofi_ddi_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
649 		    dip, 0, "bofi-ddi-check", &ptr)) != DDI_PROP_SUCCESS)
650 			bofi_ddi_check = 0;
651 		else if (strcmp(ptr, "on") == 0)
652 			bofi_ddi_check = 1;
653 		else
654 			bofi_ddi_check = 0;
655 		ddi_prop_free(ptr);
656 
657 		/*
658 		 * get whether to do copy on ddi_dma_sync
659 		 */
660 		if ((bofi_sync_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
661 		    dip, 0, "bofi-sync-check", &ptr)) != DDI_PROP_SUCCESS)
662 			bofi_sync_check = 0;
663 		else if (strcmp(ptr, "on") == 0)
664 			bofi_sync_check = 1;
665 		else
666 			bofi_sync_check = 0;
667 		ddi_prop_free(ptr);
668 
669 		/*
670 		 * get driver-under-test names (from conf file)
671 		 */
672 		size = NAMESIZE;
673 		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
674 		    "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
675 			driver_list[0] = 0;
676 		/*
677 		 * and convert into a sequence of strings
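		 * e.g. "foo bar" tests only foo and bar, whereas "!foo !bar"
		 * tests all drivers except foo and bar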
678 		 */
679 		driver_list_neg = 1;
680 		new_string = 1;
681 		driver_list_size = strlen(driver_list);
682 		for (i = 0; i < driver_list_size; i++) {
683 			if (driver_list[i] == ' ') {
684 				driver_list[i] = '\0';
685 				new_string = 1;
686 			} else if (new_string) {
687 				if (driver_list[i] != '!')
688 					driver_list_neg = 0;
689 				new_string = 0;
690 			}
691 		}
692 		/*
693 		 * initialize mutex, lists
694 		 */
695 		mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
696 		    NULL);
697 		/*
698 		 * fake up iblock cookie - need to protect ourselves
699 		 * against drivers that use hilevel interrupts
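		 * (the first spl8() saves the current level, the second -
		 * made while already at spl8 - yields the spl8-level value
		 * used below as the spin mutex's interrupt priority, and
		 * splx() then restores the saved level)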
700 		 */
701 		ss = spl8();
702 		s = spl8();
703 		splx(ss);
704 		mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
705 		mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
706 		    (void *)bofi_low_cookie);
707 		shadow_list.next = &shadow_list;
708 		shadow_list.prev = &shadow_list;
709 		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
710 			hhash_table[i].hnext = &hhash_table[i];
711 			hhash_table[i].hprev = &hhash_table[i];
712 			dhash_table[i].dnext = &dhash_table[i];
713 			dhash_table[i].dprev = &dhash_table[i];
714 		}
715 		for (i = 1; i < BOFI_NLINKS; i++)
716 			bofi_link_array[i].link = &bofi_link_array[i-1];
717 		bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
718 		/*
719 		 * overlay bus_ops structure
720 		 */
721 		if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
722 			ddi_remove_minor_node(dip, buf);
723 			mutex_destroy(&clone_tab_mutex);
724 			mutex_destroy(&bofi_mutex);
725 			mutex_destroy(&bofi_low_mutex);
726 			return (DDI_FAILURE);
727 		}
728 		/*
729 		 * save dip for getinfo
730 		 */
731 		our_dip = dip;
732 		ddi_report_dev(dip);
733 		initialized = 1;
734 	}
735 	return (DDI_SUCCESS);
736 }
737 
738 
739 static int
740 bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
741 {
742 	char *name;
743 	char buf[80];
744 
745 	if (cmd != DDI_DETACH)
746 		return (DDI_FAILURE);
747 	if (ddi_get_instance(dip) > 0)
748 		return (DDI_FAILURE);
749 	if ((name = ddi_get_name(dip)) == NULL)
750 		return (DDI_FAILURE);
751 	(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
752 	mutex_enter(&bofi_low_mutex);
753 	mutex_enter(&bofi_mutex);
754 	/*
755 	 * make sure test bofi is no longer in use
756 	 */
757 	if (shadow_list.next != &shadow_list || errent_listp != NULL) {
758 		mutex_exit(&bofi_mutex);
759 		mutex_exit(&bofi_low_mutex);
760 		return (DDI_FAILURE);
761 	}
762 	mutex_exit(&bofi_mutex);
763 	mutex_exit(&bofi_low_mutex);
764 
765 	/*
766 	 * restore bus_ops structure
767 	 */
768 	if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
769 		return (DDI_FAILURE);
770 
771 	mutex_destroy(&clone_tab_mutex);
772 	mutex_destroy(&bofi_mutex);
773 	mutex_destroy(&bofi_low_mutex);
774 	ddi_remove_minor_node(dip, buf);
775 	our_dip = NULL;
776 	initialized = 0;
777 	return (DDI_SUCCESS);
778 }
779 
780 
781 /* ARGSUSED */
782 static int
783 bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
784 {
785 	dev_t	dev = (dev_t)arg;
786 	int	minor = (int)getminor(dev);
787 	int	retval;
788 
789 	switch (cmd) {
790 	case DDI_INFO_DEVT2DEVINFO:
791 		if (minor != 0 || our_dip == NULL) {
792 			*result = (void *)NULL;
793 			retval = DDI_FAILURE;
794 		} else {
795 			*result = (void *)our_dip;
796 			retval = DDI_SUCCESS;
797 		}
798 		break;
799 	case DDI_INFO_DEVT2INSTANCE:
800 		*result = (void *)0;
801 		retval = DDI_SUCCESS;
802 		break;
803 	default:
804 		retval = DDI_FAILURE;
805 	}
806 	return (retval);
807 }
808 
809 
810 /* ARGSUSED */
811 static int
812 bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
813 {
814 	int	minor = (int)getminor(*devp);
815 	struct bofi_errent *softc;
816 
817 	/*
818 	 * only allow open on minor=0 - the clone device
819 	 */
820 	if (minor != 0)
821 		return (ENXIO);
822 	/*
823 	 * fail if not attached
824 	 */
825 	if (!initialized)
826 		return (ENXIO);
827 	/*
828 	 * find a free slot and grab it
829 	 */
830 	mutex_enter(&clone_tab_mutex);
831 	for (minor = 1; minor < NCLONES; minor++) {
832 		if (clone_tab[minor] == 0) {
833 			clone_tab[minor] = 1;
834 			break;
835 		}
836 	}
837 	mutex_exit(&clone_tab_mutex);
838 	if (minor == NCLONES)
839 		return (EAGAIN);
840 	/*
841 	 * soft state structure for this clone is used to maintain a list
842 	 * of allocated errdefs so they can be freed on close
843 	 */
844 	if (ddi_soft_state_zalloc(statep, minor) != DDI_SUCCESS) {
845 		mutex_enter(&clone_tab_mutex);
846 		clone_tab[minor] = 0;
847 		mutex_exit(&clone_tab_mutex);
848 		return (EAGAIN);
849 	}
850 	softc = ddi_get_soft_state(statep, minor);
851 	softc->cnext = softc;
852 	softc->cprev = softc;
853 
854 	*devp = makedevice(getmajor(*devp), minor);
855 	return (0);
856 }
857 
858 
859 /* ARGSUSED */
860 static int
861 bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
862 {
863 	int	minor = (int)getminor(dev);
864 	struct bofi_errent *softc;
865 	struct bofi_errent *ep, *next_ep;
866 
867 	softc = ddi_get_soft_state(statep, minor);
868 	if (softc == NULL)
869 		return (ENXIO);
870 	/*
871 	 * find list of errdefs and free them off
872 	 */
873 	for (ep = softc->cnext; ep != softc; ) {
874 		next_ep = ep->cnext;
875 		(void) bofi_errdef_free(ep);
876 		ep = next_ep;
877 	}
878 	/*
879 	 * free clone tab slot
880 	 */
881 	mutex_enter(&clone_tab_mutex);
882 	clone_tab[minor] = 0;
883 	mutex_exit(&clone_tab_mutex);
884 
885 	ddi_soft_state_free(statep, minor);
886 	return (0);
887 }
888 
889 
890 /* ARGSUSED */
891 static int
892 bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
893 	int *rvalp)
894 {
895 	struct bofi_errent *softc;
896 	int	minor = (int)getminor(dev);
897 	struct bofi_errdef errdef;
898 	struct bofi_errctl errctl;
899 	struct bofi_errstate errstate;
900 	void *ed_handle;
901 	struct bofi_get_handles get_handles;
902 	struct bofi_get_hdl_info hdl_info;
903 	struct handle_info *hdlip;
904 	struct handle_info *hib;
905 
906 	char *buffer;
907 	char *bufptr;
908 	char *endbuf;
909 	int req_count, count, err;
910 	char *namep;
911 	struct bofi_shadow *hp;
912 	int retval;
913 	struct bofi_shadow *hhashp;
914 	int i;
915 
916 	switch (cmd) {
917 	case BOFI_ADD_DEF:
918 		/*
919 		 * add a new error definition
920 		 */
921 #ifdef _MULTI_DATAMODEL
922 		switch (ddi_model_convert_from(mode & FMODELS)) {
923 		case DDI_MODEL_ILP32:
924 		{
925 			/*
926 			 * For use when a 32 bit app makes a call into a
927 			 * 64 bit ioctl
928 			 */
929 			struct bofi_errdef32	errdef_32;
930 
931 			if (ddi_copyin((void *)arg, &errdef_32,
932 			    sizeof (struct bofi_errdef32), mode)) {
933 				return (EFAULT);
934 			}
935 			errdef.namesize = errdef_32.namesize;
936 			(void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
937 			errdef.instance = errdef_32.instance;
938 			errdef.rnumber = errdef_32.rnumber;
939 			errdef.offset = errdef_32.offset;
940 			errdef.len = errdef_32.len;
941 			errdef.access_type = errdef_32.access_type;
942 			errdef.access_count = errdef_32.access_count;
943 			errdef.fail_count = errdef_32.fail_count;
944 			errdef.acc_chk = errdef_32.acc_chk;
945 			errdef.optype = errdef_32.optype;
946 			errdef.operand = errdef_32.operand;
947 			errdef.log.logsize = errdef_32.log.logsize;
948 			errdef.log.entries = errdef_32.log.entries;
949 			errdef.log.flags = errdef_32.log.flags;
950 			errdef.log.wrapcnt = errdef_32.log.wrapcnt;
951 			errdef.log.start_time = errdef_32.log.start_time;
952 			errdef.log.stop_time = errdef_32.log.stop_time;
953 			errdef.log.logbase =
954 			    (caddr_t)(uintptr_t)errdef_32.log.logbase;
955 			errdef.errdef_handle = errdef_32.errdef_handle;
956 			break;
957 		}
958 		case DDI_MODEL_NONE:
959 			if (ddi_copyin((void *)arg, &errdef,
960 			    sizeof (struct bofi_errdef), mode))
961 				return (EFAULT);
962 			break;
963 		}
964 #else /* ! _MULTI_DATAMODEL */
965 		if (ddi_copyin((void *)arg, &errdef,
966 		    sizeof (struct bofi_errdef), mode) != 0)
967 			return (EFAULT);
968 #endif /* _MULTI_DATAMODEL */
969 		/*
970 		 * do some validation
971 		 */
972 		if (errdef.fail_count == 0)
973 			errdef.optype = 0;
974 		if (errdef.optype != 0) {
975 			if (errdef.access_type & BOFI_INTR &&
976 			    errdef.optype != BOFI_DELAY_INTR &&
977 			    errdef.optype != BOFI_LOSE_INTR &&
978 			    errdef.optype != BOFI_EXTRA_INTR)
979 				return (EINVAL);
980 			if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
981 			    errdef.optype == BOFI_NO_TRANSFER)
982 				return (EINVAL);
983 			if ((errdef.access_type & (BOFI_PIO_RW)) &&
984 			    errdef.optype != BOFI_EQUAL &&
985 			    errdef.optype != BOFI_OR &&
986 			    errdef.optype != BOFI_XOR &&
987 			    errdef.optype != BOFI_AND &&
988 			    errdef.optype != BOFI_NO_TRANSFER)
989 				return (EINVAL);
990 		}
991 		/*
992 		 * find softstate for this clone, so we can tag
993 		 * new errdef on to it
994 		 */
995 		softc = ddi_get_soft_state(statep, minor);
996 		if (softc == NULL)
997 			return (ENXIO);
998 		/*
999 		 * read in name
1000 		 */
1001 		if (errdef.namesize > NAMESIZE)
1002 			return (EINVAL);
1003 		namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
1004 		(void) strncpy(namep, errdef.name, errdef.namesize);
1005 
1006 		if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
1007 			(void) bofi_errdef_free((struct bofi_errent *)
1008 			    (uintptr_t)errdef.errdef_handle);
1009 			kmem_free(namep, errdef.namesize+1);
1010 			return (EINVAL);
1011 		}
1012 		/*
1013 		 * copy out errdef again, including filled in errdef_handle
1014 		 */
1015 #ifdef _MULTI_DATAMODEL
1016 		switch (ddi_model_convert_from(mode & FMODELS)) {
1017 		case DDI_MODEL_ILP32:
1018 		{
1019 			/*
1020 			 * For use when a 32 bit app makes a call into a
1021 			 * 64 bit ioctl
1022 			 */
1023 			struct bofi_errdef32	errdef_32;
1024 
1025 			errdef_32.namesize = errdef.namesize;
1026 			(void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
1027 			errdef_32.instance = errdef.instance;
1028 			errdef_32.rnumber = errdef.rnumber;
1029 			errdef_32.offset = errdef.offset;
1030 			errdef_32.len = errdef.len;
1031 			errdef_32.access_type = errdef.access_type;
1032 			errdef_32.access_count = errdef.access_count;
1033 			errdef_32.fail_count = errdef.fail_count;
1034 			errdef_32.acc_chk = errdef.acc_chk;
1035 			errdef_32.optype = errdef.optype;
1036 			errdef_32.operand = errdef.operand;
1037 			errdef_32.log.logsize = errdef.log.logsize;
1038 			errdef_32.log.entries = errdef.log.entries;
1039 			errdef_32.log.flags = errdef.log.flags;
1040 			errdef_32.log.wrapcnt = errdef.log.wrapcnt;
1041 			errdef_32.log.start_time = errdef.log.start_time;
1042 			errdef_32.log.stop_time = errdef.log.stop_time;
1043 			errdef_32.log.logbase =
1044 			    (caddr32_t)(uintptr_t)errdef.log.logbase;
1045 			errdef_32.errdef_handle = errdef.errdef_handle;
1046 			if (ddi_copyout(&errdef_32, (void *)arg,
1047 			    sizeof (struct bofi_errdef32), mode) != 0) {
1048 				(void) bofi_errdef_free((struct bofi_errent *)
1049 				    (uintptr_t)errdef.errdef_handle);
1050 				kmem_free(namep, errdef.namesize+1);
1051 				return (EFAULT);
1052 			}
1053 			break;
1054 		}
1055 		case DDI_MODEL_NONE:
1056 			if (ddi_copyout(&errdef, (void *)arg,
1057 			    sizeof (struct bofi_errdef), mode) != 0) {
1058 				(void) bofi_errdef_free((struct bofi_errent *)
1059 				    (uintptr_t)errdef.errdef_handle);
1060 				kmem_free(namep, errdef.namesize+1);
1061 				return (EFAULT);
1062 			}
1063 			break;
1064 		}
1065 #else /* ! _MULTI_DATAMODEL */
1066 		if (ddi_copyout(&errdef, (void *)arg,
1067 		    sizeof (struct bofi_errdef), mode) != 0) {
1068 			(void) bofi_errdef_free((struct bofi_errent *)
1069 			    (uintptr_t)errdef.errdef_handle);
1070 			kmem_free(namep, errdef.namesize+1);
1071 			return (EFAULT);
1072 		}
1073 #endif /* _MULTI_DATAMODEL */
1074 		return (0);
1075 	case BOFI_DEL_DEF:
1076 		/*
1077 		 * delete existing errdef
1078 		 */
1079 		if (ddi_copyin((void *)arg, &ed_handle,
1080 		    sizeof (void *), mode) != 0)
1081 			return (EFAULT);
1082 		return (bofi_errdef_free((struct bofi_errent *)ed_handle));
1083 	case BOFI_START:
1084 		/*
1085 		 * start all errdefs corresponding to
1086 		 * this name and instance
1087 		 */
1088 		if (ddi_copyin((void *)arg, &errctl,
1089 		    sizeof (struct bofi_errctl), mode) != 0)
1090 			return (EFAULT);
1091 		/*
1092 		 * copy in name
1093 		 */
1094 		if (errctl.namesize > NAMESIZE)
1095 			return (EINVAL);
1096 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1097 		(void) strncpy(namep, errctl.name, errctl.namesize);
1098 		bofi_start(&errctl, namep);
1099 		kmem_free(namep, errctl.namesize+1);
1100 		return (0);
1101 	case BOFI_STOP:
1102 		/*
1103 		 * stop all errdefs corresponding to
1104 		 * this name and instance
1105 		 */
1106 		if (ddi_copyin((void *)arg, &errctl,
1107 		    sizeof (struct bofi_errctl), mode) != 0)
1108 			return (EFAULT);
1109 		/*
1110 		 * copy in name
1111 		 */
1112 		if (errctl.namesize > NAMESIZE)
1113 			return (EINVAL);
1114 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1115 		(void) strncpy(namep, errctl.name, errctl.namesize);
1116 		bofi_stop(&errctl, namep);
1117 		kmem_free(namep, errctl.namesize+1);
1118 		return (0);
1119 	case BOFI_BROADCAST:
1120 		/*
1121 		 * wakeup all errdefs corresponding to
1122 		 * this name and instance
1123 		 */
1124 		if (ddi_copyin((void *)arg, &errctl,
1125 		    sizeof (struct bofi_errctl), mode) != 0)
1126 			return (EFAULT);
1127 		/*
1128 		 * copy in name
1129 		 */
1130 		if (errctl.namesize > NAMESIZE)
1131 			return (EINVAL);
1132 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1133 		(void) strncpy(namep, errctl.name, errctl.namesize);
1134 		bofi_broadcast(&errctl, namep);
1135 		kmem_free(namep, errctl.namesize+1);
1136 		return (0);
1137 	case BOFI_CLEAR_ACC_CHK:
1138 		/*
1139 		 * clear "acc_chk" for all errdefs corresponding to
1140 		 * this name and instance
1141 		 */
1142 		if (ddi_copyin((void *)arg, &errctl,
1143 		    sizeof (struct bofi_errctl), mode) != 0)
1144 			return (EFAULT);
1145 		/*
1146 		 * copy in name
1147 		 */
1148 		if (errctl.namesize > NAMESIZE)
1149 			return (EINVAL);
1150 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1151 		(void) strncpy(namep, errctl.name, errctl.namesize);
1152 		bofi_clear_acc_chk(&errctl, namep);
1153 		kmem_free(namep, errctl.namesize+1);
1154 		return (0);
1155 	case BOFI_CLEAR_ERRORS:
1156 		/*
1157 		 * set "fail_count" to 0 for all errdefs corresponding to
1158 		 * this name and instance whose "access_count"
1159 		 * has expired.
1160 		 */
1161 		if (ddi_copyin((void *)arg, &errctl,
1162 		    sizeof (struct bofi_errctl), mode) != 0)
1163 			return (EFAULT);
1164 		/*
1165 		 * copy in name
1166 		 */
1167 		if (errctl.namesize > NAMESIZE)
1168 			return (EINVAL);
1169 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1170 		(void) strncpy(namep, errctl.name, errctl.namesize);
1171 		bofi_clear_errors(&errctl, namep);
1172 		kmem_free(namep, errctl.namesize+1);
1173 		return (0);
1174 	case BOFI_CLEAR_ERRDEFS:
1175 		/*
1176 		 * set "access_count" and "fail_count" to 0 for all errdefs
1177 		 * corresponding to this name and instance
1178 		 */
1179 		if (ddi_copyin((void *)arg, &errctl,
1180 		    sizeof (struct bofi_errctl), mode) != 0)
1181 			return (EFAULT);
1182 		/*
1183 		 * copy in name
1184 		 */
1185 		if (errctl.namesize > NAMESIZE)
1186 			return (EINVAL);
1187 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1188 		(void) strncpy(namep, errctl.name, errctl.namesize);
1189 		bofi_clear_errdefs(&errctl, namep);
1190 		kmem_free(namep, errctl.namesize+1);
1191 		return (0);
1192 	case BOFI_CHK_STATE:
1193 	{
1194 		struct acc_log_elem *klg;
1195 		size_t uls;
1196 		/*
1197 		 * get state for this errdef - read in dummy errstate
1198 		 * with just the errdef_handle filled in
1199 		 */
1200 #ifdef _MULTI_DATAMODEL
1201 		switch (ddi_model_convert_from(mode & FMODELS)) {
1202 		case DDI_MODEL_ILP32:
1203 		{
1204 			/*
1205 			 * For use when a 32 bit app makes a call into a
1206 			 * 64 bit ioctl
1207 			 */
1208 			struct bofi_errstate32	errstate_32;
1209 
1210 			if (ddi_copyin((void *)arg, &errstate_32,
1211 			    sizeof (struct bofi_errstate32), mode) != 0) {
1212 				return (EFAULT);
1213 			}
1214 			errstate.fail_time = errstate_32.fail_time;
1215 			errstate.msg_time = errstate_32.msg_time;
1216 			errstate.access_count = errstate_32.access_count;
1217 			errstate.fail_count = errstate_32.fail_count;
1218 			errstate.acc_chk = errstate_32.acc_chk;
1219 			errstate.errmsg_count = errstate_32.errmsg_count;
1220 			(void) strncpy(errstate.buffer, errstate_32.buffer,
1221 			    ERRMSGSIZE);
1222 			errstate.severity = errstate_32.severity;
1223 			errstate.log.logsize = errstate_32.log.logsize;
1224 			errstate.log.entries = errstate_32.log.entries;
1225 			errstate.log.flags = errstate_32.log.flags;
1226 			errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1227 			errstate.log.start_time = errstate_32.log.start_time;
1228 			errstate.log.stop_time = errstate_32.log.stop_time;
1229 			errstate.log.logbase =
1230 			    (caddr_t)(uintptr_t)errstate_32.log.logbase;
1231 			errstate.errdef_handle = errstate_32.errdef_handle;
1232 			break;
1233 		}
1234 		case DDI_MODEL_NONE:
1235 			if (ddi_copyin((void *)arg, &errstate,
1236 			    sizeof (struct bofi_errstate), mode) != 0)
1237 				return (EFAULT);
1238 			break;
1239 		}
1240 #else /* ! _MULTI_DATAMODEL */
1241 		if (ddi_copyin((void *)arg, &errstate,
1242 		    sizeof (struct bofi_errstate), mode) != 0)
1243 			return (EFAULT);
1244 #endif /* _MULTI_DATAMODEL */
1245 		if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
1246 			return (EINVAL);
1247 		/*
1248 		 * copy out real errstate structure
1249 		 */
1250 		uls = errstate.log.logsize;
1251 		if (errstate.log.entries > uls && uls)
1252 			/* insufficient user memory */
1253 			errstate.log.entries = uls;
1254 		/* always pass back a time */
1255 		if (errstate.log.stop_time == 0ul)
1256 			(void) drv_getparm(TIME, &(errstate.log.stop_time));
1257 
1258 #ifdef _MULTI_DATAMODEL
1259 		switch (ddi_model_convert_from(mode & FMODELS)) {
1260 		case DDI_MODEL_ILP32:
1261 		{
1262 			/*
1263 			 * For use when a 32 bit app makes a call into a
1264 			 * 64 bit ioctl
1265 			 */
1266 			struct bofi_errstate32	errstate_32;
1267 
1268 			errstate_32.fail_time = errstate.fail_time;
1269 			errstate_32.msg_time = errstate.msg_time;
1270 			errstate_32.access_count = errstate.access_count;
1271 			errstate_32.fail_count = errstate.fail_count;
1272 			errstate_32.acc_chk = errstate.acc_chk;
1273 			errstate_32.errmsg_count = errstate.errmsg_count;
1274 			(void) strncpy(errstate_32.buffer, errstate.buffer,
1275 			    ERRMSGSIZE);
1276 			errstate_32.severity = errstate.severity;
1277 			errstate_32.log.logsize = errstate.log.logsize;
1278 			errstate_32.log.entries = errstate.log.entries;
1279 			errstate_32.log.flags = errstate.log.flags;
1280 			errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1281 			errstate_32.log.start_time = errstate.log.start_time;
1282 			errstate_32.log.stop_time = errstate.log.stop_time;
1283 			errstate_32.log.logbase =
1284 			    (caddr32_t)(uintptr_t)errstate.log.logbase;
1285 			errstate_32.errdef_handle = errstate.errdef_handle;
1286 			if (ddi_copyout(&errstate_32, (void *)arg,
1287 			    sizeof (struct bofi_errstate32), mode) != 0)
1288 				return (EFAULT);
1289 			break;
1290 		}
1291 		case DDI_MODEL_NONE:
1292 			if (ddi_copyout(&errstate, (void *)arg,
1293 			    sizeof (struct bofi_errstate), mode) != 0)
1294 				return (EFAULT);
1295 			break;
1296 		}
1297 #else /* ! _MULTI_DATAMODEL */
1298 		if (ddi_copyout(&errstate, (void *)arg,
1299 		    sizeof (struct bofi_errstate), mode) != 0)
1300 			return (EFAULT);
1301 #endif /* _MULTI_DATAMODEL */
1302 		if (uls && errstate.log.entries &&
1303 		    ddi_copyout(klg, errstate.log.logbase,
1304 		    errstate.log.entries * sizeof (struct acc_log_elem),
1305 		    mode) != 0) {
1306 			return (EFAULT);
1307 		}
1308 		return (retval);
1309 	}
1310 	case BOFI_CHK_STATE_W:
1311 	{
1312 		struct acc_log_elem *klg;
1313 		size_t uls;
1314 		/*
1315 		 * get state for this errdef - read in dummy errstate
1316 		 * with just the errdef_handle filled in. Then wait for
1317 		 * a ddi_report_fault message to come back
1318 		 */
1319 #ifdef _MULTI_DATAMODEL
1320 		switch (ddi_model_convert_from(mode & FMODELS)) {
1321 		case DDI_MODEL_ILP32:
1322 		{
1323 			/*
1324 			 * For use when a 32 bit app makes a call into a
1325 			 * 64 bit ioctl
1326 			 */
1327 			struct bofi_errstate32	errstate_32;
1328 
1329 			if (ddi_copyin((void *)arg, &errstate_32,
1330 			    sizeof (struct bofi_errstate32), mode) != 0) {
1331 				return (EFAULT);
1332 			}
1333 			errstate.fail_time = errstate_32.fail_time;
1334 			errstate.msg_time = errstate_32.msg_time;
1335 			errstate.access_count = errstate_32.access_count;
1336 			errstate.fail_count = errstate_32.fail_count;
1337 			errstate.acc_chk = errstate_32.acc_chk;
1338 			errstate.errmsg_count = errstate_32.errmsg_count;
1339 			(void) strncpy(errstate.buffer, errstate_32.buffer,
1340 			    ERRMSGSIZE);
1341 			errstate.severity = errstate_32.severity;
1342 			errstate.log.logsize = errstate_32.log.logsize;
1343 			errstate.log.entries = errstate_32.log.entries;
1344 			errstate.log.flags = errstate_32.log.flags;
1345 			errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1346 			errstate.log.start_time = errstate_32.log.start_time;
1347 			errstate.log.stop_time = errstate_32.log.stop_time;
1348 			errstate.log.logbase =
1349 			    (caddr_t)(uintptr_t)errstate_32.log.logbase;
1350 			errstate.errdef_handle = errstate_32.errdef_handle;
1351 			break;
1352 		}
1353 		case DDI_MODEL_NONE:
1354 			if (ddi_copyin((void *)arg, &errstate,
1355 			    sizeof (struct bofi_errstate), mode) != 0)
1356 				return (EFAULT);
1357 			break;
1358 		}
1359 #else /* ! _MULTI_DATAMODEL */
1360 		if (ddi_copyin((void *)arg, &errstate,
1361 		    sizeof (struct bofi_errstate), mode) != 0)
1362 			return (EFAULT);
1363 #endif /* _MULTI_DATAMODEL */
1364 		if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
1365 			return (EINVAL);
1366 		/*
1367 		 * copy out real errstate structure
1368 		 */
1369 		uls = errstate.log.logsize;
1371 		if (errstate.log.entries > uls && uls)
1372 			/* insufficient user memory */
1373 			errstate.log.entries = uls;
1374 		/* always pass back a time */
1375 		if (errstate.log.stop_time == 0ul)
1376 			(void) drv_getparm(TIME, &(errstate.log.stop_time));
1377 
1378 #ifdef _MULTI_DATAMODEL
1379 		switch (ddi_model_convert_from(mode & FMODELS)) {
1380 		case DDI_MODEL_ILP32:
1381 		{
1382 			/*
1383 			 * For use when a 32 bit app makes a call into a
1384 			 * 64 bit ioctl
1385 			 */
1386 			struct bofi_errstate32	errstate_32;
1387 
1388 			errstate_32.fail_time = errstate.fail_time;
1389 			errstate_32.msg_time = errstate.msg_time;
1390 			errstate_32.access_count = errstate.access_count;
1391 			errstate_32.fail_count = errstate.fail_count;
1392 			errstate_32.acc_chk = errstate.acc_chk;
1393 			errstate_32.errmsg_count = errstate.errmsg_count;
1394 			(void) strncpy(errstate_32.buffer, errstate.buffer,
1395 			    ERRMSGSIZE);
1396 			errstate_32.severity = errstate.severity;
1397 			errstate_32.log.logsize = errstate.log.logsize;
1398 			errstate_32.log.entries = errstate.log.entries;
1399 			errstate_32.log.flags = errstate.log.flags;
1400 			errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1401 			errstate_32.log.start_time = errstate.log.start_time;
1402 			errstate_32.log.stop_time = errstate.log.stop_time;
1403 			errstate_32.log.logbase =
1404 			    (caddr32_t)(uintptr_t)errstate.log.logbase;
1405 			errstate_32.errdef_handle = errstate.errdef_handle;
1406 			if (ddi_copyout(&errstate_32, (void *)arg,
1407 			    sizeof (struct bofi_errstate32), mode) != 0)
1408 				return (EFAULT);
1409 			break;
1410 		}
1411 		case DDI_MODEL_NONE:
1412 			if (ddi_copyout(&errstate, (void *)arg,
1413 			    sizeof (struct bofi_errstate), mode) != 0)
1414 				return (EFAULT);
1415 			break;
1416 		}
1417 #else /* ! _MULTI_DATAMODEL */
1418 		if (ddi_copyout(&errstate, (void *)arg,
1419 		    sizeof (struct bofi_errstate), mode) != 0)
1420 			return (EFAULT);
1421 #endif /* _MULTI_DATAMODEL */
1422 
1423 		if (uls && errstate.log.entries &&
1424 		    ddi_copyout(klg, errstate.log.logbase,
1425 		    errstate.log.entries * sizeof (struct acc_log_elem),
1426 		    mode) != 0) {
1427 			return (EFAULT);
1428 		}
1429 		return (retval);
1430 	}
1431 	case BOFI_GET_HANDLES:
1432 		/*
1433 		 * display existing handles
1434 		 */
1435 #ifdef _MULTI_DATAMODEL
1436 		switch (ddi_model_convert_from(mode & FMODELS)) {
1437 		case DDI_MODEL_ILP32:
1438 		{
1439 			/*
1440 			 * For use when a 32 bit app makes a call into a
1441 			 * 64 bit ioctl
1442 			 */
1443 			struct bofi_get_handles32	get_handles_32;
1444 
1445 			if (ddi_copyin((void *)arg, &get_handles_32,
1446 			    sizeof (get_handles_32), mode) != 0) {
1447 				return (EFAULT);
1448 			}
1449 			get_handles.namesize = get_handles_32.namesize;
1450 			(void) strncpy(get_handles.name, get_handles_32.name,
1451 			    NAMESIZE);
1452 			get_handles.instance = get_handles_32.instance;
1453 			get_handles.count = get_handles_32.count;
1454 			get_handles.buffer =
1455 			    (caddr_t)(uintptr_t)get_handles_32.buffer;
1456 			break;
1457 		}
1458 		case DDI_MODEL_NONE:
1459 			if (ddi_copyin((void *)arg, &get_handles,
1460 			    sizeof (get_handles), mode) != 0)
1461 				return (EFAULT);
1462 			break;
1463 		}
1464 #else /* ! _MULTI_DATAMODEL */
1465 		if (ddi_copyin((void *)arg, &get_handles,
1466 		    sizeof (get_handles), mode) != 0)
1467 			return (EFAULT);
1468 #endif /* _MULTI_DATAMODEL */
1469 		/*
1470 		 * read in name
1471 		 */
1472 		if (get_handles.namesize > NAMESIZE)
1473 			return (EINVAL);
1474 		namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
1475 		(void) strncpy(namep, get_handles.name, get_handles.namesize);
1476 		req_count = get_handles.count;
1477 		bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
1478 		endbuf = bufptr + req_count;
1479 		/*
1480 		 * display existing handles
1481 		 */
1482 		mutex_enter(&bofi_low_mutex);
1483 		mutex_enter(&bofi_mutex);
1484 		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1485 			hhashp = &hhash_table[i];
1486 			for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1487 				if (!driver_under_test(hp->dip))
1488 					continue;
1489 				if (ddi_name_to_major(ddi_get_name(hp->dip)) !=
1490 				    ddi_name_to_major(namep))
1491 					continue;
1492 				if (hp->instance != get_handles.instance)
1493 					continue;
1494 				/*
1495 				 * print information per handle - note that
1496 				 * DMA* means an unbound DMA handle
1497 				 */
1498 				(void) snprintf(bufptr, (size_t)(endbuf-bufptr),
1499 				    "  %s %d %s ", hp->name, hp->instance,
1500 				    (hp->type == BOFI_INT_HDL) ? "INTR" :
1501 				    (hp->type == BOFI_ACC_HDL) ? "PIO" :
1502 				    (hp->type == BOFI_DMA_HDL) ? "DMA" :
1503 				    (hp->hparrayp != NULL) ? "DVMA" : "DMA*");
1504 				bufptr += strlen(bufptr);
1505 				if (hp->type == BOFI_ACC_HDL) {
1506 					if (hp->len == INT_MAX - hp->offset)
1507 						(void) snprintf(bufptr,
1508 						    (size_t)(endbuf-bufptr),
1509 						    "reg set %d off 0x%llx\n",
1510 						    hp->rnumber, hp->offset);
1511 					else
1512 						(void) snprintf(bufptr,
1513 						    (size_t)(endbuf-bufptr),
1514 						    "reg set %d off 0x%llx"
1515 						    " len 0x%llx\n",
1516 						    hp->rnumber, hp->offset,
1517 						    hp->len);
1518 				} else if (hp->type == BOFI_DMA_HDL)
1519 					(void) snprintf(bufptr,
1520 					    (size_t)(endbuf-bufptr),
1521 					    "handle no %d len 0x%llx"
1522 					    " addr 0x%p\n", hp->rnumber,
1523 					    hp->len, (void *)hp->addr);
1524 				else if (hp->type == BOFI_NULL &&
1525 				    hp->hparrayp == NULL)
1526 					(void) snprintf(bufptr,
1527 					    (size_t)(endbuf-bufptr),
1528 					    "handle no %d\n", hp->rnumber);
1529 				else
1530 					(void) snprintf(bufptr,
1531 					    (size_t)(endbuf-bufptr), "\n");
1532 				bufptr += strlen(bufptr);
1533 			}
1534 		}
1535 		mutex_exit(&bofi_mutex);
1536 		mutex_exit(&bofi_low_mutex);
1537 		err = ddi_copyout(buffer, get_handles.buffer, req_count, mode);
1538 		kmem_free(namep, get_handles.namesize+1);
1539 		kmem_free(buffer, req_count);
1540 		if (err != 0)
1541 			return (EFAULT);
1542 		else
1543 			return (0);
1544 	case BOFI_GET_HANDLE_INFO:
1545 		/*
1546 		 * display existing handles
1547 		 */
1548 #ifdef _MULTI_DATAMODEL
1549 		switch (ddi_model_convert_from(mode & FMODELS)) {
1550 		case DDI_MODEL_ILP32:
1551 		{
1552 			/*
1553 			 * For use when a 32 bit app makes a call into a
1554 			 * 64 bit ioctl
1555 			 */
1556 			struct bofi_get_hdl_info32	hdl_info_32;
1557 
1558 			if (ddi_copyin((void *)arg, &hdl_info_32,
1559 			    sizeof (hdl_info_32), mode)) {
1560 				return (EFAULT);
1561 			}
1562 			hdl_info.namesize = hdl_info_32.namesize;
1563 			(void) strncpy(hdl_info.name, hdl_info_32.name,
1564 			    NAMESIZE);
1565 			hdl_info.count = hdl_info_32.count;
1566 			hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli;
1567 			break;
1568 		}
1569 		case DDI_MODEL_NONE:
1570 			if (ddi_copyin((void *)arg, &hdl_info,
1571 			    sizeof (hdl_info), mode))
1572 				return (EFAULT);
1573 			break;
1574 		}
1575 #else /* ! _MULTI_DATAMODEL */
1576 		if (ddi_copyin((void *)arg, &hdl_info,
1577 		    sizeof (hdl_info), mode))
1578 			return (EFAULT);
1579 #endif /* _MULTI_DATAMODEL */
1580 		if (hdl_info.namesize > NAMESIZE)
1581 			return (EINVAL);
1582 		namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
1583 		(void) strncpy(namep, hdl_info.name, hdl_info.namesize);
1584 		req_count = hdl_info.count;
1585 		count = hdl_info.count = 0; /* the actual no of handles */
1586 		if (req_count > 0) {
1587 			hib = hdlip =
1588 			    kmem_zalloc(req_count * sizeof (struct handle_info),
1589 			    KM_SLEEP);
1590 		} else {
1591 			hib = hdlip = NULL;
1592 			req_count = hdl_info.count = 0;
1593 		}
1594 
1595 		/*
1596 		 * display existing handles
1597 		 */
1598 		mutex_enter(&bofi_low_mutex);
1599 		mutex_enter(&bofi_mutex);
1600 		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1601 			hhashp = &hhash_table[i];
1602 			for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1603 				if (!driver_under_test(hp->dip) ||
1604 				    ddi_name_to_major(ddi_get_name(hp->dip)) !=
1605 				    ddi_name_to_major(namep) ||
1606 				    ++(hdl_info.count) > req_count ||
1607 				    count == req_count)
1608 					continue;
1609 
1610 				hdlip->instance = hp->instance;
1611 				hdlip->rnumber = hp->rnumber;
1612 				switch (hp->type) {
1613 				case BOFI_ACC_HDL:
1614 					hdlip->access_type = BOFI_PIO_RW;
1615 					hdlip->offset = hp->offset;
1616 					hdlip->len = hp->len;
1617 					break;
1618 				case BOFI_DMA_HDL:
1619 					hdlip->access_type = 0;
1620 					if (hp->flags & DDI_DMA_WRITE)
1621 						hdlip->access_type |=
1622 						    BOFI_DMA_W;
1623 					if (hp->flags & DDI_DMA_READ)
1624 						hdlip->access_type |=
1625 						    BOFI_DMA_R;
1626 					hdlip->len = hp->len;
1627 					hdlip->addr_cookie =
1628 					    (uint64_t)(uintptr_t)hp->addr;
1629 					break;
1630 				case BOFI_INT_HDL:
1631 					hdlip->access_type = BOFI_INTR;
1632 					break;
1633 				default:
1634 					hdlip->access_type = 0;
1635 					break;
1636 				}
1637 				hdlip++;
1638 				count++;
1639 			}
1640 		}
1641 		mutex_exit(&bofi_mutex);
1642 		mutex_exit(&bofi_low_mutex);
1643 		err = 0;
1644 #ifdef _MULTI_DATAMODEL
1645 		switch (ddi_model_convert_from(mode & FMODELS)) {
1646 		case DDI_MODEL_ILP32:
1647 		{
1648 			/*
1649 			 * For use when a 32 bit app makes a call into a
1650 			 * 64 bit ioctl
1651 			 */
1652 			struct bofi_get_hdl_info32	hdl_info_32;
1653 
1654 			hdl_info_32.namesize = hdl_info.namesize;
1655 			(void) strncpy(hdl_info_32.name, hdl_info.name,
1656 			    NAMESIZE);
1657 			hdl_info_32.count = hdl_info.count;
1658 			hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
1659 			if (ddi_copyout(&hdl_info_32, (void *)arg,
1660 			    sizeof (hdl_info_32), mode) != 0) {
1661 				kmem_free(namep, hdl_info.namesize+1);
1662 				if (req_count > 0)
1663 					kmem_free(hib,
1664 					    req_count * sizeof (*hib));
1665 				return (EFAULT);
1666 			}
1667 			break;
1668 		}
1669 		case DDI_MODEL_NONE:
1670 			if (ddi_copyout(&hdl_info, (void *)arg,
1671 			    sizeof (hdl_info), mode) != 0) {
1672 				kmem_free(namep, hdl_info.namesize+1);
1673 				if (req_count > 0)
1674 					kmem_free(hib,
1675 					    req_count * sizeof (*hib));
1676 				return (EFAULT);
1677 			}
1678 			break;
1679 		}
1680 #else /* ! _MULTI_DATAMODEL */
1681 		if (ddi_copyout(&hdl_info, (void *)arg,
1682 		    sizeof (hdl_info), mode) != 0) {
1683 			kmem_free(namep, hdl_info.namesize+1);
1684 			if (req_count > 0)
1685 				kmem_free(hib, req_count * sizeof (*hib));
1686 			return (EFAULT);
1687 		}
1688 #endif /* ! _MULTI_DATAMODEL */
1689 		if (count > 0) {
1690 			if (ddi_copyout(hib, hdl_info.hdli,
1691 			    count * sizeof (*hib), mode) != 0) {
1692 				kmem_free(namep, hdl_info.namesize+1);
1693 				if (req_count > 0)
1694 					kmem_free(hib,
1695 					    req_count * sizeof (*hib));
1696 				return (EFAULT);
1697 			}
1698 		}
1699 		kmem_free(namep, hdl_info.namesize+1);
1700 		if (req_count > 0)
1701 			kmem_free(hib, req_count * sizeof (*hib));
1702 		return (err);
1703 	default:
1704 		return (ENOTTY);
1705 	}
1706 }
1707 
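/*
 * Illustrative sequence for a userland test program driving the above
 * ioctls (hypothetical pseudo-code - exact device path and error
 * handling omitted):
 *
 *	fd = open(<bofi control minor node>, O_RDWR);  clone open
 *	ioctl(fd, BOFI_ADD_DEF, &errdef);     create errdef; handle passed
 *	                                      back in errdef.errdef_handle
 *	ioctl(fd, BOFI_START, &errctl);       activate errdefs matching
 *	                                      name and instance
 *	ioctl(fd, BOFI_CHK_STATE, &errstate); poll resulting state and log
 *	close(fd);                            any remaining errdefs freed
 */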
1708 
1709 /*
1710  * add a new error definition
1711  */
1712 static int
1713 bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
1714 	struct bofi_errent *softc)
1715 {
1716 	struct bofi_errent *ep;
1717 	struct bofi_shadow *hp;
1718 	struct bofi_link   *lp;
1719 
1720 	/*
1721 	 * allocate errdef structure and put on in-use list
1722 	 */
1723 	ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
1724 	ep->errdef = *errdefp;
1725 	ep->name = namep;
1726 	ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
1727 	ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
1728 	cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
1729 	/*
1730 	 * allocate space for logging
1731 	 */
1732 	ep->errdef.log.entries = 0;
1733 	ep->errdef.log.wrapcnt = 0;
1734 	if (ep->errdef.access_type & BOFI_LOG)
1735 		ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
1736 		    ep->errdef.log.logsize, KM_SLEEP);
1737 	else
1738 		ep->logbase = NULL;
1739 	/*
1740 	 * put on in-use list
1741 	 */
1742 	mutex_enter(&bofi_low_mutex);
1743 	mutex_enter(&bofi_mutex);
1744 	ep->next = errent_listp;
1745 	errent_listp = ep;
1746 	/*
1747 	 * and add it to the per-clone list
1748 	 */
1749 	ep->cnext = softc->cnext;
1750 	softc->cnext->cprev = ep;
1751 	ep->cprev = softc;
1752 	softc->cnext = ep;
1753 
1754 	/*
1755 	 * look for corresponding shadow handle structures and if we find any
1756 	 * tag this errdef structure on to their link lists.
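	 * (for DMA errdefs, the LLSZMASK arithmetic below checks that the
	 * range covers at least one naturally aligned 64-bit word of the
	 * mapping, since DMA corruption is applied in uint64_t units)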
1757 	 */
1758 	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1759 		if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
1760 		    hp->instance == errdefp->instance &&
1761 		    (((errdefp->access_type & BOFI_DMA_RW) &&
1762 		    (ep->errdef.rnumber == -1 ||
1763 		    hp->rnumber == ep->errdef.rnumber) &&
1764 		    hp->type == BOFI_DMA_HDL &&
1765 		    (((uintptr_t)(hp->addr + ep->errdef.offset +
1766 		    ep->errdef.len) & ~LLSZMASK) >
1767 		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
1768 		    LLSZMASK) & ~LLSZMASK))) ||
1769 		    ((errdefp->access_type & BOFI_INTR) &&
1770 		    hp->type == BOFI_INT_HDL) ||
1771 		    ((errdefp->access_type & BOFI_PIO_RW) &&
1772 		    hp->type == BOFI_ACC_HDL &&
1773 		    (errdefp->rnumber == -1 ||
1774 		    hp->rnumber == errdefp->rnumber) &&
1775 		    (errdefp->len == 0 ||
1776 		    hp->offset < errdefp->offset + errdefp->len) &&
1777 		    hp->offset + hp->len > errdefp->offset))) {
1778 			lp = bofi_link_freelist;
1779 			if (lp != NULL) {
1780 				bofi_link_freelist = lp->link;
1781 				lp->errentp = ep;
1782 				lp->link = hp->link;
1783 				hp->link = lp;
1784 			}
1785 		}
1786 	}
1787 	errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
1788 	mutex_exit(&bofi_mutex);
1789 	mutex_exit(&bofi_low_mutex);
1790 	ep->softintr_id = NULL;
1791 	return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
1792 	    NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
1793 }
1794 
1795 
1796 /*
1797  * delete existing errdef
1798  */
1799 static int
1800 bofi_errdef_free(struct bofi_errent *ep)
1801 {
1802 	struct bofi_errent *hep, *prev_hep;
1803 	struct bofi_link *lp, *prev_lp, *next_lp;
1804 	struct bofi_shadow *hp;
1805 
1806 	mutex_enter(&bofi_low_mutex);
1807 	mutex_enter(&bofi_mutex);
1808 	/*
1809 	 * don't just assume it's a valid ep - check that it's on the
1810 	 * in-use list
1811 	 */
1812 	prev_hep = NULL;
1813 	for (hep = errent_listp; hep != NULL; ) {
1814 		if (hep == ep)
1815 			break;
1816 		prev_hep = hep;
1817 		hep = hep->next;
1818 	}
1819 	if (hep == NULL) {
1820 		mutex_exit(&bofi_mutex);
1821 		mutex_exit(&bofi_low_mutex);
1822 		return (EINVAL);
1823 	}
1824 	/*
1825 	 * found it - delete from in-use list
1826 	 */
1827 
1828 	if (prev_hep)
1829 		prev_hep->next = hep->next;
1830 	else
1831 		errent_listp = hep->next;
1832 	/*
1833 	 * and take it off the per-clone list
1834 	 */
1835 	hep->cnext->cprev = hep->cprev;
1836 	hep->cprev->cnext = hep->cnext;
1837 	/*
1838 	 * see if we are on any shadow handle link lists - and if we
1839 	 * are then take us off
1840 	 */
1841 	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1842 		prev_lp = NULL;
1843 		for (lp = hp->link; lp != NULL; ) {
1844 			if (lp->errentp == ep) {
1845 				if (prev_lp)
1846 					prev_lp->link = lp->link;
1847 				else
1848 					hp->link = lp->link;
1849 				next_lp = lp->link;
1850 				lp->link = bofi_link_freelist;
1851 				bofi_link_freelist = lp;
1852 				lp = next_lp;
1853 			} else {
1854 				prev_lp = lp;
1855 				lp = lp->link;
1856 			}
1857 		}
1858 	}
1859 	mutex_exit(&bofi_mutex);
1860 	mutex_exit(&bofi_low_mutex);
1861 
1862 	cv_destroy(&ep->cv);
1863 	kmem_free(ep->name, ep->errdef.namesize+1);
1864 	if ((ep->errdef.access_type & BOFI_LOG) &&
1865 	    ep->errdef.log.logsize && ep->logbase) /* double check */
1866 		kmem_free(ep->logbase,
1867 		    sizeof (struct acc_log_elem) * ep->errdef.log.logsize);
1868 
1869 	if (ep->softintr_id)
1870 		ddi_remove_softintr(ep->softintr_id);
1871 	kmem_free(ep, sizeof (struct bofi_errent));
1872 	return (0);
1873 }
1874 
1875 
1876 /*
1877  * start all errdefs corresponding to this name and instance
1878  */
1879 static void
1880 bofi_start(struct bofi_errctl *errctlp, char *namep)
1881 {
1882 	struct bofi_errent *ep;
1883 
1884 	/*
1885 	 * look for any errdefs with matching name and instance
1886 	 */
1887 	mutex_enter(&bofi_low_mutex);
1888 	for (ep = errent_listp; ep != NULL; ep = ep->next)
1889 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1890 		    errctlp->instance == ep->errdef.instance) {
1891 			ep->state |= BOFI_DEV_ACTIVE;
1892 			(void) drv_getparm(TIME, &(ep->errdef.log.start_time));
1893 			ep->errdef.log.stop_time = 0ul;
1894 		}
1895 	mutex_exit(&bofi_low_mutex);
1896 }
1897 
1898 
1899 /*
1900  * stop all errdefs corresponding to this name and instance
1901  */
1902 static void
1903 bofi_stop(struct bofi_errctl *errctlp, char *namep)
1904 {
1905 	struct bofi_errent *ep;
1906 
1907 	/*
1908 	 * look for any errdefs with matching name and instance
1909 	 */
1910 	mutex_enter(&bofi_low_mutex);
1911 	for (ep = errent_listp; ep != NULL; ep = ep->next)
1912 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1913 		    errctlp->instance == ep->errdef.instance) {
1914 			ep->state &= ~BOFI_DEV_ACTIVE;
1915 			if (ep->errdef.log.stop_time == 0ul)
1916 				(void) drv_getparm(TIME,
1917 				    &(ep->errdef.log.stop_time));
1918 		}
1919 	mutex_exit(&bofi_low_mutex);
1920 }
1921 
1922 
1923 /*
1924  * wake up any thread waiting on this errdef
1925  */
1926 static uint_t
1927 bofi_signal(caddr_t arg)
1928 {
1929 	struct bofi_errdef *edp = (struct bofi_errdef *)arg;
1930 	struct bofi_errent *hep;
1931 	struct bofi_errent *ep =
1932 	    (struct bofi_errent *)(uintptr_t)edp->errdef_handle;
1933 
1934 	mutex_enter(&bofi_low_mutex);
1935 	for (hep = errent_listp; hep != NULL; ) {
1936 		if (hep == ep)
1937 			break;
1938 		hep = hep->next;
1939 	}
1940 	if (hep == NULL) {
1941 		mutex_exit(&bofi_low_mutex);
1942 		return (DDI_INTR_UNCLAIMED);
1943 	}
1944 	if ((ep->errdef.access_type & BOFI_LOG) &&
1945 	    (edp->log.flags & BOFI_LOG_FULL)) {
1946 		edp->log.stop_time = bofi_gettime();
1947 		ep->state |= BOFI_NEW_MESSAGE;
1948 		if (ep->state & BOFI_MESSAGE_WAIT)
1949 			cv_broadcast(&ep->cv);
1950 		ep->state &= ~BOFI_MESSAGE_WAIT;
1951 	}
1952 	if (ep->errstate.msg_time != 0) {
1953 		ep->state |= BOFI_NEW_MESSAGE;
1954 		if (ep->state & BOFI_MESSAGE_WAIT)
1955 			cv_broadcast(&ep->cv);
1956 		ep->state &= ~BOFI_MESSAGE_WAIT;
1957 	}
1958 	mutex_exit(&bofi_low_mutex);
1959 	return (DDI_INTR_CLAIMED);
1960 }
1961 
1962 
1963 /*
1964  * wake up all errdefs corresponding to this name and instance
1965  */
1966 static void
1967 bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
1968 {
1969 	struct bofi_errent *ep;
1970 
1971 	/*
1972 	 * look for any errdefs with matching name and instance
1973 	 */
1974 	mutex_enter(&bofi_low_mutex);
1975 	for (ep = errent_listp; ep != NULL; ep = ep->next)
1976 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1977 		    errctlp->instance == ep->errdef.instance) {
1978 			/*
1979 			 * wake up sleepers
1980 			 */
1981 			ep->state |= BOFI_NEW_MESSAGE;
1982 			if (ep->state & BOFI_MESSAGE_WAIT)
1983 				cv_broadcast(&ep->cv);
1984 			ep->state &= ~BOFI_MESSAGE_WAIT;
1985 		}
1986 	mutex_exit(&bofi_low_mutex);
1987 }
1988 
1989 
1990 /*
1991  * clear "acc_chk" for all errdefs corresponding to this name and instance
1992  * and wake them up.
1993  */
1994 static void
1995 bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep)
1996 {
1997 	struct bofi_errent *ep;
1998 
1999 	/*
2000 	 * look for any errdefs with matching name and instance
2001 	 */
2002 	mutex_enter(&bofi_low_mutex);
2003 	for (ep = errent_listp; ep != NULL; ep = ep->next)
2004 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2005 		    errctlp->instance == ep->errdef.instance) {
2006 			mutex_enter(&bofi_mutex);
2007 			if (ep->errdef.access_count == 0 &&
2008 			    ep->errdef.fail_count == 0)
2009 				ep->errdef.acc_chk = 0;
2010 			mutex_exit(&bofi_mutex);
2011 			/*
2012 			 * wake up sleepers
2013 			 */
2014 			ep->state |= BOFI_NEW_MESSAGE;
2015 			if (ep->state & BOFI_MESSAGE_WAIT)
2016 				cv_broadcast(&ep->cv);
2017 			ep->state &= ~BOFI_MESSAGE_WAIT;
2018 		}
2019 	mutex_exit(&bofi_low_mutex);
2020 }
2021 
2022 
2023 /*
2024  * for all errdefs matching this name and instance whose "access_count" has
2025  * expired, set "fail_count" and "acc_chk" to 0 and wake them up.
2026  */
2027 static void
2028 bofi_clear_errors(struct bofi_errctl *errctlp, char *namep)
2029 {
2030 	struct bofi_errent *ep;
2031 
2032 	/*
2033 	 * look for any errdefs with matching name and instance
2034 	 */
2035 	mutex_enter(&bofi_low_mutex);
2036 	for (ep = errent_listp; ep != NULL; ep = ep->next)
2037 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2038 		    errctlp->instance == ep->errdef.instance) {
2039 			mutex_enter(&bofi_mutex);
2040 			if (ep->errdef.access_count == 0) {
2041 				ep->errdef.acc_chk = 0;
2042 				ep->errdef.fail_count = 0;
2043 				mutex_exit(&bofi_mutex);
2044 				if (ep->errdef.log.stop_time == 0ul)
2045 					(void) drv_getparm(TIME,
2046 					    &(ep->errdef.log.stop_time));
2047 			} else
2048 				mutex_exit(&bofi_mutex);
2049 			/*
2050 			 * wake up sleepers
2051 			 */
2052 			ep->state |= BOFI_NEW_MESSAGE;
2053 			if (ep->state & BOFI_MESSAGE_WAIT)
2054 				cv_broadcast(&ep->cv);
2055 			ep->state &= ~BOFI_MESSAGE_WAIT;
2056 		}
2057 	mutex_exit(&bofi_low_mutex);
2058 }
2059 
2060 
2061 /*
2062  * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2063  * this name and instance, set "acc_chk" to 0, and wake them up.
2064  */
2065 static void
2066 bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
2067 {
2068 	struct bofi_errent *ep;
2069 
2070 	/*
2071 	 * look for any errdefs with matching name and instance
2072 	 */
2073 	mutex_enter(&bofi_low_mutex);
2074 	for (ep = errent_listp; ep != NULL; ep = ep->next)
2075 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2076 		    errctlp->instance == ep->errdef.instance) {
2077 			mutex_enter(&bofi_mutex);
2078 			ep->errdef.acc_chk = 0;
2079 			ep->errdef.access_count = 0;
2080 			ep->errdef.fail_count = 0;
2081 			mutex_exit(&bofi_mutex);
2082 			if (ep->errdef.log.stop_time == 0ul)
2083 				(void) drv_getparm(TIME,
2084 				    &(ep->errdef.log.stop_time));
2085 			/*
2086 			 * wake up sleepers
2087 			 */
2088 			ep->state |= BOFI_NEW_MESSAGE;
2089 			if (ep->state & BOFI_MESSAGE_WAIT)
2090 				cv_broadcast(&ep->cv);
2091 			ep->state &= ~BOFI_MESSAGE_WAIT;
2092 		}
2093 	mutex_exit(&bofi_low_mutex);
2094 }
2095 
2096 
2097 /*
2098  * get state for this errdef
2099  */
2100 static int
2101 bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
2102 {
2103 	struct bofi_errent *hep;
2104 	struct bofi_errent *ep;
2105 
2106 	ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2107 	mutex_enter(&bofi_low_mutex);
2108 	/*
2109 	 * don't just assume it's a valid ep - check that it's on the
2110 	 * in-use list
2111 	 */
2112 	for (hep = errent_listp; hep != NULL; hep = hep->next)
2113 		if (hep == ep)
2114 			break;
2115 	if (hep == NULL) {
2116 		mutex_exit(&bofi_low_mutex);
2117 		return (EINVAL);
2118 	}
2119 	mutex_enter(&bofi_mutex);
2120 	ep->errstate.access_count = ep->errdef.access_count;
2121 	ep->errstate.fail_count = ep->errdef.fail_count;
2122 	ep->errstate.acc_chk = ep->errdef.acc_chk;
2123 	ep->errstate.log = ep->errdef.log;
2124 	*logpp = ep->logbase;
2125 	*errstatep = ep->errstate;
2126 	mutex_exit(&bofi_mutex);
2127 	mutex_exit(&bofi_low_mutex);
2128 	return (0);
2129 }
2130 
2131 
2132 /*
2133  * Wait for a ddi_report_fault message to come back for this errdef,
2134  * then return state for this errdef.
2135  * The fault report is intercepted by bofi_post_event, which triggers
2136  * bofi_signal via a softint, which will wake up this routine if
2137  * we are waiting
2138  */
2139 static int
2140 bofi_errdef_check_w(struct bofi_errstate *errstatep,
2141     struct acc_log_elem **logpp)
2142 {
2143 	struct bofi_errent *hep;
2144 	struct bofi_errent *ep;
2145 	int rval = 0;
2146 
2147 	ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2148 	mutex_enter(&bofi_low_mutex);
2149 retry:
2150 	/*
2151 	 * don't just assume it's a valid ep - check that it's on the
2152 	 * in-use list
2153 	 */
2154 	for (hep = errent_listp; hep != NULL; hep = hep->next)
2155 		if (hep == ep)
2156 			break;
2157 	if (hep == NULL) {
2158 		mutex_exit(&bofi_low_mutex);
2159 		return (EINVAL);
2160 	}
2161 	/*
2162 	 * wait for ddi_report_fault for the devinfo corresponding
2163 	 * to this errdef
2164 	 */
2165 	if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
2166 		ep->state |= BOFI_MESSAGE_WAIT;
2167 		if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0)
2168 			rval = EINTR;
2169 		goto retry;
2170 	}
2171 	ep->state &= ~BOFI_NEW_MESSAGE;
2172 	/*
2173 	 * we either didn't need to sleep, we've been woken up, or we've been
2174 	 * signaled - either way return state now
2175 	 */
2176 	mutex_enter(&bofi_mutex);
2177 	ep->errstate.access_count = ep->errdef.access_count;
2178 	ep->errstate.fail_count = ep->errdef.fail_count;
2179 	ep->errstate.acc_chk = ep->errdef.acc_chk;
2180 	ep->errstate.log = ep->errdef.log;
2181 	*logpp = ep->logbase;
2182 	*errstatep = ep->errstate;
2183 	mutex_exit(&bofi_mutex);
2184 	mutex_exit(&bofi_low_mutex);
2185 	return (rval);
2186 }
2187 
2188 
2189 /*
2190  * support routine - check if requested driver is defined as under test in the
2191  * conf file.
2192  */
2193 static int
2194 driver_under_test(dev_info_t *rdip)
2195 {
2196 	int i;
2197 	char	*rname;
2198 	major_t rmaj;
2199 
2200 	rname = ddi_get_name(rdip);
2201 	rmaj = ddi_name_to_major(rname);
2202 
2203 	/*
2204 	 * Require the user to specifically list the drivers to be tested.
2205 	 */
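	/*
	 * driver_list holds NUL-separated driver names; when "bofi-to-test"
	 * was negated each stored entry seemingly keeps a leading negation
	 * character, hence the &driver_list[i+1] below.
	 */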
2206 	for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
2207 		if (driver_list_neg == 0) {
2208 			if (rmaj == ddi_name_to_major(&driver_list[i]))
2209 				return (1);
2210 		} else {
2211 			if (rmaj == ddi_name_to_major(&driver_list[i+1]))
2212 				return (0);
2213 		}
2214 	}
2215 	if (driver_list_neg == 0)
2216 		return (0);
2217 	else
2218 		return (1);
2220 }
2221 
2222 
2223 static void
2224 log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
2225     size_t repcount, uint64_t *valuep)
2226 {
2227 	struct bofi_errdef *edp = &(ep->errdef);
2228 	struct acc_log *log = &edp->log;
2229 
2230 	ASSERT(log != NULL);
2231 	ASSERT(MUTEX_HELD(&bofi_mutex));
2232 
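	/*
	 * the rep_get/rep_put routines arrive here once per element, with
	 * the full repcount for the first element and 0 for the rest.  With
	 * BOFI_LOG_REPIO each element is logged individually; otherwise only
	 * the first element of a burst is logged, and access_count is topped
	 * back up for the others so the burst is charged as a single access.
	 */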
2233 	if (log->flags & BOFI_LOG_REPIO)
2234 		repcount = 1;
2235 	else if (repcount == 0 && edp->access_count > 0 &&
2236 	    (log->flags & BOFI_LOG_FULL) == 0)
2237 		edp->access_count += 1;
2238 
2239 	if (repcount && log->entries < log->logsize) {
2240 		struct acc_log_elem *elem = ep->logbase + log->entries;
2241 
2242 		if (log->flags & BOFI_LOG_TIMESTAMP)
2243 			elem->access_time = bofi_gettime();
2244 		elem->access_type = at;
2245 		elem->offset = offset;
2246 		elem->value = valuep ? *valuep : 0ll;
2247 		elem->size = len;
2248 		elem->repcount = repcount;
2249 		++log->entries;
2250 		if (log->entries == log->logsize) {
2251 			log->flags |= BOFI_LOG_FULL;
2252 			ddi_trigger_softintr(((struct bofi_errent *)
2253 			    (uintptr_t)edp->errdef_handle)->softintr_id);
2254 		}
2255 	}
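	/*
	 * in wrap mode, when the access budget has all but run out, count
	 * the completed pass in wrapcnt, recharge access_count and start
	 * filling the log again from the beginning.
	 */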
2256 	if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
2257 		log->wrapcnt++;
2258 		edp->access_count = log->logsize;
2259 		log->entries = 0;	/* wrap back to the start */
2260 	}
2261 }
2262 
2263 
2264 /*
2265  * got a condition match on dma read/write - check counts and corrupt
2266  * data if necessary
2267  *
2268  * bofi_mutex always held when this is called.
2269  */
2270 static void
2271 do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
2272 	uint_t synctype, off_t off, off_t length)
2273 {
2274 	uint64_t operand;
2275 	int i;
2276 	off_t len;
2277 	caddr_t logaddr;
2278 	uint64_t *addr;
2279 	uint64_t *endaddr;
2280 
2281 	ASSERT(MUTEX_HELD(&bofi_mutex));
2282 	if ((ep->errdef.access_count ||
2283 	    ep->errdef.fail_count) &&
2284 	    (ep->errdef.access_type & BOFI_LOG)) {
2285 		uint_t atype;
2286 
2287 		if (synctype == DDI_DMA_SYNC_FORDEV)
2288 			atype = BOFI_DMA_W;
2289 		else if (synctype == DDI_DMA_SYNC_FORCPU ||
2290 		    synctype == DDI_DMA_SYNC_FORKERNEL)
2291 			atype = BOFI_DMA_R;
2292 		else
2293 			atype = 0;
2294 		if ((off <= ep->errdef.offset &&
2295 		    off + length > ep->errdef.offset) ||
2296 		    (off > ep->errdef.offset &&
2297 		    off < ep->errdef.offset + ep->errdef.len)) {
2298 			logaddr = (caddr_t)((uintptr_t)(hp->addr +
2299 			    off + LLSZMASK) & ~LLSZMASK);
2300 
2301 			log_acc_event(ep, atype, logaddr - hp->addr,
2302 			    length, 1, 0);
2303 		}
2304 	}
2305 	if (ep->errdef.access_count > 1) {
2306 		ep->errdef.access_count--;
2307 	} else if (ep->errdef.fail_count > 0) {
2308 		ep->errdef.fail_count--;
2309 		ep->errdef.access_count = 0;
2310 		/*
2311 		 * OK do the corruption
2312 		 */
2313 		if (ep->errstate.fail_time == 0)
2314 			ep->errstate.fail_time = bofi_gettime();
2315 		/*
2316 		 * work out how much to corrupt
2317 		 *
2318 		 * Make sure endaddr isn't greater than hp->addr + hp->len.
2319 		 * If endaddr becomes less than addr, len becomes negative
2320 		 * and the following loop isn't entered.
2321 		 */
2322 		addr = (uint64_t *)((uintptr_t)((hp->addr +
2323 		    ep->errdef.offset) + LLSZMASK) & ~LLSZMASK);
2324 		endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len,
2325 		    ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK);
2326 		len = endaddr - addr;
2327 		operand = ep->errdef.operand;
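		/*
		 * apply the requested operation to each 64-bit word in the
		 * window - e.g. BOFI_EQUAL stamps the operand over the data,
		 * while BOFI_XOR with operand 0xff flips the low byte of
		 * each word.
		 */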
2328 		switch (ep->errdef.optype) {
2329 		case BOFI_EQUAL :
2330 			for (i = 0; i < len; i++)
2331 				*(addr + i) = operand;
2332 			break;
2333 		case BOFI_AND :
2334 			for (i = 0; i < len; i++)
2335 				*(addr + i) &= operand;
2336 			break;
2337 		case BOFI_OR :
2338 			for (i = 0; i < len; i++)
2339 				*(addr + i) |= operand;
2340 			break;
2341 		case BOFI_XOR :
2342 			for (i = 0; i < len; i++)
2343 				*(addr + i) ^= operand;
2344 			break;
2345 		default:
2346 			/* do nothing */
2347 			break;
2348 		}
2349 	}
2350 }
2351 
2352 
2353 static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t);
2354 static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t);
2355 static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t);
2356 static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t);
2357 
2358 
2359 /*
2360  * check all errdefs linked to this shadow handle. If we've got a condition
2361  * match, check counts and corrupt data if necessary
2362  *
2363  * bofi_mutex always held when this is called.
2364  *
2365  * because of the possibility of BOFI_NO_TRANSFER, we can't get data
2366  * from io-space before calling this, so we pass in the func to do the
2367  * transfer as a parameter.
2368  */
2369 static uint64_t
2370 do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr,
2371 	uint64_t (*func)(), size_t repcount, size_t accsize)
2372 {
2373 	struct bofi_errent *ep;
2374 	struct bofi_link   *lp;
2375 	uint64_t operand;
2376 	uintptr_t minlen;
2377 	intptr_t base;
2378 	int done_get = 0;
2379 	uint64_t get_val, gv;
2380 
2381 	ASSERT(MUTEX_HELD(&bofi_mutex));
2382 	/*
2383 	 * check through all errdefs associated with this shadow handle
2384 	 */
2385 	for (lp = hp->link; lp != NULL; lp = lp->link) {
2386 		ep = lp->errentp;
2387 		if (ep->errdef.len == 0)
2388 			minlen = hp->len;
2389 		else
2390 			minlen = min(hp->len, ep->errdef.len);
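		/*
		 * base is the offset of this access from the start of the
		 * errdef window: errdef offsets are relative to the start of
		 * the register set, whereas hp->addr is the kernel virtual
		 * address of a mapping that starts hp->offset into that set.
		 */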
2391 		base = addr - hp->addr - ep->errdef.offset + hp->offset;
2392 		if ((ep->errdef.access_type & BOFI_PIO_R) &&
2393 		    (ep->state & BOFI_DEV_ACTIVE) &&
2394 		    base >= 0 && base < minlen) {
2395 			/*
2396 			 * condition match for pio read
2397 			 */
2398 			if (ep->errdef.access_count > 1) {
2399 				ep->errdef.access_count--;
2400 				if (done_get == 0) {
2401 					done_get = 1;
2402 					gv = get_val = func(hp, addr);
2403 				}
2404 				if (ep->errdef.access_type & BOFI_LOG) {
2405 					log_acc_event(ep, BOFI_PIO_R,
2406 					    addr - hp->addr,
2407 					    accsize, repcount, &gv);
2408 				}
2409 			} else if (ep->errdef.fail_count > 0) {
2410 				ep->errdef.fail_count--;
2411 				ep->errdef.access_count = 0;
2412 				/*
2413 				 * OK do corruption
2414 				 */
2415 				if (ep->errstate.fail_time == 0)
2416 					ep->errstate.fail_time = bofi_gettime();
2417 				operand = ep->errdef.operand;
2418 				if (done_get == 0) {
2419 					if (ep->errdef.optype ==
2420 					    BOFI_NO_TRANSFER)
2421 						/*
2422 						 * no transfer - bomb out
2423 						 */
2424 						return (operand);
2425 					done_get = 1;
2426 					gv = get_val = func(hp, addr);
2427 
2428 				}
2429 				if (ep->errdef.access_type & BOFI_LOG) {
2430 					log_acc_event(ep, BOFI_PIO_R,
2431 					    addr - hp->addr,
2432 					    accsize, repcount, &gv);
2433 				}
2434 				switch (ep->errdef.optype) {
2435 				case BOFI_EQUAL :
2436 					get_val = operand;
2437 					break;
2438 				case BOFI_AND :
2439 					get_val &= operand;
2440 					break;
2441 				case BOFI_OR :
2442 					get_val |= operand;
2443 					break;
2444 				case BOFI_XOR :
2445 					get_val ^= operand;
2446 					break;
2447 				default:
2448 					/* do nothing */
2449 					break;
2450 				}
2451 			}
2452 		}
2453 	}
2454 	if (done_get == 0)
2455 		return (func(hp, addr));
2456 	else
2457 		return (get_val);
2458 }
2459 
2460 
2461 /*
2462  * check all errdefs linked to this shadow handle. If we've got a condition
2463  * match, check counts and corrupt data if necessary
2464  *
2465  * bofi_mutex always held when this is called.
2466  *
2467  * because of the possibility of BOFI_NO_TRANSFER, we return 0 if no data
2468  * is to be written out to io-space, 1 otherwise
2469  */
2470 static int
2471 do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
2472 				size_t size, size_t repcount)
2473 {
2474 	struct bofi_errent *ep;
2475 	struct bofi_link   *lp;
2476 	uintptr_t minlen;
2477 	intptr_t base;
2478 	uint64_t v = *valuep;
2479 
2480 	ASSERT(MUTEX_HELD(&bofi_mutex));
2481 	/*
2482 	 * check through all errdefs associated with this shadow handle
2483 	 */
2484 	for (lp = hp->link; lp != NULL; lp = lp->link) {
2485 		ep = lp->errentp;
2486 		if (ep->errdef.len == 0)
2487 			minlen = hp->len;
2488 		else
2489 			minlen = min(hp->len, ep->errdef.len);
2490 		base = addr - hp->addr - ep->errdef.offset + hp->offset;
2491 		if ((ep->errdef.access_type & BOFI_PIO_W) &&
2492 		    (ep->state & BOFI_DEV_ACTIVE) &&
2493 		    base >= 0 && base < minlen) {
2494 			/*
2495 			 * condition match for pio write
2496 			 */
2497 
2498 			if (ep->errdef.access_count > 1) {
2499 				ep->errdef.access_count--;
2500 				if (ep->errdef.access_type & BOFI_LOG)
2501 					log_acc_event(ep, BOFI_PIO_W,
2502 					    addr - hp->addr, size,
2503 					    repcount, &v);
2504 			} else if (ep->errdef.fail_count > 0) {
2505 				ep->errdef.fail_count--;
2506 				ep->errdef.access_count = 0;
2507 				if (ep->errdef.access_type & BOFI_LOG)
2508 					log_acc_event(ep, BOFI_PIO_W,
2509 					    addr - hp->addr, size,
2510 					    repcount, &v);
2511 				/*
2512 				 * OK do corruption
2513 				 */
2514 				if (ep->errstate.fail_time == 0)
2515 					ep->errstate.fail_time = bofi_gettime();
2516 				switch (ep->errdef.optype) {
2517 				case BOFI_EQUAL :
2518 					*valuep = ep->errdef.operand;
2519 					break;
2520 				case BOFI_AND :
2521 					*valuep &= ep->errdef.operand;
2522 					break;
2523 				case BOFI_OR :
2524 					*valuep |= ep->errdef.operand;
2525 					break;
2526 				case BOFI_XOR :
2527 					*valuep ^= ep->errdef.operand;
2528 					break;
2529 				case BOFI_NO_TRANSFER :
2530 					/*
2531 					 * no transfer - bomb out
2532 					 */
2533 					return (0);
2534 				default:
2535 					/* do nothing */
2536 					break;
2537 				}
2538 			}
2539 		}
2540 	}
2541 	return (1);
2542 }
2543 
2544 
2545 static uint64_t
2546 do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
2547 {
2548 	return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr));
2549 }
2550 
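/*
 * Common sanity checks for the intercepted ddi_get routines below.  If
 * bofi_ddi_check is set, bofi_map() handed the driver the spurious address
 * 64 rather than the real mapping, so convert back to a real address here.
 * If bofi_range_check is set, complain (or panic, if it is 2) about any
 * access that falls outside the handle's mapped range.
 *
 * Note that the access routines take bofi_mutex with mutex_tryenter() - if
 * the mutex is already held the access simply bypasses fault injection
 * rather than risk blocking.
 */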
2551 #define	BOFI_READ_CHECKS(type) \
2552 	if (bofi_ddi_check) \
2553 		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2554 	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2555 	    (caddr_t)addr - hp->addr >= hp->len)) { \
2556 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2557 		    "ddi_get() out of range addr %p not in %p/%llx", \
2558 		    (void *)addr, (void *)hp->addr, hp->len); \
2559 		return (0); \
2560 	}
2561 
2562 /*
2563  * our getb() routine - use tryenter
2564  */
2565 static uint8_t
2566 bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
2567 {
2568 	struct bofi_shadow *hp;
2569 	uint8_t retval;
2570 
2571 	hp = handle->ahi_common.ah_bus_private;
2572 	BOFI_READ_CHECKS(uint8_t)
2573 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2574 		return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
2575 	retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1,
2576 	    1);
2577 	mutex_exit(&bofi_mutex);
2578 	return (retval);
2579 }
2580 
2581 
2582 static uint64_t
2583 do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
2584 {
2585 	return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr));
2586 }
2587 
2588 
2589 /*
2590  * our getw() routine - use tryenter
2591  */
2592 static uint16_t
2593 bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
2594 {
2595 	struct bofi_shadow *hp;
2596 	uint16_t retval;
2597 
2598 	hp = handle->ahi_common.ah_bus_private;
2599 	BOFI_READ_CHECKS(uint16_t)
2600 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2601 		return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
2602 	retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1,
2603 	    2);
2604 	mutex_exit(&bofi_mutex);
2605 	return (retval);
2606 }
2607 
2608 
2609 static uint64_t
2610 do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr)
2611 {
2612 	return (hp->save.acc.ahi_get32(&hp->save.acc, (uint32_t *)addr));
2613 }
2614 
2615 
2616 /*
2617  * our getl() routine - use tryenter
2618  */
2619 static uint32_t
2620 bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
2621 {
2622 	struct bofi_shadow *hp;
2623 	uint32_t retval;
2624 
2625 	hp = handle->ahi_common.ah_bus_private;
2626 	BOFI_READ_CHECKS(uint32_t)
2627 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2628 		return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
2629 	retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1,
2630 	    4);
2631 	mutex_exit(&bofi_mutex);
2632 	return (retval);
2633 }
2634 
2635 
2636 static uint64_t
2637 do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
2638 {
2639 	return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr));
2640 }
2641 
2642 
2643 /*
2644  * our getll() routine - use tryenter
2645  */
2646 static uint64_t
2647 bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
2648 {
2649 	struct bofi_shadow *hp;
2650 	uint64_t retval;
2651 
2652 	hp = handle->ahi_common.ah_bus_private;
2653 	BOFI_READ_CHECKS(uint64_t)
2654 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2655 		return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
2656 	retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1,
2657 	    8);
2658 	mutex_exit(&bofi_mutex);
2659 	return (retval);
2660 }
2661 
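/*
 * As BOFI_READ_CHECKS, but for the intercepted ddi_put routines.
 */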
2662 #define	BOFI_WRITE_TESTS(type) \
2663 	if (bofi_ddi_check) \
2664 		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2665 	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2666 	    (caddr_t)addr - hp->addr >= hp->len)) { \
2667 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2668 		    "ddi_put() out of range addr %p not in %p/%llx\n", \
2669 		    (void *)addr, (void *)hp->addr, hp->len); \
2670 		return; \
2671 	}
2672 
2673 /*
2674  * our putb() routine - use tryenter
2675  */
2676 static void
2677 bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
2678 {
2679 	struct bofi_shadow *hp;
2680 	uint64_t llvalue = value;
2681 
2682 	hp = handle->ahi_common.ah_bus_private;
2683 	BOFI_WRITE_TESTS(uint8_t)
2684 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2685 		hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2686 		return;
2687 	}
2688 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1))
2689 		hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2690 	mutex_exit(&bofi_mutex);
2691 }
2692 
2693 
2694 /*
2695  * our putw() routine - use tryenter
2696  */
2697 static void
2698 bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
2699 {
2700 	struct bofi_shadow *hp;
2701 	uint64_t llvalue = value;
2702 
2703 	hp = handle->ahi_common.ah_bus_private;
2704 	BOFI_WRITE_TESTS(uint16_t)
2705 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2706 		hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2707 		return;
2708 	}
2709 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1))
2710 		hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2711 	mutex_exit(&bofi_mutex);
2712 }
2713 
2714 
2715 /*
2716  * our putl() routine - use tryenter
2717  */
2718 static void
2719 bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
2720 {
2721 	struct bofi_shadow *hp;
2722 	uint64_t llvalue = value;
2723 
2724 	hp = handle->ahi_common.ah_bus_private;
2725 	BOFI_WRITE_TESTS(uint32_t)
2726 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2727 		hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2728 		return;
2729 	}
2730 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1))
2731 		hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2732 	mutex_exit(&bofi_mutex);
2733 }
2734 
2735 
2736 /*
2737  * our putll() routine - use tryenter
2738  */
2739 static void
2740 bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
2741 {
2742 	struct bofi_shadow *hp;
2743 	uint64_t llvalue = value;
2744 
2745 	hp = handle->ahi_common.ah_bus_private;
2746 	BOFI_WRITE_TESTS(uint64_t)
2747 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2748 		hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2749 		return;
2750 	}
2751 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1))
2752 		hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2753 	mutex_exit(&bofi_mutex);
2754 }
2755 
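/*
 * As BOFI_READ_CHECKS, but for the rep_get routines (and, below,
 * BOFI_REP_WRITE_TESTS for the rep_put routines): if the repeat count
 * would run off the end of the mapping, clamp it rather than fail the
 * whole access.
 */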
2756 #define	BOFI_REP_READ_TESTS(type) \
2757 	if (bofi_ddi_check) \
2758 		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2759 	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2760 	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2761 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2762 		    "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
2763 		    (void *)dev_addr, (void *)hp->addr, hp->len); \
2764 		if ((caddr_t)dev_addr < hp->addr || \
2765 		    (caddr_t)dev_addr - hp->addr >= hp->len) \
2766 			return; \
2767 		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2768 	}
2769 
2770 /*
2771  * our rep_getb() routine - use tryenter
2772  */
2773 static void
2774 bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2775 	size_t repcount, uint_t flags)
2776 {
2777 	struct bofi_shadow *hp;
2778 	int i;
2779 	uint8_t *addr;
2780 
2781 	hp = handle->ahi_common.ah_bus_private;
2782 	BOFI_REP_READ_TESTS(uint8_t)
2783 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2784 		hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
2785 		    repcount, flags);
2786 		return;
2787 	}
2788 	for (i = 0; i < repcount; i++) {
2789 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2790 		*(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
2791 		    do_bofi_rd8, i ? 0 : repcount, 1);
2792 	}
2793 	mutex_exit(&bofi_mutex);
2794 }
2795 
2796 
2797 /*
2798  * our rep_getw() routine - use tryenter
2799  */
2800 static void
2801 bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2802 	uint16_t *dev_addr, size_t repcount, uint_t flags)
2803 {
2804 	struct bofi_shadow *hp;
2805 	int i;
2806 	uint16_t *addr;
2807 
2808 	hp = handle->ahi_common.ah_bus_private;
2809 	BOFI_REP_READ_TESTS(uint16_t)
2810 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2811 		hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
2812 		    repcount, flags);
2813 		return;
2814 	}
2815 	for (i = 0; i < repcount; i++) {
2816 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2817 		*(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
2818 		    do_bofi_rd16, i ? 0 : repcount, 2);
2819 	}
2820 	mutex_exit(&bofi_mutex);
2821 }
2822 
2823 
2824 /*
2825  * our rep_getl() routine - use tryenter
2826  */
2827 static void
2828 bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
2829 	uint32_t *dev_addr, size_t repcount, uint_t flags)
2830 {
2831 	struct bofi_shadow *hp;
2832 	int i;
2833 	uint32_t *addr;
2834 
2835 	hp = handle->ahi_common.ah_bus_private;
2836 	BOFI_REP_READ_TESTS(uint32_t)
2837 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2838 		hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
2839 		    repcount, flags);
2840 		return;
2841 	}
2842 	for (i = 0; i < repcount; i++) {
2843 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2844 		*(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
2845 		    do_bofi_rd32, i ? 0 : repcount, 4);
2846 	}
2847 	mutex_exit(&bofi_mutex);
2848 }
2849 
2850 
2851 /*
2852  * our rep_getll() routine - use tryenter
2853  */
2854 static void
2855 bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
2856 	uint64_t *dev_addr, size_t repcount, uint_t flags)
2857 {
2858 	struct bofi_shadow *hp;
2859 	int i;
2860 	uint64_t *addr;
2861 
2862 	hp = handle->ahi_common.ah_bus_private;
2863 	BOFI_REP_READ_TESTS(uint64_t)
2864 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2865 		hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
2866 		    repcount, flags);
2867 		return;
2868 	}
2869 	for (i = 0; i < repcount; i++) {
2870 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2871 		*(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
2872 		    do_bofi_rd64, i ? 0 : repcount, 8);
2873 	}
2874 	mutex_exit(&bofi_mutex);
2875 }
2876 
2877 #define	BOFI_REP_WRITE_TESTS(type) \
2878 	if (bofi_ddi_check) \
2879 		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2880 	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2881 	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2882 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2883 		    "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
2884 		    (void *)dev_addr, (void *)hp->addr, hp->len); \
2885 		if ((caddr_t)dev_addr < hp->addr || \
2886 		    (caddr_t)dev_addr - hp->addr >= hp->len) \
2887 			return; \
2888 		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2889 	}
2890 
2891 /*
2892  * our rep_putb() routine - use tryenter
2893  */
2894 static void
2895 bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2896 	size_t repcount, uint_t flags)
2897 {
2898 	struct bofi_shadow *hp;
2899 	int i;
2900 	uint64_t llvalue;
2901 	uint8_t *addr;
2902 
2903 	hp = handle->ahi_common.ah_bus_private;
2904 	BOFI_REP_WRITE_TESTS(uint8_t)
2905 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2906 		hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
2907 		    repcount, flags);
2908 		return;
2909 	}
2910 	for (i = 0; i < repcount; i++) {
2911 		llvalue = *(host_addr + i);
2912 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2913 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 :
2914 		    repcount))
2915 			hp->save.acc.ahi_put8(&hp->save.acc, addr,
2916 			    (uint8_t)llvalue);
2917 	}
2918 	mutex_exit(&bofi_mutex);
2919 }
2920 
2921 
2922 /*
2923  * our rep_putw() routine - use tryenter
2924  */
2925 static void
2926 bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2927 	uint16_t *dev_addr, size_t repcount, uint_t flags)
2928 {
2929 	struct bofi_shadow *hp;
2930 	int i;
2931 	uint64_t llvalue;
2932 	uint16_t *addr;
2933 
2934 	hp = handle->ahi_common.ah_bus_private;
2935 	BOFI_REP_WRITE_TESTS(uint16_t)
2936 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2937 		hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
2938 		    repcount, flags);
2939 		return;
2940 	}
2941 	for (i = 0; i < repcount; i++) {
2942 		llvalue = *(host_addr + i);
2943 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2944 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 :
2945 		    repcount))
2946 			hp->save.acc.ahi_put16(&hp->save.acc, addr,
2947 			    (uint16_t)llvalue);
2948 	}
2949 	mutex_exit(&bofi_mutex);
2950 }
2951 
2952 
2953 /*
2954  * our rep_putl() routine - use tryenter
2955  */
2956 static void
2957 bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
2958 	uint32_t *dev_addr, size_t repcount, uint_t flags)
2959 {
2960 	struct bofi_shadow *hp;
2961 	int i;
2962 	uint64_t llvalue;
2963 	uint32_t *addr;
2964 
2965 	hp = handle->ahi_common.ah_bus_private;
2966 	BOFI_REP_WRITE_TESTS(uint32_t)
2967 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2968 		hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
2969 		    repcount, flags);
2970 		return;
2971 	}
2972 	for (i = 0; i < repcount; i++) {
2973 		llvalue = *(host_addr + i);
2974 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2975 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 0 :
2976 		    repcount))
2977 			hp->save.acc.ahi_put32(&hp->save.acc, addr,
2978 			    (uint32_t)llvalue);
2979 	}
2980 	mutex_exit(&bofi_mutex);
2981 }
2982 
2983 
2984 /*
2985  * our rep_putll() routine - use tryenter
2986  */
2987 static void
2988 bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
2989 	uint64_t *dev_addr, size_t repcount, uint_t flags)
2990 {
2991 	struct bofi_shadow *hp;
2992 	int i;
2993 	uint64_t llvalue;
2994 	uint64_t *addr;
2995 
2996 	hp = handle->ahi_common.ah_bus_private;
2997 	BOFI_REP_WRITE_TESTS(uint64_t)
2998 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2999 		hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
3000 		    repcount, flags);
3001 		return;
3002 	}
3003 	for (i = 0; i < repcount; i++) {
3004 		llvalue = *(host_addr + i);
3005 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3006 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 :
3007 		    repcount))
3008 			hp->save.acc.ahi_put64(&hp->save.acc, addr,
3009 			    (uint64_t)llvalue);
3010 	}
3011 	mutex_exit(&bofi_mutex);
3012 }
3013 
3014 
3015 /*
3016  * our ddi_map routine
3017  */
3018 static int
3019 bofi_map(dev_info_t *dip, dev_info_t *rdip,
3020 	ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
3021 {
3022 	ddi_acc_impl_t *ap;
3023 	struct bofi_shadow *hp;
3024 	struct bofi_errent *ep;
3025 	struct bofi_link   *lp, *next_lp;
3026 	int retval;
3027 	struct bofi_shadow *dhashp;
3028 	struct bofi_shadow *hhashp;
3029 
3030 	switch (reqp->map_op) {
3031 	case DDI_MO_MAP_LOCKED:
3032 		/*
3033 		 * for this case get nexus to do real work first
3034 		 */
3035 		retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
3036 		    vaddrp);
3037 		if (retval != DDI_SUCCESS)
3038 			return (retval);
3039 
3040 		ap = (ddi_acc_impl_t *)reqp->map_handlep;
3041 		if (ap == NULL)
3042 			return (DDI_SUCCESS);
3043 		/*
3044 		 * if driver_list is set, only intercept those drivers
3045 		 */
3046 		if (!driver_under_test(ap->ahi_common.ah_dip))
3047 			return (DDI_SUCCESS);
3048 
3049 		/*
3050 		 * support for ddi_regs_map_setup()
3051 		 * - allocate shadow handle structure and fill it in
3052 		 */
3053 		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
3054 		(void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
3055 		    NAMESIZE);
3056 		hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
3057 		hp->dip = ap->ahi_common.ah_dip;
3058 		hp->addr = *vaddrp;
3059 		/*
3060 		 * return spurious value to catch direct access to registers
3061 		 */
3062 		if (bofi_ddi_check)
3063 			*vaddrp = (caddr_t)64;
3064 		hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
3065 		hp->offset = offset;
3066 		if (len == 0)
3067 			hp->len = INT_MAX - offset;
3068 		else
3069 			hp->len = min(len, INT_MAX - offset);
3070 		hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
3071 		hp->link = NULL;
3072 		hp->type = BOFI_ACC_HDL;
3073 		/*
3074 		 * save existing function pointers and plug in our own
3075 		 */
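		/*
		 * from here on a driver call such as ddi_get8(handle, addr)
		 * vectors through ahi_get8 and so lands in bofi_rd8(), which
		 * may inject a fault before (or instead of) falling through
		 * to the saved routine.
		 */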
3076 		hp->save.acc = *ap;
3077 		ap->ahi_get8 = bofi_rd8;
3078 		ap->ahi_get16 = bofi_rd16;
3079 		ap->ahi_get32 = bofi_rd32;
3080 		ap->ahi_get64 = bofi_rd64;
3081 		ap->ahi_put8 = bofi_wr8;
3082 		ap->ahi_put16 = bofi_wr16;
3083 		ap->ahi_put32 = bofi_wr32;
3084 		ap->ahi_put64 = bofi_wr64;
3085 		ap->ahi_rep_get8 = bofi_rep_rd8;
3086 		ap->ahi_rep_get16 = bofi_rep_rd16;
3087 		ap->ahi_rep_get32 = bofi_rep_rd32;
3088 		ap->ahi_rep_get64 = bofi_rep_rd64;
3089 		ap->ahi_rep_put8 = bofi_rep_wr8;
3090 		ap->ahi_rep_put16 = bofi_rep_wr16;
3091 		ap->ahi_rep_put32 = bofi_rep_wr32;
3092 		ap->ahi_rep_put64 = bofi_rep_wr64;
3093 		ap->ahi_fault_check = bofi_check_acc_hdl;
3094 #if !defined(__sparc)
3095 		ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
3096 #endif
3098 		/*
3099 		 * stick in a pointer to our shadow handle
3100 		 */
3101 		ap->ahi_common.ah_bus_private = hp;
3102 		/*
3103 		 * add to dhash, hhash and inuse lists
3104 		 */
3105 		mutex_enter(&bofi_low_mutex);
3106 		mutex_enter(&bofi_mutex);
3107 		hp->next = shadow_list.next;
3108 		shadow_list.next->prev = hp;
3109 		hp->prev = &shadow_list;
3110 		shadow_list.next = hp;
3111 		hhashp = HDL_HHASH(ap);
3112 		hp->hnext = hhashp->hnext;
3113 		hhashp->hnext->hprev = hp;
3114 		hp->hprev = hhashp;
3115 		hhashp->hnext = hp;
3116 		dhashp = HDL_DHASH(hp->dip);
3117 		hp->dnext = dhashp->dnext;
3118 		dhashp->dnext->dprev = hp;
3119 		hp->dprev = dhashp;
3120 		dhashp->dnext = hp;
3121 		/*
3122 		 * chain on any pre-existing errdefs that apply to this
3123 		 * acc_handle
3124 		 */
3125 		for (ep = errent_listp; ep != NULL; ep = ep->next) {
3126 			if (ddi_name_to_major(hp->name) ==
3127 			    ddi_name_to_major(ep->name) &&
3128 			    hp->instance == ep->errdef.instance &&
3129 			    (ep->errdef.access_type & BOFI_PIO_RW) &&
3130 			    (ep->errdef.rnumber == -1 ||
3131 			    hp->rnumber == ep->errdef.rnumber) &&
3132 			    (ep->errdef.len == 0 ||
3133 			    offset < ep->errdef.offset + ep->errdef.len) &&
3134 			    offset + hp->len > ep->errdef.offset) {
3135 				lp = bofi_link_freelist;
3136 				if (lp != NULL) {
3137 					bofi_link_freelist = lp->link;
3138 					lp->errentp = ep;
3139 					lp->link = hp->link;
3140 					hp->link = lp;
3141 				}
3142 			}
3143 		}
3144 		mutex_exit(&bofi_mutex);
3145 		mutex_exit(&bofi_low_mutex);
3146 		return (DDI_SUCCESS);
3147 	case DDI_MO_UNMAP:
3148 
3149 		ap = (ddi_acc_impl_t *)reqp->map_handlep;
3150 		if (ap == NULL)
3151 			break;
3152 		/*
3153 		 * support for ddi_regs_map_free()
3154 		 * - check we really have a shadow handle for this one
3155 		 */
3156 		mutex_enter(&bofi_low_mutex);
3157 		mutex_enter(&bofi_mutex);
3158 		hhashp = HDL_HHASH(ap);
3159 		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3160 			if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
3161 				break;
3162 		if (hp == hhashp) {
3163 			mutex_exit(&bofi_mutex);
3164 			mutex_exit(&bofi_low_mutex);
3165 			break;
3166 		}
3167 		/*
3168 		 * got a shadow handle - restore original pointers
3169 		 */
3170 		*ap = hp->save.acc;
3171 		*vaddrp = hp->addr;
3172 		/*
3173 		 * remove from dhash, hhash and inuse lists
3174 		 */
3175 		hp->hnext->hprev = hp->hprev;
3176 		hp->hprev->hnext = hp->hnext;
3177 		hp->dnext->dprev = hp->dprev;
3178 		hp->dprev->dnext = hp->dnext;
3179 		hp->next->prev = hp->prev;
3180 		hp->prev->next = hp->next;
3181 		/*
3182 		 * free any errdef link structures tagged onto the shadow handle
3183 		 */
3184 		for (lp = hp->link; lp != NULL; ) {
3185 			next_lp = lp->link;
3186 			lp->link = bofi_link_freelist;
3187 			bofi_link_freelist = lp;
3188 			lp = next_lp;
3189 		}
3190 		hp->link = NULL;
3191 		mutex_exit(&bofi_mutex);
3192 		mutex_exit(&bofi_low_mutex);
3193 		/*
3194 		 * finally delete shadow handle
3195 		 */
3196 		kmem_free(hp, sizeof (struct bofi_shadow));
3197 		break;
3198 	default:
3199 		break;
3200 	}
3201 	return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
3202 }
3203 
3204 
3205 /*
3206  * chain any pre-existing errdefs on to newly created dma handle
3207  * and, if required, call do_dma_corrupt() to corrupt data
3208  */
3209 static void
3210 chain_on_errdefs(struct bofi_shadow *hp)
3211 {
3212 	struct bofi_errent *ep;
3213 	struct bofi_link   *lp;
3214 
3215 	ASSERT(MUTEX_HELD(&bofi_mutex));
3216 	/*
3217 	 * chain on any pre-existing errdefs that apply to this dma_handle
3218 	 */
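	/*
	 * the matching condition mirrors the DMA case in bofi_errdef_alloc():
	 * the errdef window must cover at least one naturally aligned 64-bit
	 * word of this handle's mapping.
	 */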
3219 	for (ep = errent_listp; ep != NULL; ep = ep->next) {
3220 		if (ddi_name_to_major(hp->name) ==
3221 		    ddi_name_to_major(ep->name) &&
3222 		    hp->instance == ep->errdef.instance &&
3223 		    (ep->errdef.rnumber == -1 ||
3224 		    hp->rnumber == ep->errdef.rnumber) &&
3225 		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
3226 		    (((uintptr_t)(hp->addr + ep->errdef.offset +
3227 		    ep->errdef.len) & ~LLSZMASK) >
3228 		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
3229 		    LLSZMASK) & ~LLSZMASK)))) {
3230 			/*
3231 			 * got a match - link it on
3232 			 */
3233 			lp = bofi_link_freelist;
3234 			if (lp != NULL) {
3235 				bofi_link_freelist = lp->link;
3236 				lp->errentp = ep;
3237 				lp->link = hp->link;
3238 				hp->link = lp;
3239 				if ((ep->errdef.access_type & BOFI_DMA_W) &&
3240 				    (hp->flags & DDI_DMA_WRITE) &&
3241 				    (ep->state & BOFI_DEV_ACTIVE)) {
3242 					do_dma_corrupt(hp, ep,
3243 					    DDI_DMA_SYNC_FORDEV,
3244 					    0, hp->len);
3245 				}
3246 			}
3247 		}
3248 	}
3249 }
3250 
3251 
3252 /*
3253  * need to copy byte-by-byte in case one of the pages is little-endian
3254  */
3255 static void
3256 xbcopy(void *from, void *to, u_longlong_t len)
3257 {
3258 	uchar_t *f = from;
3259 	uchar_t *t = to;
3260 
3261 	while (len--)
3262 		*t++ = *f++;
3263 }
3264 
3265 
3266 /*
3267  * our ddi_dma_map routine
3268  */
3269 static int
3270 bofi_dma_map(dev_info_t *dip, dev_info_t *rdip,
3271 		struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
3272 {
3273 	struct bofi_shadow *hp, *xhp;
3274 	int maxrnumber = 0;
3275 	int retval = DDI_DMA_NORESOURCES;
3276 	auto struct ddi_dma_req dmareq;
3277 	int sleep;
3278 	struct bofi_shadow *dhashp;
3279 	struct bofi_shadow *hhashp;
3280 	ddi_dma_impl_t *mp;
3281 	unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3282 
3283 	/*
3284 	 * if driver_list is set, only intercept those drivers
3285 	 */
3286 	if (handlep == NULL || !driver_under_test(rdip))
3287 		return (save_bus_ops.bus_dma_map(dip, rdip, dmareqp, handlep));
3288 
3289 	sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
3290 	/*
3291 	 * allocate shadow handle structure and fill it in
3292 	 */
3293 	hp = kmem_zalloc(sizeof (struct bofi_shadow), sleep);
3294 	if (hp == NULL)
3295 		goto error;
3296 	(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3297 	hp->instance = ddi_get_instance(rdip);
3298 	hp->dip = rdip;
3299 	hp->flags = dmareqp->dmar_flags;
3300 	hp->link = NULL;
3301 	hp->type = BOFI_DMA_HDL;
3302 	/*
3303 	 * get a kernel virtual mapping
3304 	 */
3305 	hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3306 	if (hp->addr == NULL)
3307 		goto error;
3308 	if (bofi_sync_check) {
3309 		/*
3310 		 * Take a copy and pass pointers to this up to nexus instead.
3311 		 * Data will be copied from the original on explicit
3312 		 * and implicit ddi_dma_sync()
3313 		 *
3314 		 * - maintain page alignment because some devices assume it.
3315 		 */
3316 		hp->origaddr = hp->addr;
3317 		hp->allocaddr = ddi_umem_alloc(
3318 		    ((uintptr_t)hp->addr & pagemask) + hp->len, sleep,
3319 		    &hp->umem_cookie);
3320 		if (hp->allocaddr == NULL)
3321 			goto error;
3322 		hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3323 		if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3324 			xbcopy(hp->origaddr, hp->addr, hp->len);
3325 		dmareq = *dmareqp;
3326 		dmareq.dmar_object.dmao_size = hp->len;
3327 		dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3328 		dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3329 		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3330 		dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3331 		dmareqp = &dmareq;
3332 	}
3333 	/*
3334 	 * call nexus to do the real work
3335 	 */
3336 	retval = save_bus_ops.bus_dma_map(dip, rdip, dmareqp, handlep);
3337 	if (retval != DDI_SUCCESS)
3338 		goto error2;
3339 	/*
3340 	 * now set dma_handle to point to the real handle
3341 	 */
3342 	hp->hdl.dma_handle = *handlep;
3343 	/*
3344 	 * unset DMP_NOSYNC
3345 	 */
3346 	mp = (ddi_dma_impl_t *)*handlep;
3347 	mp->dmai_rflags &= ~DMP_NOSYNC;
3348 	mp->dmai_fault_check = bofi_check_dma_hdl;
3349 	/*
3350 	 * bind and unbind are cached in devinfo - must overwrite them
3351 	 * - note that our bind and unbind are quite happy dealing with
3352 	 * any handles for this devinfo that were previously allocated
3353 	 */
3354 	if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3355 		DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3356 	if (save_bus_ops.bus_dma_unbindhdl ==
3357 	    DEVI(rdip)->devi_bus_dma_unbindfunc)
3358 		DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3359 	mutex_enter(&bofi_low_mutex);
3360 	mutex_enter(&bofi_mutex);
3361 	/*
3362 	 * get an "rnumber" for this handle - really just seeking to
3363 	 * get a unique number - we generally only care about early-allocated
3364 	 * handles - so if we get as far as INT_MAX, just stay there
3365 	 */
3366 	dhashp = HDL_DHASH(hp->dip);
3367 	for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3368 		if (ddi_name_to_major(xhp->name) ==
3369 		    ddi_name_to_major(hp->name) &&
3370 		    xhp->instance == hp->instance &&
3371 		    xhp->type == BOFI_DMA_HDL)
3372 			if (xhp->rnumber >= maxrnumber) {
3373 				if (xhp->rnumber == INT_MAX)
3374 					maxrnumber = INT_MAX;
3375 				else
3376 					maxrnumber = xhp->rnumber + 1;
3377 			}
3378 	hp->rnumber = maxrnumber;
3379 	/*
3380 	 * add to dhash, hhash and inuse lists
3381 	 */
3382 	hp->next = shadow_list.next;
3383 	shadow_list.next->prev = hp;
3384 	hp->prev = &shadow_list;
3385 	shadow_list.next = hp;
3386 	hhashp = HDL_HHASH(*handlep);
3387 	hp->hnext = hhashp->hnext;
3388 	hhashp->hnext->hprev = hp;
3389 	hp->hprev = hhashp;
3390 	hhashp->hnext = hp;
3391 	dhashp = HDL_DHASH(hp->dip);
3392 	hp->dnext = dhashp->dnext;
3393 	dhashp->dnext->dprev = hp;
3394 	hp->dprev = dhashp;
3395 	dhashp->dnext = hp;
3396 	/*
3397 	 * chain on any pre-existing errdefs that apply to this
3398 	 * acc_handle and corrupt if required (as there is an implicit
3399 	 * ddi_dma_sync() in this call)
3400 	 */
3401 	chain_on_errdefs(hp);
3402 	mutex_exit(&bofi_mutex);
3403 	mutex_exit(&bofi_low_mutex);
3404 	return (retval);
3405 error:
3406 	if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3407 		/*
3408 		 * what to do here? Wait a bit and try again
3409 		 */
3410 		(void) timeout((void (*)())dmareqp->dmar_fp,
3411 		    dmareqp->dmar_arg, 10);
3412 	}
3413 error2:
3414 	if (hp) {
3415 		ddi_dmareq_mapout(hp->mapaddr, hp->len);
3416 		if (bofi_sync_check && hp->allocaddr)
3417 			ddi_umem_free(hp->umem_cookie);
3418 		kmem_free(hp, sizeof (struct bofi_shadow));
3419 	}
3420 	return (retval);
3421 }
3422 
3423 
3424 /*
3425  * our ddi_dma_allochdl routine
3426  */
3427 static int
3428 bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
3429 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
3430 {
3431 	int retval = DDI_DMA_NORESOURCES;
3432 	struct bofi_shadow *hp, *xhp;
3433 	int maxrnumber = 0;
3434 	struct bofi_shadow *dhashp;
3435 	struct bofi_shadow *hhashp;
3436 	ddi_dma_impl_t *mp;
3437 
3438 	/*
3439 	 * if driver_list is set, only intercept those drivers
3440 	 */
3441 	if (!driver_under_test(rdip))
3442 		return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
3443 		    waitfp, arg, handlep));
3444 
3445 	/*
3446 	 * allocate shadow handle structure and fill it in
3447 	 */
3448 	hp = kmem_zalloc(sizeof (struct bofi_shadow),
3449 	    ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
3450 	if (hp == NULL) {
3451 		/*
3452 		 * what to do here? Wait a bit and try again
3453 		 */
3454 		if (waitfp != DDI_DMA_DONTWAIT)
3455 			(void) timeout((void (*)())waitfp, arg, 10);
3456 		return (retval);
3457 	}
3458 	(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3459 	hp->instance = ddi_get_instance(rdip);
3460 	hp->dip = rdip;
3461 	hp->link = NULL;
3462 	hp->type = BOFI_NULL;
3463 	/*
3464 	 * call nexus to do the real work
3465 	 */
3466 	retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
3467 	    handlep);
3468 	if (retval != DDI_SUCCESS) {
3469 		kmem_free(hp, sizeof (struct bofi_shadow));
3470 		return (retval);
3471 	}
3472 	/*
3473 	 * now set dma_handle to point to the real handle
3474 	 */
3475 	hp->hdl.dma_handle = *handlep;
3476 	mp = (ddi_dma_impl_t *)*handlep;
3477 	mp->dmai_fault_check = bofi_check_dma_hdl;
3478 	/*
3479 	 * bind and unbind are cached in devinfo - must overwrite them
3480 	 * - note that our bind and unbind are quite happy dealing with
3481 	 * any handles for this devinfo that were previously allocated
3482 	 */
3483 	if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3484 		DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3485 	if (save_bus_ops.bus_dma_unbindhdl ==
3486 	    DEVI(rdip)->devi_bus_dma_unbindfunc)
3487 		DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3488 	mutex_enter(&bofi_low_mutex);
3489 	mutex_enter(&bofi_mutex);
3490 	/*
3491 	 * get an "rnumber" for this handle - really just seeking to
3492 	 * get a unique number - we generally only care about early-allocated
3493 	 * handles - so if we get as far as INT_MAX, just stay there
3494 	 */
3495 	dhashp = HDL_DHASH(hp->dip);
3496 	for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3497 		if (ddi_name_to_major(xhp->name) ==
3498 		    ddi_name_to_major(hp->name) &&
3499 		    xhp->instance == hp->instance &&
3500 		    (xhp->type == BOFI_DMA_HDL ||
3501 		    xhp->type == BOFI_NULL))
3502 			if (xhp->rnumber >= maxrnumber) {
3503 				if (xhp->rnumber == INT_MAX)
3504 					maxrnumber = INT_MAX;
3505 				else
3506 					maxrnumber = xhp->rnumber + 1;
3507 			}
3508 	hp->rnumber = maxrnumber;
3509 	/*
3510 	 * add to dhash, hhash and inuse lists
3511 	 */
3512 	hp->next = shadow_list.next;
3513 	shadow_list.next->prev = hp;
3514 	hp->prev = &shadow_list;
3515 	shadow_list.next = hp;
3516 	hhashp = HDL_HHASH(*handlep);
3517 	hp->hnext = hhashp->hnext;
3518 	hhashp->hnext->hprev = hp;
3519 	hp->hprev = hhashp;
3520 	hhashp->hnext = hp;
3521 	dhashp = HDL_DHASH(hp->dip);
3522 	hp->dnext = dhashp->dnext;
3523 	dhashp->dnext->dprev = hp;
3524 	hp->dprev = dhashp;
3525 	dhashp->dnext = hp;
3526 	mutex_exit(&bofi_mutex);
3527 	mutex_exit(&bofi_low_mutex);
3528 	return (retval);
3529 }
3530 
3531 
3532 /*
3533  * our ddi_dma_freehdl routine
3534  */
3535 static int
3536 bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3537 {
3538 	int retval;
3539 	struct bofi_shadow *hp;
3540 	struct bofi_shadow *hhashp;
3541 
3542 	/*
3543 	 * find shadow for this handle
3544 	 */
3545 	mutex_enter(&bofi_low_mutex);
3546 	mutex_enter(&bofi_mutex);
3547 	hhashp = HDL_HHASH(handle);
3548 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3549 		if (hp->hdl.dma_handle == handle)
3550 			break;
3551 	mutex_exit(&bofi_mutex);
3552 	mutex_exit(&bofi_low_mutex);
3553 	/*
3554 	 * call nexus to do the real work
3555 	 */
3556 	retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
3557 	if (retval != DDI_SUCCESS) {
3558 		return (retval);
3559 	}
3560 	/*
3561 	 * did we really have a shadow for this handle
3562 	 */
3563 	if (hp == hhashp)
3564 		return (retval);
3565 	/*
3566 	 * yes we have - see if it's still bound
3567 	 */
3568 	mutex_enter(&bofi_low_mutex);
3569 	mutex_enter(&bofi_mutex);
3570 	if (hp->type != BOFI_NULL)
3571 		panic("driver freeing bound dma_handle");
3572 	/*
3573 	 * remove from dhash, hhash and inuse lists
3574 	 */
3575 	hp->hnext->hprev = hp->hprev;
3576 	hp->hprev->hnext = hp->hnext;
3577 	hp->dnext->dprev = hp->dprev;
3578 	hp->dprev->dnext = hp->dnext;
3579 	hp->next->prev = hp->prev;
3580 	hp->prev->next = hp->next;
3581 	mutex_exit(&bofi_mutex);
3582 	mutex_exit(&bofi_low_mutex);
3583 
3584 	kmem_free(hp, sizeof (struct bofi_shadow));
3585 	return (retval);
3586 }
3587 
3588 
3589 /*
3590  * our ddi_dma_bindhdl routine
3591  */
3592 static int
3593 bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
3594 	ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
3595 	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3596 {
3597 	int retval = DDI_DMA_NORESOURCES;
3598 	auto struct ddi_dma_req dmareq;
3599 	struct bofi_shadow *hp;
3600 	struct bofi_shadow *hhashp;
3601 	ddi_dma_impl_t *mp;
3602 	unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3603 
3604 	/*
3605 	 * check we really have a shadow for this handle
3606 	 */
3607 	mutex_enter(&bofi_low_mutex);
3608 	mutex_enter(&bofi_mutex);
3609 	hhashp = HDL_HHASH(handle);
3610 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3611 		if (hp->hdl.dma_handle == handle)
3612 			break;
3613 	mutex_exit(&bofi_mutex);
3614 	mutex_exit(&bofi_low_mutex);
3615 	if (hp == hhashp) {
3616 		/*
3617 		 * no we don't - just call nexus to do the real work
3618 		 */
3619 		return (save_bus_ops.bus_dma_bindhdl(dip, rdip, handle,
3620 		    dmareqp, cookiep, ccountp));
3621 	}
3622 	/*
3623 	 * yes we have - see if it's already bound
3624 	 */
3625 	if (hp->type != BOFI_NULL)
3626 		return (DDI_DMA_INUSE);
3627 
3628 	hp->flags = dmareqp->dmar_flags;
3629 	/*
3630 	 * get a kernel virtual mapping
3631 	 */
3632 	hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3633 	if (hp->addr == NULL)
3634 		goto error;
3635 	if (bofi_sync_check) {
3636 		/*
3637 		 * Take a copy and pass pointers to this up to nexus instead.
3638 		 * Data will be copied from the original on explicit
3639 		 * and implicit ddi_dma_sync()
3640 		 *
3641 		 * - maintain page alignment because some devices assume it.
3642 		 */
3643 		hp->origaddr = hp->addr;
3644 		hp->allocaddr = ddi_umem_alloc(
3645 		    ((uintptr_t)hp->addr & pagemask) + hp->len,
3646 		    (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
3647 		    &hp->umem_cookie);
3648 		if (hp->allocaddr == NULL)
3649 			goto error;
3650 		hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3651 		if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3652 			xbcopy(hp->origaddr, hp->addr, hp->len);
3653 		dmareq = *dmareqp;
3654 		dmareq.dmar_object.dmao_size = hp->len;
3655 		dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3656 		dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3657 		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3658 		dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3659 		dmareqp = &dmareq;
3660 	}
3661 	/*
3662 	 * call nexus to do the real work
3663 	 */
3664 	retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3665 	    cookiep, ccountp);
3666 	if (retval != DDI_SUCCESS)
3667 		goto error2;
3668 	/*
3669 	 * unset DMP_NOSYNC
3670 	 */
3671 	mp = (ddi_dma_impl_t *)handle;
3672 	mp->dmai_rflags &= ~DMP_NOSYNC;
3673 	/*
3674 	 * chain on any pre-existing errdefs that apply to this
3675 	 * dma_handle and corrupt if required (as there is an implicit
3676 	 * ddi_dma_sync() in this call)
3677 	 */
3678 	mutex_enter(&bofi_low_mutex);
3679 	mutex_enter(&bofi_mutex);
3680 	hp->type = BOFI_DMA_HDL;
3681 	chain_on_errdefs(hp);
3682 	mutex_exit(&bofi_mutex);
3683 	mutex_exit(&bofi_low_mutex);
3684 	return (retval);
3685 
3686 error:
3687 	if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3688 		/*
3689 		 * what to do here? Wait a bit and try again
3690 		 */
3691 		(void) timeout((void (*)())dmareqp->dmar_fp,
3692 		    dmareqp->dmar_arg, 10);
3693 	}
3694 error2:
3695 	if (hp) {
3696 		ddi_dmareq_mapout(hp->mapaddr, hp->len);
3697 		if (bofi_sync_check && hp->allocaddr)
3698 			ddi_umem_free(hp->umem_cookie);
3699 		hp->mapaddr = NULL;
3700 		hp->allocaddr = NULL;
3701 		hp->origaddr = NULL;
3702 	}
3703 	return (retval);
3704 }
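/*
 * Editorial sketch (not part of the driver): with bofi_sync_check set,
 * bofi_dma_bindhdl() substitutes a private buffer for the driver's
 * memory but keeps the original offset within a page, since some
 * devices assume a particular page alignment.  The variable names
 * below are hypothetical; the arithmetic mirrors the code above.
 */
#if 0
	pagemask = ddi_ptob(rdip, 1) - 1;
	pageoff = (uintptr_t)origaddr & pagemask;
	allocaddr = ddi_umem_alloc(pageoff + len, KM_SLEEP, &cookie);
	addr = allocaddr + pageoff;	/* same in-page offset as origaddr */
#endif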
3705 
3706 
3707 /*
3708  * our ddi_dma_unbindhdl routine
3709  */
3710 static int
3711 bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3712 {
3713 	struct bofi_link *lp, *next_lp;
3714 	struct bofi_errent *ep;
3715 	int retval;
3716 	struct bofi_shadow *hp;
3717 	struct bofi_shadow *hhashp;
3718 
3719 	/*
3720 	 * call nexus to do the real work
3721 	 */
3722 	retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
3723 	if (retval != DDI_SUCCESS)
3724 		return (retval);
3725 	/*
3726 	 * check we really have a shadow for this handle
3727 	 */
3728 	mutex_enter(&bofi_low_mutex);
3729 	mutex_enter(&bofi_mutex);
3730 	hhashp = HDL_HHASH(handle);
3731 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3732 		if (hp->hdl.dma_handle == handle)
3733 			break;
3734 	if (hp == hhashp) {
3735 		mutex_exit(&bofi_mutex);
3736 		mutex_exit(&bofi_low_mutex);
3737 		return (retval);
3738 	}
3739 	/*
3740 	 * yes we have - see if it's already unbound
3741 	 */
3742 	if (hp->type == BOFI_NULL)
3743 		panic("driver unbinding unbound dma_handle");
3744 	/*
3745 	 * free any errdef link structures tagged on to this
3746 	 * shadow handle
3747 	 */
3748 	for (lp = hp->link; lp != NULL; ) {
3749 		next_lp = lp->link;
3750 		/*
3751 		 * there is an implicit sync_for_cpu on unbind -
3752 		 * may need to corrupt
3753 		 */
3754 		ep = lp->errentp;
3755 		if ((ep->errdef.access_type & BOFI_DMA_R) &&
3756 		    (hp->flags & DDI_DMA_READ) &&
3757 		    (ep->state & BOFI_DEV_ACTIVE)) {
3758 			do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
3759 		}
3760 		lp->link = bofi_link_freelist;
3761 		bofi_link_freelist = lp;
3762 		lp = next_lp;
3763 	}
3764 	hp->link = NULL;
3765 	hp->type = BOFI_NULL;
3766 	mutex_exit(&bofi_mutex);
3767 	mutex_exit(&bofi_low_mutex);
3768 
3769 	if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
3770 		/*
3771 		 * implicit sync_for_cpu - copy data back
3772 		 */
3773 		if (hp->allocaddr)
3774 			xbcopy(hp->addr, hp->origaddr, hp->len);
3775 	ddi_dmareq_mapout(hp->mapaddr, hp->len);
3776 	if (bofi_sync_check && hp->allocaddr)
3777 		ddi_umem_free(hp->umem_cookie);
3778 	hp->mapaddr = NULL;
3779 	hp->allocaddr = NULL;
3780 	hp->origaddr = NULL;
3781 	return (retval);
3782 }
3783 
3784 
3785 /*
3786  * our ddi_dma_sync routine
3787  */
3788 static int
3789 bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
3790 		ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
3791 {
3792 	struct bofi_link *lp;
3793 	struct bofi_errent *ep;
3794 	struct bofi_shadow *hp;
3795 	struct bofi_shadow *hhashp;
3796 	int retval;
3797 
3798 	if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
3799 		/*
3800 		 * in this case get nexus driver to do sync first
3801 		 */
3802 		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3803 		    len, flags);
3804 		if (retval != DDI_SUCCESS)
3805 			return (retval);
3806 	}
3807 	/*
3808 	 * check we really have a shadow for this handle
3809 	 */
3810 	mutex_enter(&bofi_low_mutex);
3811 	mutex_enter(&bofi_mutex);
3812 	hhashp = HDL_HHASH(handle);
3813 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3814 		if (hp->hdl.dma_handle == handle &&
3815 		    hp->type == BOFI_DMA_HDL)
3816 			break;
3817 	mutex_exit(&bofi_mutex);
3818 	mutex_exit(&bofi_low_mutex);
3819 	if (hp != hhashp) {
3820 		/*
3821 		 * yes - do we need to copy data from original
3822 		 */
3823 		if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
3824 			if (hp->allocaddr)
3825 				xbcopy(hp->origaddr+off, hp->addr+off,
3826 				    len ? len : (hp->len - off));
3827 		/*
3828 		 * check if we need to corrupt the data
3829 		 */
3830 		mutex_enter(&bofi_low_mutex);
3831 		mutex_enter(&bofi_mutex);
3832 		for (lp = hp->link; lp != NULL; lp = lp->link) {
3833 			ep = lp->errentp;
3834 			if ((((ep->errdef.access_type & BOFI_DMA_R) &&
3835 			    (flags == DDI_DMA_SYNC_FORCPU ||
3836 			    flags == DDI_DMA_SYNC_FORKERNEL)) ||
3837 			    ((ep->errdef.access_type & BOFI_DMA_W) &&
3838 			    (flags == DDI_DMA_SYNC_FORDEV))) &&
3839 			    (ep->state & BOFI_DEV_ACTIVE)) {
3840 				do_dma_corrupt(hp, ep, flags, off,
3841 				    len ? len : (hp->len - off));
3842 			}
3843 		}
3844 		mutex_exit(&bofi_mutex);
3845 		mutex_exit(&bofi_low_mutex);
3846 		/*
3847 		 * do we need to copy data to original
3848 		 */
3849 		if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
3850 		    flags == DDI_DMA_SYNC_FORKERNEL))
3851 			if (hp->allocaddr)
3852 				xbcopy(hp->addr+off, hp->origaddr+off,
3853 				    len ? len : (hp->len - off));
3854 	}
3855 	if (flags == DDI_DMA_SYNC_FORDEV)
3856 		/*
3857 		 * in this case get nexus driver to do sync last
3858 		 */
3859 		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3860 		    len, flags);
3861 	return (retval);
3862 }
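/*
 * Editorial sketch (not part of the driver): bofi_dma_flush() orders
 * the real sync around the corruption/copy step depending on the
 * direction - the device's data must land before a FORCPU/FORKERNEL
 * view is corrupted and copied out, while a FORDEV view is corrupted
 * and copied in before being flushed to the device.  Schematically
 * (real_sync/corrupt_and_copy are hypothetical names):
 */
#if 0
	if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL)
		real_sync();		/* pull device data in first */
	corrupt_and_copy();		/* errdef corruption + sync_check copy */
	if (flags == DDI_DMA_SYNC_FORDEV)
		real_sync();		/* push the final image out last */
#endif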
3863 
3864 
3865 /*
3866  * our dma_win routine
3867  */
3868 static int
3869 bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
3870 	ddi_dma_handle_t handle, uint_t win, off_t *offp,
3871 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3872 {
3873 	struct bofi_shadow *hp;
3874 	struct bofi_shadow *hhashp;
3875 	int retval;
3876 	ddi_dma_impl_t *mp;
3877 
3878 	/*
3879 	 * call nexus to do the real work
3880 	 */
3881 	retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
3882 	    cookiep, ccountp);
3883 	if (retval != DDI_SUCCESS)
3884 		return (retval);
3885 	/*
3886 	 * check we really have a shadow for this handle
3887 	 */
3888 	mutex_enter(&bofi_low_mutex);
3889 	mutex_enter(&bofi_mutex);
3890 	hhashp = HDL_HHASH(handle);
3891 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3892 		if (hp->hdl.dma_handle == handle)
3893 			break;
3894 	if (hp != hhashp) {
3895 		/*
3896 		 * yes - make sure DMP_NOSYNC is unset
3897 		 */
3898 		mp = (ddi_dma_impl_t *)handle;
3899 		mp->dmai_rflags &= ~DMP_NOSYNC;
3900 	}
3901 	mutex_exit(&bofi_mutex);
3902 	mutex_exit(&bofi_low_mutex);
3903 	return (retval);
3904 }
3905 
3906 
3907 /*
3908  * our dma_ctl routine
3909  */
3910 static int
3911 bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
3912 		ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
3913 		off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
3914 {
3915 	struct bofi_link *lp, *next_lp;
3916 	struct bofi_errent *ep;
3917 	struct bofi_shadow *hp;
3918 	struct bofi_shadow *hhashp;
3919 	int retval;
3920 	int i;
3921 	struct bofi_shadow *dummyhp;
3922 	ddi_dma_impl_t *mp;
3923 
3924 	/*
3925 	 * get nexus to do real work
3926 	 */
3927 	retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
3928 	    lenp, objp, flags);
3929 	if (retval != DDI_SUCCESS)
3930 		return (retval);
3931 	/*
3932 	 * if driver_list is set, only intercept those drivers
3933 	 */
3934 	if (!driver_under_test(rdip))
3935 		return (DDI_SUCCESS);
3936 
3937 #if defined(__sparc)
3938 	/*
3939 	 * check if this is a dvma_reserve - that one's like a
3940 	 * dma_allochdl and needs to be handled separately
3941 	 */
3942 	if (request == DDI_DMA_RESERVE) {
3943 		bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
3944 		return (DDI_SUCCESS);
3945 	}
3946 #endif
3947 	/*
3948 	 * check we really have a shadow for this handle
3949 	 */
3950 	mutex_enter(&bofi_low_mutex);
3951 	mutex_enter(&bofi_mutex);
3952 	hhashp = HDL_HHASH(handle);
3953 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3954 		if (hp->hdl.dma_handle == handle)
3955 			break;
3956 	if (hp == hhashp) {
3957 		mutex_exit(&bofi_mutex);
3958 		mutex_exit(&bofi_low_mutex);
3959 		return (retval);
3960 	}
3961 	/*
3962 	 * yes we have - see what kind of command this is
3963 	 */
3964 	switch (request) {
3965 	case DDI_DMA_RELEASE:
3966 		/*
3967 		 * dvma release - release dummy handle and all the index handles
3968 		 */
3969 		dummyhp = hp;
3970 		dummyhp->hnext->hprev = dummyhp->hprev;
3971 		dummyhp->hprev->hnext = dummyhp->hnext;
3972 		mutex_exit(&bofi_mutex);
3973 		mutex_exit(&bofi_low_mutex);
3974 		for (i = 0; i < dummyhp->len; i++) {
3975 			hp = dummyhp->hparrayp[i];
3976 			/*
3977 			 * check none of the index handles are still loaded
3978 			 */
3979 			if (hp->type != BOFI_NULL)
3980 				panic("driver releasing loaded dvma");
3981 			/*
3982 			 * remove from dhash and inuse lists
3983 			 */
3984 			mutex_enter(&bofi_low_mutex);
3985 			mutex_enter(&bofi_mutex);
3986 			hp->dnext->dprev = hp->dprev;
3987 			hp->dprev->dnext = hp->dnext;
3988 			hp->next->prev = hp->prev;
3989 			hp->prev->next = hp->next;
3990 			mutex_exit(&bofi_mutex);
3991 			mutex_exit(&bofi_low_mutex);
3992 
3993 			if (bofi_sync_check && hp->allocaddr)
3994 				ddi_umem_free(hp->umem_cookie);
3995 			kmem_free(hp, sizeof (struct bofi_shadow));
3996 		}
3997 		kmem_free(dummyhp->hparrayp, dummyhp->len *
3998 		    sizeof (struct bofi_shadow *));
3999 		kmem_free(dummyhp, sizeof (struct bofi_shadow));
4000 		return (retval);
4001 	case DDI_DMA_FREE:
4002 		/*
4003 		 * ddi_dma_free case - remove from dhash, hhash and inuse lists
4004 		 */
4005 		hp->hnext->hprev = hp->hprev;
4006 		hp->hprev->hnext = hp->hnext;
4007 		hp->dnext->dprev = hp->dprev;
4008 		hp->dprev->dnext = hp->dnext;
4009 		hp->next->prev = hp->prev;
4010 		hp->prev->next = hp->next;
4011 		/*
4012 		 * free any errdef link structures tagged on to this
4013 		 * shadow handle
4014 		 */
4015 		for (lp = hp->link; lp != NULL; ) {
4016 			next_lp = lp->link;
4017 			/*
4018 			 * there is an implicit sync_for_cpu on free -
4019 			 * may need to corrupt
4020 			 */
4021 			ep = lp->errentp;
4022 			if ((ep->errdef.access_type & BOFI_DMA_R) &&
4023 			    (hp->flags & DDI_DMA_READ) &&
4024 			    (ep->state & BOFI_DEV_ACTIVE)) {
4025 				do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU,
4026 				    0, hp->len);
4027 			}
4028 			lp->link = bofi_link_freelist;
4029 			bofi_link_freelist = lp;
4030 			lp = next_lp;
4031 		}
4032 		hp->link = NULL;
4033 		mutex_exit(&bofi_mutex);
4034 		mutex_exit(&bofi_low_mutex);
4035 
4036 		if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
4037 			if (hp->allocaddr)
4038 				xbcopy(hp->addr, hp->origaddr, hp->len);
4039 		ddi_dmareq_mapout(hp->mapaddr, hp->len);
4040 		if (bofi_sync_check && hp->allocaddr)
4041 			ddi_umem_free(hp->umem_cookie);
4042 		kmem_free(hp, sizeof (struct bofi_shadow));
4043 		return (retval);
4044 	case DDI_DMA_MOVWIN:
4045 		mp = (ddi_dma_impl_t *)handle;
4046 		mp->dmai_rflags &= ~DMP_NOSYNC;
4047 		break;
4048 	case DDI_DMA_NEXTWIN:
4049 		mp = (ddi_dma_impl_t *)handle;
4050 		mp->dmai_rflags &= ~DMP_NOSYNC;
4051 		break;
4052 	default:
4053 		break;
4054 	}
4055 	mutex_exit(&bofi_mutex);
4056 	mutex_exit(&bofi_low_mutex);
4057 	return (retval);
4058 }
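/*
 * Editorial sketch (not part of the driver): errdef link structures
 * are recycled through bofi_link_freelist under bofi_mutex rather than
 * freed, presumably so the intercept paths never need to allocate.
 * The push/pop idiom used throughout is just a singly-linked stack:
 */
#if 0
	/* push - done when a shadow handle is unbound or freed */
	lp->link = bofi_link_freelist;
	bofi_link_freelist = lp;

	/* pop - done when chaining an errdef onto a shadow handle */
	lp = bofi_link_freelist;
	if (lp != NULL)
		bofi_link_freelist = lp->link;
#endif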
4059 
4060 #if defined(__sparc)
4061 /*
4062  * dvma reserve case from bofi_dma_ctl()
4063  */
4064 static void
4065 bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle)
4066 {
4067 	struct bofi_shadow *hp;
4068 	struct bofi_shadow *dummyhp;
4069 	struct bofi_shadow *dhashp;
4070 	struct bofi_shadow *hhashp;
4071 	ddi_dma_impl_t *mp;
4072 	struct fast_dvma *nexus_private;
4073 	int i, count;
4074 
4075 	mp = (ddi_dma_impl_t *)handle;
4076 	count = mp->dmai_ndvmapages;
4077 	/*
4078 	 * allocate dummy shadow handle structure
4079 	 */
4080 	dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP);
4081 	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
4082 		/*
4083 		 * overlay our routines over the nexus's dvma routines
4084 		 */
4085 		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
4086 		dummyhp->save.dvma_ops = *(nexus_private->ops);
4087 		nexus_private->ops = &bofi_dvma_ops;
4088 	}
4089 	/*
4090 	 * now fill in the dummy handle. This just goes on the hhash queue
4091 	 * so our dvma routines can find it and index off to the handles
4092 	 * they really want.
4093 	 */
4094 	(void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE);
4095 	dummyhp->instance = ddi_get_instance(rdip);
4096 	dummyhp->rnumber = -1;
4097 	dummyhp->dip = rdip;
4098 	dummyhp->len = count;
4099 	dummyhp->hdl.dma_handle = handle;
4100 	dummyhp->link = NULL;
4101 	dummyhp->type = BOFI_NULL;
4102 	/*
4103 	 * allocate space for real handles
4104 	 */
4105 	dummyhp->hparrayp = kmem_alloc(count *
4106 	    sizeof (struct bofi_shadow *), KM_SLEEP);
4107 	for (i = 0; i < count; i++) {
4108 		/*
4109 		 * allocate shadow handle structures and fill them in
4110 		 */
4111 		hp = kmem_zalloc(sizeof (*hp), KM_SLEEP);
4112 		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
4113 		hp->instance = ddi_get_instance(rdip);
4114 		hp->rnumber = -1;
4115 		hp->dip = rdip;
4116 		hp->hdl.dma_handle = 0;
4117 		hp->link = NULL;
4118 		hp->type = BOFI_NULL;
4119 		if (bofi_sync_check) {
4120 			unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
4121 			/*
4122 			 * Take a copy and set this to be hp->addr
4123 			 * Data will be copied to and from the original on
4124 			 * explicit and implicit ddi_dma_sync()
4125 			 *
4126 			 * - maintain page alignment because some devices
4127 			 * assume it.
4128 			 */
4129 			hp->allocaddr = ddi_umem_alloc(
4130 			    ((int)(uintptr_t)hp->addr & pagemask)
4131 				+ pagemask + 1,
4132 			    KM_SLEEP, &hp->umem_cookie);
4133 			hp->addr = hp->allocaddr +
4134 			    ((int)(uintptr_t)hp->addr & pagemask);
4135 		}
4136 		/*
4137 		 * add to dhash and inuse lists.
4138 		 * these don't go on hhash queue.
4139 		 */
4140 		mutex_enter(&bofi_low_mutex);
4141 		mutex_enter(&bofi_mutex);
4142 		hp->next = shadow_list.next;
4143 		shadow_list.next->prev = hp;
4144 		hp->prev = &shadow_list;
4145 		shadow_list.next = hp;
4146 		dhashp = HDL_DHASH(hp->dip);
4147 		hp->dnext = dhashp->dnext;
4148 		dhashp->dnext->dprev = hp;
4149 		hp->dprev = dhashp;
4150 		dhashp->dnext = hp;
4151 		dummyhp->hparrayp[i] = hp;
4152 		mutex_exit(&bofi_mutex);
4153 		mutex_exit(&bofi_low_mutex);
4154 	}
4155 	/*
4156 	 * add dummy handle to hhash list only
4157 	 */
4158 	mutex_enter(&bofi_low_mutex);
4159 	mutex_enter(&bofi_mutex);
4160 	hhashp = HDL_HHASH(handle);
4161 	dummyhp->hnext = hhashp->hnext;
4162 	hhashp->hnext->hprev = dummyhp;
4163 	dummyhp->hprev = hhashp;
4164 	hhashp->hnext = dummyhp;
4165 	mutex_exit(&bofi_mutex);
4166 	mutex_exit(&bofi_low_mutex);
4167 }
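/*
 * Editorial sketch (not part of the driver): a dvma reserve creates
 * one dummy shadow on the hhash queue plus an array of per-page index
 * shadows, so the dvma routines locate the real shadow in two steps
 * (ex_hhash_find is a hypothetical name for the hhash walk):
 */
#if 0
	dummyhp = ex_hhash_find(h);	/* dummy shadow for this handle */
	hp = dummyhp->hparrayp[index];	/* real shadow for this dvma page */
#endif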
4168 
4169 /*
4170  * our dvma_kaddr_load()
4171  */
4172 static void
4173 bofi_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
4174 	ddi_dma_cookie_t *cp)
4175 {
4176 	struct bofi_shadow *dummyhp;
4177 	struct bofi_shadow *hp;
4178 	struct bofi_shadow *hhashp;
4179 	struct bofi_errent *ep;
4180 	struct bofi_link   *lp;
4181 
4182 	/*
4183 	 * check we really have a dummy shadow for this handle
4184 	 */
4185 	mutex_enter(&bofi_low_mutex);
4186 	mutex_enter(&bofi_mutex);
4187 	hhashp = HDL_HHASH(h);
4188 	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4189 	    dummyhp = dummyhp->hnext)
4190 		if (dummyhp->hdl.dma_handle == h)
4191 			break;
4192 	mutex_exit(&bofi_mutex);
4193 	mutex_exit(&bofi_low_mutex);
4194 	if (dummyhp == hhashp) {
4195 		/*
4196 		 * no dummy shadow - panic
4197 		 */
4198 		panic("driver dvma_kaddr_load with no reserve");
4199 	}
4200 
4201 	/*
4202 	 * find real hp
4203 	 */
4204 	hp = dummyhp->hparrayp[index];
4205 	/*
4206 	 * check it's not already loaded
4207 	 */
4208 	if (hp->type != BOFI_NULL)
4209 		panic("driver loading loaded dvma");
4210 	/*
4211 	 * if we're doing copying, we just need to change origaddr and get
4212 	 * the nexus to map hp->addr again;
4213 	 * if not, set hp->addr to the new address.
4214 	 * - note these are always kernel virtual addresses - no need to map
4215 	 */
4216 	if (bofi_sync_check && hp->allocaddr) {
4217 		hp->origaddr = a;
4218 		a = hp->addr;
4219 	} else
4220 		hp->addr = a;
4221 	hp->len = len;
4222 	/*
4223 	 * get nexus to do the real work
4224 	 */
4225 	dummyhp->save.dvma_ops.dvma_kaddr_load(h, a, len, index, cp);
4226 	/*
4227 	 * chain on any pre-existing errdefs that apply to this dma_handle -
4228 	 * no need to corrupt, as there's no implicit dma_sync on this one
4229 	 */
4230 	mutex_enter(&bofi_low_mutex);
4231 	mutex_enter(&bofi_mutex);
4232 	hp->type = BOFI_DMA_HDL;
4233 	for (ep = errent_listp; ep != NULL; ep = ep->next) {
4234 		if (ddi_name_to_major(hp->name) ==
4235 		    ddi_name_to_major(ep->name) &&
4236 		    hp->instance == ep->errdef.instance &&
4237 		    (ep->errdef.rnumber == -1 ||
4238 		    hp->rnumber == ep->errdef.rnumber) &&
4239 		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
4240 		    (((uintptr_t)(hp->addr + ep->errdef.offset +
4241 		    ep->errdef.len) & ~LLSZMASK) >
4242 		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
4243 		    LLSZMASK) & ~LLSZMASK)))) {
4244 			lp = bofi_link_freelist;
4245 			if (lp != NULL) {
4246 				bofi_link_freelist = lp->link;
4247 				lp->errentp = ep;
4248 				lp->link = hp->link;
4249 				hp->link = lp;
4250 			}
4251 		}
4252 	}
4253 	mutex_exit(&bofi_mutex);
4254 	mutex_exit(&bofi_low_mutex);
4255 }
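/*
 * Editorial sketch (not part of the driver): one reading of the
 * LLSZMASK test above is that an errdef is only chained on if its
 * [offset, offset + len) window covers at least one naturally aligned
 * long long within the mapping - i.e. the window's rounded-down end
 * lies beyond its rounded-up start:
 */
#if 0
	start = ((uintptr_t)(hp->addr + ep->errdef.offset) + LLSZMASK) &
	    ~LLSZMASK;				/* round start up */
	end = (uintptr_t)(hp->addr + ep->errdef.offset +
	    ep->errdef.len) & ~LLSZMASK;	/* round end down */
	if (end > start)
		/* window contains a whole aligned long long */
#endif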
4256 
4257 /*
4258  * our dvma_unload()
4259  */
4260 static void
4261 bofi_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
4262 {
4263 	struct bofi_link *lp, *next_lp;
4264 	struct bofi_errent *ep;
4265 	struct bofi_shadow *dummyhp;
4266 	struct bofi_shadow *hp;
4267 	struct bofi_shadow *hhashp;
4268 
4269 	/*
4270 	 * check we really have a dummy shadow for this handle
4271 	 */
4272 	mutex_enter(&bofi_low_mutex);
4273 	mutex_enter(&bofi_mutex);
4274 	hhashp = HDL_HHASH(h);
4275 	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4276 	    dummyhp = dummyhp->hnext)
4277 		if (dummyhp->hdl.dma_handle == h)
4278 			break;
4279 	mutex_exit(&bofi_mutex);
4280 	mutex_exit(&bofi_low_mutex);
4281 	if (dummyhp == hhashp) {
4282 		/*
4283 		 * no dummy shadow - panic
4284 		 */
4285 		panic("driver dvma_unload with no reserve");
4286 	}
4287 	dummyhp->save.dvma_ops.dvma_unload(h, index, view);
4288 	/*
4289 	 * find real hp
4290 	 */
4291 	hp = dummyhp->hparrayp[index];
4292 	/*
4293 	 * check it's not already unloaded
4294 	 */
4295 	if (hp->type == BOFI_NULL)
4296 		panic("driver unloading unloaded dvma");
4297 	/*
4298 	 * free any errdef link structures tagged on to this
4299 	 * shadow handle - do corruption if necessary
4300 	 */
4301 	mutex_enter(&bofi_low_mutex);
4302 	mutex_enter(&bofi_mutex);
4303 	for (lp = hp->link; lp != NULL; ) {
4304 		next_lp = lp->link;
4305 		ep = lp->errentp;
4306 		if ((ep->errdef.access_type & BOFI_DMA_R) &&
4307 		    (view == DDI_DMA_SYNC_FORCPU ||
4308 		    view == DDI_DMA_SYNC_FORKERNEL) &&
4309 		    (ep->state & BOFI_DEV_ACTIVE)) {
4310 			do_dma_corrupt(hp, ep, view, 0, hp->len);
4311 		}
4312 		lp->link = bofi_link_freelist;
4313 		bofi_link_freelist = lp;
4314 		lp = next_lp;
4315 	}
4316 	hp->link = NULL;
4317 	hp->type = BOFI_NULL;
4318 	mutex_exit(&bofi_mutex);
4319 	mutex_exit(&bofi_low_mutex);
4320 	/*
4321 	 * if there is an explicit sync_for_cpu, then do copy to original
4322 	 */
4323 	if (bofi_sync_check &&
4324 	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL))
4325 		if (hp->allocaddr)
4326 			xbcopy(hp->addr, hp->origaddr, hp->len);
4327 }
4328 
4329 /*
4330  * our dvma_sync()
4331  */
4332 static void
4333 bofi_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
4334 {
4335 	struct bofi_link *lp;
4336 	struct bofi_errent *ep;
4337 	struct bofi_shadow *hp;
4338 	struct bofi_shadow *dummyhp;
4339 	struct bofi_shadow *hhashp;
4340 
4341 	/*
4342 	 * check we really have a dummy shadow for this handle
4343 	 */
4344 	mutex_enter(&bofi_low_mutex);
4345 	mutex_enter(&bofi_mutex);
4346 	hhashp = HDL_HHASH(h);
4347 	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4348 	    dummyhp = dummyhp->hnext)
4349 		if (dummyhp->hdl.dma_handle == h)
4350 			break;
4351 	mutex_exit(&bofi_mutex);
4352 	mutex_exit(&bofi_low_mutex);
4353 	if (dummyhp == hhashp) {
4354 		/*
4355 		 * no dummy shadow - panic
4356 		 */
4357 		panic("driver dvma_sync with no reserve");
4358 	}
4359 	/*
4360 	 * find real hp
4361 	 */
4362 	hp = dummyhp->hparrayp[index];
4363 	/*
4364 	 * check it's still loaded
4365 	 */
4366 	if (hp->type == BOFI_NULL)
4367 		panic("driver syncing unloaded dvma");
4368 	if (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)
4369 		/*
4370 		 * in this case do sync first
4371 		 */
4372 		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
4373 	/*
4374 	 * if there is an explicit sync_for_dev, then do copy from original
4375 	 */
4376 	if (bofi_sync_check && view == DDI_DMA_SYNC_FORDEV) {
4377 		if (hp->allocaddr)
4378 			xbcopy(hp->origaddr, hp->addr, hp->len);
4379 	}
4380 	/*
4381 	 * do corruption if necessary
4382 	 */
4383 	mutex_enter(&bofi_low_mutex);
4384 	mutex_enter(&bofi_mutex);
4385 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4386 		ep = lp->errentp;
4387 		if ((((ep->errdef.access_type & BOFI_DMA_R) &&
4388 		    (view == DDI_DMA_SYNC_FORCPU ||
4389 		    view == DDI_DMA_SYNC_FORKERNEL)) ||
4390 		    ((ep->errdef.access_type & BOFI_DMA_W) &&
4391 		    (view == DDI_DMA_SYNC_FORDEV))) &&
4392 		    (ep->state & BOFI_DEV_ACTIVE)) {
4393 			do_dma_corrupt(hp, ep, view, 0, hp->len);
4394 		}
4395 	}
4396 	mutex_exit(&bofi_mutex);
4397 	mutex_exit(&bofi_low_mutex);
4398 	/*
4399 	 * if there is an explicit sync_for_cpu, then do copy to original
4400 	 */
4401 	if (bofi_sync_check &&
4402 	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)) {
4403 		if (hp->allocaddr)
4404 			xbcopy(hp->addr, hp->origaddr, hp->len);
4405 	}
4406 	if (view == DDI_DMA_SYNC_FORDEV)
4407 		/*
4408 		 * in this case do sync last
4409 		 */
4410 		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
4411 }
4412 #endif
4413 
4414 /*
4415  * bofi intercept routine - gets called instead of the user's interrupt routine
4416  */
4417 static uint_t
4418 bofi_intercept_intr(caddr_t xp)
4419 {
4420 	struct bofi_errent *ep;
4421 	struct bofi_link   *lp;
4422 	struct bofi_shadow *hp;
4423 	int intr_count = 1;
4424 	int i;
4425 	uint_t retval = DDI_INTR_UNCLAIMED;
4426 	uint_t result;
4427 	int unclaimed_counter = 0;
4428 	int jabber_detected = 0;
4429 
4430 	hp = (struct bofi_shadow *)xp;
4431 	/*
4432 	 * check if nothing to do
4433 	 */
4434 	if (hp->link == NULL)
4435 		return (hp->save.intr.int_handler
4436 		    (hp->save.intr.int_handler_arg1, NULL));
4437 	mutex_enter(&bofi_mutex);
4438 	/*
4439 	 * look for any errdefs
4440 	 */
4441 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4442 		ep = lp->errentp;
4443 		if (ep->state & BOFI_DEV_ACTIVE) {
4444 			/*
4445 			 * got one
4446 			 */
4447 			if ((ep->errdef.access_count ||
4448 			    ep->errdef.fail_count) &&
4449 			    (ep->errdef.access_type & BOFI_LOG))
4450 				log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
4451 			if (ep->errdef.access_count > 1) {
4452 				ep->errdef.access_count--;
4453 			} else if (ep->errdef.fail_count > 0) {
4454 				ep->errdef.fail_count--;
4455 				ep->errdef.access_count = 0;
4456 				/*
4457 				 * OK do "corruption"
4458 				 */
4459 				if (ep->errstate.fail_time == 0)
4460 					ep->errstate.fail_time = bofi_gettime();
4461 				switch (ep->errdef.optype) {
4462 				case BOFI_DELAY_INTR:
4463 					if (!hp->hilevel) {
4464 						drv_usecwait
4465 						    (ep->errdef.operand);
4466 					}
4467 					break;
4468 				case BOFI_LOSE_INTR:
4469 					intr_count = 0;
4470 					break;
4471 				case BOFI_EXTRA_INTR:
4472 					intr_count += ep->errdef.operand;
4473 					break;
4474 				default:
4475 					break;
4476 				}
4477 			}
4478 		}
4479 	}
4480 	mutex_exit(&bofi_mutex);
4481 	/*
4482 	 * send extra or fewer interrupts as requested
4483 	 */
4484 	for (i = 0; i < intr_count; i++) {
4485 		result = hp->save.intr.int_handler
4486 		    (hp->save.intr.int_handler_arg1, NULL);
4487 		if (result == DDI_INTR_CLAIMED)
4488 			unclaimed_counter >>= 1;
4489 		else if (++unclaimed_counter >= 20)
4490 			jabber_detected = 1;
4491 		if (i == 0)
4492 			retval = result;
4493 	}
4494 	/*
4495 	 * if more than 1000 spurious interrupts were requested and
4496 	 * jabber was not detected - panic
4497 	 */
4498 	if (intr_count > 1000 && !jabber_detected)
4499 		panic("undetected interrupt jabber: %s%d",
4500 		    hp->name, hp->instance);
4501 	/*
4502 	 * return first response - or "unclaimed" if none
4503 	 */
4504 	return (retval);
4505 }
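/*
 * Editorial sketch (not part of the driver): the jabber check above is
 * a leaky counter - each claimed interrupt halves it, each unclaimed
 * one increments it, so only a sustained unclaimed streak trips the
 * threshold.  As a hypothetical stand-alone predicate:
 */
#if 0
static int
ex_jabbering(uint_t result, int *counterp)
{
	if (result == DDI_INTR_CLAIMED)
		*counterp >>= 1;	/* decay on progress */
	else if (++*counterp >= 20)
		return (1);		/* ~20 consecutive unclaimed */
	return (0);
}
#endif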
4506 
4507 
4508 /*
4509  * our ddi_check_acc_hdl
4510  */
4511 /* ARGSUSED */
4512 static int
4513 bofi_check_acc_hdl(ddi_acc_impl_t *handle)
4514 {
4515 	struct bofi_shadow *hp;
4516 	struct bofi_link   *lp;
4517 	uint_t result = 0;
4518 
4519 	hp = handle->ahi_common.ah_bus_private;
4520 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
4521 		return (0);
4522 	}
4523 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4524 		/*
4525 		 * OR in error state from all associated
4526 		 * errdef structures
4527 		 */
4528 		if (lp->errentp->errdef.access_count == 0 &&
4529 		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4530 			result = (lp->errentp->errdef.acc_chk & 1);
4531 		}
4532 	}
4533 	mutex_exit(&bofi_mutex);
4534 	return (result);
4535 }
4536 
4537 /*
4538  * our ddi_check_dma_hdl
4539  */
4540 /* ARGSUSED */
4541 static int
4542 bofi_check_dma_hdl(ddi_dma_impl_t *handle)
4543 {
4544 	struct bofi_shadow *hp;
4545 	struct bofi_link   *lp;
4546 	struct bofi_shadow *hhashp;
4547 	uint_t result = 0;
4548 
4549 	if (!mutex_tryenter(&bofi_mutex)) {
4550 		return (0);
4551 	}
4552 	hhashp = HDL_HHASH(handle);
4553 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
4554 		if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
4555 			break;
4556 	if (hp == hhashp) {
4557 		mutex_exit(&bofi_mutex);
4558 		return (0);
4559 	}
4560 	if (!hp->link) {
4561 		mutex_exit(&bofi_mutex);
4562 		return (0);
4563 	}
4564 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4565 		/*
4566 		 * OR in error state from all associated
4567 		 * errdef structures
4568 		 */
4569 		if (lp->errentp->errdef.access_count == 0 &&
4570 		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4571 			result = ((lp->errentp->errdef.acc_chk & 2) ? 1 : 0);
4572 		}
4573 	}
4574 	mutex_exit(&bofi_mutex);
4575 	return (result);
4576 }
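/*
 * Editorial sketch (not part of the driver): the two routines above
 * appear to split the errdef's acc_chk field into per-mechanism bits -
 * bit 0 feeds the access-handle check and bit 1 the DMA-handle check.
 * Hypothetical names for the two bits:
 */
#if 0
#define	EX_ACC_CHK_ACC	0x1	/* returned by bofi_check_acc_hdl() */
#define	EX_ACC_CHK_DMA	0x2	/* returned by bofi_check_dma_hdl() */
#endif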
4577 
4578 
4579 /* ARGSUSED */
4580 static int
4581 bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
4582 		    ddi_eventcookie_t eventhdl, void *impl_data)
4583 {
4584 	ddi_eventcookie_t ec;
4585 	struct ddi_fault_event_data *arg;
4586 	struct bofi_errent *ep;
4587 	struct bofi_shadow *hp;
4588 	struct bofi_shadow *dhashp;
4589 	struct bofi_link   *lp;
4590 
4591 	ASSERT(eventhdl);
4592 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
4593 		return (DDI_FAILURE);
4594 
4595 	if (ec != eventhdl)
4596 		return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
4597 		    impl_data));
4598 
4599 	arg = (struct ddi_fault_event_data *)impl_data;
4600 	mutex_enter(&bofi_mutex);
4601 	/*
4602 	 * find shadow handles with appropriate dev_infos
4603 	 * and set error reported on all associated errdef structures
4604 	 */
4605 	dhashp = HDL_DHASH(arg->f_dip);
4606 	for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
4607 		if (hp->dip == arg->f_dip) {
4608 			for (lp = hp->link; lp != NULL; lp = lp->link) {
4609 				ep = lp->errentp;
4610 				ep->errstate.errmsg_count++;
4611 				if ((ep->errstate.msg_time == NULL ||
4612 				    ep->errstate.severity > arg->f_impact) &&
4613 				    (ep->state & BOFI_DEV_ACTIVE)) {
4614 					ep->errstate.msg_time = bofi_gettime();
4615 					ep->errstate.severity = arg->f_impact;
4616 					(void) strncpy(ep->errstate.buffer,
4617 					    arg->f_message, ERRMSGSIZE);
4618 					ddi_trigger_softintr(ep->softintr_id);
4619 				}
4620 			}
4621 		}
4622 	}
4623 	mutex_exit(&bofi_mutex);
4624 	return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
4625 }
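/*
 * Editorial sketch (not part of the driver): bofi_post_event() records
 * the most severe fault message per errdef and then defers delivery by
 * triggering the errdef's soft interrupt, presumably so the event path
 * itself never blocks on the consumer.  Schematically:
 */
#if 0
	(void) strncpy(ep->errstate.buffer, arg->f_message, ERRMSGSIZE);
	ddi_trigger_softintr(ep->softintr_id);	/* deferred notification */
#endif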
4626 
4627 /*
4628  * our intr_ops routine
4629  */
4630 static int
4631 bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
4632     ddi_intr_handle_impl_t *hdlp, void *result)
4633 {
4634 	int retval;
4635 	struct bofi_shadow *hp;
4636 	struct bofi_shadow *dhashp;
4637 	struct bofi_shadow *hhashp;
4638 	struct bofi_errent *ep;
4639 	struct bofi_link   *lp, *next_lp;
4640 
4641 	switch (intr_op) {
4642 	case DDI_INTROP_ADDISR:
4643 		/*
4644 		 * if driver_list is set, only intercept those drivers
4645 		 */
4646 		if (!driver_under_test(rdip))
4647 			return (save_bus_ops.bus_intr_op(dip, rdip,
4648 			    intr_op, hdlp, result));
4649 		/*
4650 		 * allocate shadow handle structure and fill in
4651 		 */
4652 		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
4653 		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
4654 		hp->instance = ddi_get_instance(rdip);
4655 		hp->save.intr.int_handler = hdlp->ih_cb_func;
4656 		hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
4657 		hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
4658 		hdlp->ih_cb_arg1 = (caddr_t)hp;
4659 		hp->bofi_inum = hdlp->ih_inum;
4660 		hp->dip = rdip;
4661 		hp->link = NULL;
4662 		hp->type = BOFI_INT_HDL;
4663 		/*
4664 		 * save whether hilevel or not
4665 		 */
4666 
4667 		if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
4668 			hp->hilevel = 1;
4669 		else
4670 			hp->hilevel = 0;
4671 
4672 		/*
4673 		 * call nexus to do real work, but specifying our handler, and
4674 		 * our shadow handle as argument
4675 		 */
4676 		retval = save_bus_ops.bus_intr_op(dip, rdip,
4677 		    intr_op, hdlp, result);
4678 		if (retval != DDI_SUCCESS) {
4679 			kmem_free(hp, sizeof (struct bofi_shadow));
4680 			return (retval);
4681 		}
4682 		/*
4683 		 * add to dhash, hhash and inuse lists
4684 		 */
4685 		mutex_enter(&bofi_low_mutex);
4686 		mutex_enter(&bofi_mutex);
4687 		hp->next = shadow_list.next;
4688 		shadow_list.next->prev = hp;
4689 		hp->prev = &shadow_list;
4690 		shadow_list.next = hp;
4691 		hhashp = HDL_HHASH(hdlp->ih_inum);
4692 		hp->hnext = hhashp->hnext;
4693 		hhashp->hnext->hprev = hp;
4694 		hp->hprev = hhashp;
4695 		hhashp->hnext = hp;
4696 		dhashp = HDL_DHASH(hp->dip);
4697 		hp->dnext = dhashp->dnext;
4698 		dhashp->dnext->dprev = hp;
4699 		hp->dprev = dhashp;
4700 		dhashp->dnext = hp;
4701 		/*
4702 		 * chain on any pre-existing errdefs that apply to this
4703 		 * acc_handle
4704 		 */
4705 		for (ep = errent_listp; ep != NULL; ep = ep->next) {
4706 			if (ddi_name_to_major(hp->name) ==
4707 			    ddi_name_to_major(ep->name) &&
4708 			    hp->instance == ep->errdef.instance &&
4709 			    (ep->errdef.access_type & BOFI_INTR)) {
4710 				lp = bofi_link_freelist;
4711 				if (lp != NULL) {
4712 					bofi_link_freelist = lp->link;
4713 					lp->errentp = ep;
4714 					lp->link = hp->link;
4715 					hp->link = lp;
4716 				}
4717 			}
4718 		}
4719 		mutex_exit(&bofi_mutex);
4720 		mutex_exit(&bofi_low_mutex);
4721 		return (retval);
4722 	case DDI_INTROP_REMISR:
4723 		/*
4724 		 * call nexus routine first
4725 		 */
4726 		retval = save_bus_ops.bus_intr_op(dip, rdip,
4727 		    intr_op, hdlp, result);
4728 		/*
4729 		 * find shadow handle
4730 		 */
4731 		mutex_enter(&bofi_low_mutex);
4732 		mutex_enter(&bofi_mutex);
4733 		hhashp = HDL_HHASH(hdlp->ih_inum);
4734 		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
4735 			if (hp->dip == rdip &&
4736 			    hp->type == BOFI_INT_HDL &&
4737 			    hp->bofi_inum == hdlp->ih_inum) {
4738 				break;
4739 			}
4740 		}
4741 		if (hp == hhashp) {
4742 			mutex_exit(&bofi_mutex);
4743 			mutex_exit(&bofi_low_mutex);
4744 			return (retval);
4745 		}
4746 		/*
4747 		 * found one - remove from dhash, hhash and inuse lists
4748 		 */
4749 		hp->hnext->hprev = hp->hprev;
4750 		hp->hprev->hnext = hp->hnext;
4751 		hp->dnext->dprev = hp->dprev;
4752 		hp->dprev->dnext = hp->dnext;
4753 		hp->next->prev = hp->prev;
4754 		hp->prev->next = hp->next;
4755 		/*
4756 		 * free any errdef link structures
4757 		 * tagged on to this shadow handle
4758 		 */
4759 		for (lp = hp->link; lp != NULL; ) {
4760 			next_lp = lp->link;
4761 			lp->link = bofi_link_freelist;
4762 			bofi_link_freelist = lp;
4763 			lp = next_lp;
4764 		}
4765 		hp->link = NULL;
4766 		mutex_exit(&bofi_mutex);
4767 		mutex_exit(&bofi_low_mutex);
4768 		kmem_free(hp, sizeof (struct bofi_shadow));
4769 		return (retval);
4770 	default:
4771 		return (save_bus_ops.bus_intr_op(dip, rdip,
4772 		    intr_op, hdlp, result));
4773 	}
4774 }
4775