xref: /titanic_41/usr/src/uts/common/io/bofi.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sysmacros.h>
31 #include <sys/buf.h>
32 #include <sys/errno.h>
33 #include <sys/modctl.h>
34 #include <sys/conf.h>
35 #include <sys/stat.h>
36 #include <sys/kmem.h>
37 #include <sys/proc.h>
38 #include <sys/cpuvar.h>
39 #include <sys/ddi_impldefs.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/sunndi.h>
43 #include <sys/debug.h>
44 #include <sys/bofi.h>
45 #include <sys/dvma.h>
46 #include <sys/bofi_impl.h>
47 
48 /*
49  * Testing the resilience of a hardened device driver requires a suitably wide
50  * range of different types of "typical" hardware faults to be injected,
51  * preferably in a controlled and repeatable fashion. This is not in general
52  * possible via hardware, so the "fault injection test harness" is provided.
53  * This works by intercepting calls from the driver to various DDI routines,
54  * and then corrupting the result of those DDI routine calls as if the
55  * hardware had caused the corruption.
56  *
57  * Conceptually, the bofi driver consists of two parts:
58  *
59  * A driver interface that supports a number of ioctls which allow error
60  * definitions ("errdefs") to be created and subsequently managed. The
61  * driver is a clone driver, so each open will create a separate
62  * invocation. Any errdefs created by using ioctls to that invocation
63  * will automatically be deleted when that invocation is closed.
64  *
65  * Intercept routines: When the bofi driver is attached, it edits the
66  * bus_ops structure of the bus nexus specified by the "bofi-nexus"
67  * field in the "bofi.conf" file, thus allowing the
68  * bofi driver to intercept various ddi functions. These intercept
69  * routines primarily carry out fault injections based on the errdefs
70  * created for that device.
71  *
72  * Faults can be injected into:
73  *
74  * DMA (corrupting data for DMA to/from memory areas defined by
75  * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
76  *
77  * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
78  * etc),
79  *
80  * Interrupts (generating spurious interrupts, losing interrupts,
81  * delaying interrupts).
82  *
83  * By default, ddi routines called from all drivers will be intercepted
84  * and faults potentially injected. However, the "bofi-to-test" field in
85  * the "bofi.conf" file can be set to a space-separated list of drivers to
86  * test (or by preceding each driver name in the list with an "!", a list
87  * of drivers not to test).
88  *
89  * In addition to fault injection, the bofi driver does a number of static
90  * checks which are controlled by properties in the "bofi.conf" file.
91  *
92  * "bofi-ddi-check" - if set will validate that there are no PIO access
93  * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
94  *
95  * "bofi-range-check" - if set to values 1 (warning) or 2 (panic), will
96  * validate that calls to ddi_get8(), ddi_put8(), etc are not made
97  * specifying addresses outside the range of the access_handle.
98  *
99  * "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
100  * are being made correctly.
101  */
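
/*
 * As an illustration only - the nexus and driver names below are
 * hypothetical, not a recommended setup - a bofi.conf for a test
 * machine might look like:
 *
 *	name="bofi" parent="pseudo" instance=0;
 *	bofi-nexus="pci";
 *	bofi-to-test="!sd !st";
 *	bofi-range-check="warn";
 *	bofi-ddi-check="on";
 *	bofi-sync-check="on";
 *
 * This would arm the harness on the "pci" nexus, exclude sd and st
 * from testing, warn about out-of-range PIO accesses, and enable the
 * DDI access and ddi_dma_sync() checks described above.
 */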
102 
103 extern void *bp_mapin_common(struct buf *, int);
104 
105 static int bofi_ddi_check;
106 static int bofi_sync_check;
107 static int bofi_range_check;
108 
109 static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;
110 
111 #define	LLSZMASK (sizeof (uint64_t)-1)
112 
113 #define	HDL_HASH_TBL_SIZE 64
114 static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
115 static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
116 #define	HDL_DHASH(x) \
117 	(&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
118 #define	HDL_HHASH(x) \
119 	(&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
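/*
 * (both macros simply take a kernel pointer, shift off low-order bits
 * that are constant for aligned allocations, and mask to the
 * power-of-two table size; the shift counts are tuning choices of
 * this driver)
 */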
120 
121 static struct bofi_shadow shadow_list;
122 static struct bofi_errent *errent_listp;
123 
124 static char driver_list[NAMESIZE];
125 static int driver_list_size;
126 static int driver_list_neg;
127 static char nexus_name[NAMESIZE];
128 
129 static int initialized = 0;
130 
131 #define	NCLONES 256
132 static int clone_tab[NCLONES];
133 
134 static dev_info_t *our_dip;
135 
136 static kmutex_t bofi_mutex;
137 static kmutex_t clone_tab_mutex;
138 static kmutex_t bofi_low_mutex;
139 static ddi_iblock_cookie_t bofi_low_cookie;
140 static uint_t	bofi_signal(caddr_t arg);
141 static int	bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
142 static int	bofi_attach(dev_info_t *, ddi_attach_cmd_t);
143 static int	bofi_detach(dev_info_t *, ddi_detach_cmd_t);
144 static int	bofi_open(dev_t *, int, int, cred_t *);
145 static int	bofi_close(dev_t, int, int, cred_t *);
146 static int	bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
147 static int	bofi_errdef_alloc(struct bofi_errdef *, char *,
148 		    struct bofi_errent *);
149 static int	bofi_errdef_free(struct bofi_errent *);
150 static void	bofi_start(struct bofi_errctl *, char *);
151 static void	bofi_stop(struct bofi_errctl *, char *);
152 static void	bofi_broadcast(struct bofi_errctl *, char *);
153 static void	bofi_clear_acc_chk(struct bofi_errctl *, char *);
154 static void	bofi_clear_errors(struct bofi_errctl *, char *);
155 static void	bofi_clear_errdefs(struct bofi_errctl *, char *);
156 static int	bofi_errdef_check(struct bofi_errstate *,
157 		    struct acc_log_elem **);
158 static int	bofi_errdef_check_w(struct bofi_errstate *,
159 		    struct acc_log_elem **);
160 static int	bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
161 		    off_t, off_t, caddr_t *);
162 static int	bofi_dma_map(dev_info_t *, dev_info_t *,
163 		    struct ddi_dma_req *, ddi_dma_handle_t *);
164 static int	bofi_dma_allochdl(dev_info_t *, dev_info_t *,
165 		    ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
166 		    ddi_dma_handle_t *);
167 static int	bofi_dma_freehdl(dev_info_t *, dev_info_t *,
168 		    ddi_dma_handle_t);
169 static int	bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
170 		    ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
171 		    uint_t *);
172 static int	bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
173 		    ddi_dma_handle_t);
174 static int	bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
175 		    off_t, size_t, uint_t);
176 static int	bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
177 		    enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
178 static int	bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
179 		    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
180 static int	bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
181 		    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
182 		    void *result);
183 
184 #if defined(__sparc)
185 static void	bofi_dvma_kaddr_load(ddi_dma_handle_t, caddr_t, uint_t,
186 		    uint_t, ddi_dma_cookie_t *);
187 static void	bofi_dvma_unload(ddi_dma_handle_t, uint_t, uint_t);
188 static void	bofi_dvma_sync(ddi_dma_handle_t, uint_t, uint_t);
189 static void	bofi_dvma_reserve(dev_info_t *, ddi_dma_handle_t);
190 #endif
191 static int	driver_under_test(dev_info_t *);
192 static int	bofi_check_acc_hdl(ddi_acc_impl_t *);
193 static int	bofi_check_dma_hdl(ddi_dma_impl_t *);
194 static int	bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
195 		    ddi_eventcookie_t eventhdl, void *impl_data);
196 
197 static struct bus_ops bofi_bus_ops = {
198 	BUSO_REV,
199 	bofi_map,
200 	NULL,
201 	NULL,
202 	NULL,
203 	i_ddi_map_fault,
204 	bofi_dma_map,
205 	bofi_dma_allochdl,
206 	bofi_dma_freehdl,
207 	bofi_dma_bindhdl,
208 	bofi_dma_unbindhdl,
209 	bofi_dma_flush,
210 	bofi_dma_win,
211 	bofi_dma_ctl,
212 	NULL,
213 	ddi_bus_prop_op,
214 	ndi_busop_get_eventcookie,
215 	ndi_busop_add_eventcall,
216 	ndi_busop_remove_eventcall,
217 	bofi_post_event,
218 	NULL,
219 	0,
220 	0,
221 	0,
222 	0,
223 	0,
224 	0,
225 	0,
226 	bofi_intr_ops
227 };
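
/*
 * note: bofi_bus_ops is never installed wholesale - modify_bus_ops()
 * below copies only the map, dma, post_event and intr_op entries into
 * the target nexus's bus_ops - so the remaining entries here are
 * effectively unused placeholders
 */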
228 
229 static struct cb_ops bofi_cb_ops = {
230 	bofi_open,
231 	bofi_close,
232 	nodev,
233 	nodev,
234 	nodev,			/* dump */
235 	nodev,
236 	nodev,
237 	bofi_ioctl,
238 	nodev,			/* devmap */
239 	nodev,
240 	nodev,			/* segmap */
241 	nochpoll,
242 	nodev,
243 	NULL,			/* for STREAMS drivers */
244 	D_NEW | D_MP		/* driver compatibility flag */
245 };
246 
247 static struct dev_ops bofi_ops = {
248 	DEVO_REV,		/* driver build version */
249 	0,			/* device reference count */
250 	bofi_getinfo,
251 	nulldev,
252 	nulldev,		/* probe */
253 	bofi_attach,
254 	bofi_detach,
255 	nulldev,		/* reset */
256 	&bofi_cb_ops,
257 	(struct bus_ops *)NULL,
258 	nulldev			/* power */
259 };
260 
261 /* module configuration stuff */
262 static void    *statep;
263 
264 static struct modldrv modldrv = {
265 	&mod_driverops,
266 	"bofi driver %I%",
267 	&bofi_ops
268 };
269 
270 static struct modlinkage modlinkage = {
271 	MODREV_1,
272 	&modldrv,
273 	0
274 };
275 
276 static struct bus_ops save_bus_ops;
277 
278 #if defined(__sparc)
279 static struct dvma_ops bofi_dvma_ops = {
280 	DVMAO_REV,
281 	bofi_dvma_kaddr_load,
282 	bofi_dvma_unload,
283 	bofi_dvma_sync
284 };
285 #endif
286 
287 /*
288  * support routine - map user page into kernel virtual
289  */
290 static caddr_t
291 dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
292 {
293 	struct buf buf;
294 	struct proc proc;
295 
296 	/*
297 	 * mock up a buf structure so we can call bp_mapin_common()
298 	 */
299 	buf.b_flags = B_PHYS;
300 	buf.b_un.b_addr = (caddr_t)addr;
301 	buf.b_bcount = (size_t)len;
302 	proc.p_as = as;
303 	buf.b_proc = &proc;
304 	return (bp_mapin_common(&buf, flag));
305 }
306 
307 
308 /*
309  * support routine - map page chain into kernel virtual
310  */
311 static caddr_t
312 dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
313 {
314 	struct buf buf;
315 
316 	/*
317 	 * mock up a buf structure so we can call bp_mapin_common()
318 	 */
319 	buf.b_flags = B_PAGEIO;
320 	buf.b_un.b_addr = (caddr_t)(uintptr_t)offset;
321 	buf.b_bcount = (size_t)len;
322 	buf.b_pages = pp;
323 	return (bp_mapin_common(&buf, flag));
324 }
325 
326 
327 /*
328  * support routine - map page array into kernel virtual
329  */
330 static caddr_t
331 dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
332     int flag)
333 {
334 	struct buf buf;
335 	struct proc proc;
336 
337 	/*
338 	 * mock up a buf structure so we can call bp_mapin_common()
339 	 */
340 	buf.b_flags = B_PHYS|B_SHADOW;
341 	buf.b_un.b_addr = addr;
342 	buf.b_bcount = len;
343 	buf.b_shadow = pplist;
344 	proc.p_as = as;
345 	buf.b_proc = &proc;
346 	return (bp_mapin_common(&buf, flag));
347 }
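
/*
 * (the three mock-ups above rely on bp_mapin_common() consulting only
 * the buf fields initialized here - b_flags, b_un.b_addr, b_bcount
 * and, depending on b_flags, b_proc, b_pages or b_shadow)
 */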
348 
349 
350 /*
351  * support routine - map dmareq into kernel virtual if not already mapped
352  * fills in *lenp with the length
353  * *mapaddrp is set to the new kernel virtual - or NULL if no mapping needed
354  */
355 static caddr_t
356 ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
357 	offset_t *lenp)
358 {
359 	int sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP: VM_NOSLEEP;
360 
361 	*lenp = dmareqp->dmar_object.dmao_size;
362 	if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
363 		*mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
364 		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
365 		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, sleep);
366 		return (*mapaddrp);
367 	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
368 		*mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
369 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
370 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
371 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
372 		return (*mapaddrp);
373 	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas) {
374 		*mapaddrp = NULL;
375 		return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
376 	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
377 		*mapaddrp = NULL;
378 		return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
379 	} else {
380 		*mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
381 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
382 		    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
383 		return (*mapaddrp);
384 	}
385 }
386 
387 
388 /*
389  * support routine - free off kernel virtual mapping as allocated by
390  * ddi_dmareq_mapin()
391  */
392 static void
393 ddi_dmareq_mapout(caddr_t addr, offset_t len)
394 {
395 	struct buf buf;
396 
397 	if (addr == NULL)
398 		return;
399 	/*
400 	 * mock up a buf structure
401 	 */
402 	buf.b_flags = B_REMAPPED;
403 	buf.b_un.b_addr = addr;
404 	buf.b_bcount = (size_t)len;
405 	bp_mapout(&buf);
406 }
407 
408 static time_t
409 bofi_gettime()
410 {
411 	timestruc_t ts;
412 
413 	gethrestime(&ts);
414 	return (ts.tv_sec);
415 }
416 
417 /*
418  * reset the bus_ops structure of the specified nexus to point to
419  * the original values in the save_bus_ops structure.
420  *
421  * Note that both this routine and modify_bus_ops() rely on the current
422  * behavior of the framework in that nexus drivers are not unloadable
423  *
424  */
425 
426 static int
427 reset_bus_ops(char *name, struct bus_ops *bop)
428 {
429 	struct modctl *modp;
430 	struct modldrv *mp;
431 	struct bus_ops *bp;
432 	struct dev_ops *ops;
433 
434 	mutex_enter(&mod_lock);
435 	/*
436 	 * find specified module
437 	 */
438 	modp = &modules;
439 	do {
440 		if (strcmp(name, modp->mod_modname) == 0) {
441 			if (!modp->mod_linkage) {
442 				mutex_exit(&mod_lock);
443 				return (0);
444 			}
445 			mp = modp->mod_linkage->ml_linkage[0];
446 			if (!mp || !mp->drv_dev_ops) {
447 				mutex_exit(&mod_lock);
448 				return (0);
449 			}
450 			ops = mp->drv_dev_ops;
451 			bp = ops->devo_bus_ops;
452 			if (!bp) {
453 				mutex_exit(&mod_lock);
454 				return (0);
455 			}
456 			if (ops->devo_refcnt > 0) {
457 				/*
458 				 * As long as devices are active with modified
459 				 * bus ops bofi must not go away. There may be
460 				 * drivers with modified access or dma handles.
461 				 */
462 				mutex_exit(&mod_lock);
463 				return (0);
464 			}
465 			cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
466 			    mp->drv_linkinfo);
467 			bp->bus_intr_op = bop->bus_intr_op;
468 			bp->bus_post_event = bop->bus_post_event;
469 			bp->bus_map = bop->bus_map;
470 			bp->bus_dma_map = bop->bus_dma_map;
471 			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
472 			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
473 			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
474 			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
475 			bp->bus_dma_flush = bop->bus_dma_flush;
476 			bp->bus_dma_win = bop->bus_dma_win;
477 			bp->bus_dma_ctl = bop->bus_dma_ctl;
478 			mutex_exit(&mod_lock);
479 			return (1);
480 		}
481 	} while ((modp = modp->mod_next) != &modules);
482 	mutex_exit(&mod_lock);
483 	return (0);
484 }
485 
486 /*
487  * modify the bus_ops structure of the specified nexus to point to bofi
488  * routines, saving the original values in the save_bus_ops structure
489  */
490 
491 static int
492 modify_bus_ops(char *name, struct bus_ops *bop)
493 {
494 	struct modctl *modp;
495 	struct modldrv *mp;
496 	struct bus_ops *bp;
497 	struct dev_ops *ops;
498 
499 	if (ddi_name_to_major(name) == -1)
500 		return (0);
501 
502 	mutex_enter(&mod_lock);
503 	/*
504 	 * find specified module
505 	 */
506 	modp = &modules;
507 	do {
508 		if (strcmp(name, modp->mod_modname) == 0) {
509 			if (!modp->mod_linkage) {
510 				mutex_exit(&mod_lock);
511 				return (0);
512 			}
513 			mp = modp->mod_linkage->ml_linkage[0];
514 			if (!mp || !mp->drv_dev_ops) {
515 				mutex_exit(&mod_lock);
516 				return (0);
517 			}
518 			ops = mp->drv_dev_ops;
519 			bp = ops->devo_bus_ops;
520 			if (!bp) {
521 				mutex_exit(&mod_lock);
522 				return (0);
523 			}
524 			if (ops->devo_refcnt == 0) {
525 				/*
526 				 * If there is no device active for this
527 				 * module then there is nothing to do for bofi.
528 				 */
529 				mutex_exit(&mod_lock);
530 				return (0);
531 			}
532 			cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
533 			    mp->drv_linkinfo);
534 			save_bus_ops = *bp;
535 			bp->bus_intr_op = bop->bus_intr_op;
536 			bp->bus_post_event = bop->bus_post_event;
537 			bp->bus_map = bop->bus_map;
538 			bp->bus_dma_map = bop->bus_dma_map;
539 			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
540 			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
541 			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
542 			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
543 			bp->bus_dma_flush = bop->bus_dma_flush;
544 			bp->bus_dma_win = bop->bus_dma_win;
545 			bp->bus_dma_ctl = bop->bus_dma_ctl;
546 			mutex_exit(&mod_lock);
547 			return (1);
548 		}
549 	} while ((modp = modp->mod_next) != &modules);
550 	mutex_exit(&mod_lock);
551 	return (0);
552 }
553 
554 
555 int
556 _init(void)
557 {
558 	int    e;
559 
560 	e = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
561 	if (e != 0)
562 		return (e);
563 	if ((e = mod_install(&modlinkage)) != 0)
564 		ddi_soft_state_fini(&statep);
565 	return (e);
566 }
567 
568 
569 int
570 _fini(void)
571 {
572 	int e;
573 
574 	if ((e = mod_remove(&modlinkage)) != 0)
575 		return (e);
576 	ddi_soft_state_fini(&statep);
577 	return (e);
578 }
579 
580 
581 int
582 _info(struct modinfo *modinfop)
583 {
584 	return (mod_info(&modlinkage, modinfop));
585 }
586 
587 
588 static int
589 bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
590 {
591 	char *name;
592 	char buf[80];
593 	int i;
594 	int s, ss;
595 	int size = NAMESIZE;
596 	int new_string;
597 	char *ptr;
598 
599 	if (cmd != DDI_ATTACH)
600 		return (DDI_FAILURE);
601 	/*
602 	 * only one instance - but we clone using the open routine
603 	 */
604 	if (ddi_get_instance(dip) > 0)
605 		return (DDI_FAILURE);
606 
607 	if (!initialized) {
608 		if ((name = ddi_get_name(dip)) == NULL)
609 			return (DDI_FAILURE);
610 		(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
611 		if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
612 		    DDI_PSEUDO, NULL) == DDI_FAILURE)
613 			return (DDI_FAILURE);
614 
615 		if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
616 		    &bofi_low_cookie) != DDI_SUCCESS) {
617 			ddi_remove_minor_node(dip, buf);
618 			return (DDI_FAILURE); /* fail attach */
619 		}
620 		/*
621 		 * get nexus name (from conf file)
622 		 */
623 		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
624 		    "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
625 			ddi_remove_minor_node(dip, buf);
626 			return (DDI_FAILURE);
627 		}
628 		/*
629 		 * get whether to do range checking on PIO accesses
630 		 */
631 		if ((bofi_range_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
632 		    dip, 0, "bofi-range-check", &ptr)) != DDI_PROP_SUCCESS)
633 			bofi_range_check = 0;
634 		else if (strcmp(ptr, "panic") == 0)
635 			bofi_range_check = 2;
636 		else if (strcmp(ptr, "warn") == 0)
637 			bofi_range_check = 1;
638 		else
639 			bofi_range_check = 0;
640 		ddi_prop_free(ptr);
641 
642 		/*
643 		 * get whether to check for non-DDI access to registers
644 		 */
645 		if ((bofi_ddi_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
646 		    dip, 0, "bofi-ddi-check", &ptr)) != DDI_PROP_SUCCESS)
647 			bofi_ddi_check = 0;
648 		else if (strcmp(ptr, "on") == 0)
649 			bofi_ddi_check = 1;
650 		else
651 			bofi_ddi_check = 0;
652 		ddi_prop_free(ptr);
653 
654 		/*
655 		 * get whether to do copy on ddi_dma_sync
656 		 */
657 		if ((bofi_sync_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
658 		    dip, 0, "bofi-sync-check", &ptr)) != DDI_PROP_SUCCESS)
659 			bofi_sync_check = 0;
660 		else if (strcmp(ptr, "on") == 0)
661 			bofi_sync_check = 1;
662 		else
663 			bofi_sync_check = 0;
664 		ddi_prop_free(ptr);
665 
666 		/*
667 		 * get driver-under-test names (from conf file)
668 		 */
669 		size = NAMESIZE;
670 		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
671 		    "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
672 			driver_list[0] = 0;
673 		/*
674 		 * and convert into a sequence of strings
675 		 */
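		/*
		 * e.g. "xyzzy frotz" (hypothetical driver names) means test
		 * only xyzzy and frotz, whereas "!xyzzy !frotz" means test
		 * everything except xyzzy and frotz - driver_list_neg
		 * records which form was used
		 */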
676 		driver_list_neg = 1;
677 		new_string = 1;
678 		driver_list_size = strlen(driver_list);
679 		for (i = 0; i < driver_list_size; i++) {
680 			if (driver_list[i] == ' ') {
681 				driver_list[i] = '\0';
682 				new_string = 1;
683 			} else if (new_string) {
684 				if (driver_list[i] != '!')
685 					driver_list_neg = 0;
686 				new_string = 0;
687 			}
688 		}
689 		/*
690 		 * initialize mutex, lists
691 		 */
692 		mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
693 		    NULL);
694 		/*
695 		 * fake up iblock cookie - need to protect ourselves
696 		 * against drivers that use hilevel interrupts
697 		 */
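		/*
		 * (the first spl8() raises the priority level, the second
		 * merely reads back the now-raised level for use as the
		 * faked-up cookie, and splx() restores the original level)
		 */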
698 		ss = spl8();
699 		s = spl8();
700 		splx(ss);
701 		mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
702 		mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
703 		    (void *)bofi_low_cookie);
704 		shadow_list.next = &shadow_list;
705 		shadow_list.prev = &shadow_list;
706 		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
707 			hhash_table[i].hnext = &hhash_table[i];
708 			hhash_table[i].hprev = &hhash_table[i];
709 			dhash_table[i].dnext = &dhash_table[i];
710 			dhash_table[i].dprev = &dhash_table[i];
711 		}
712 		for (i = 1; i < BOFI_NLINKS; i++)
713 			bofi_link_array[i].link = &bofi_link_array[i-1];
714 		bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
715 		/*
716 		 * overlay bus_ops structure
717 		 */
718 		if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
719 			ddi_remove_minor_node(dip, buf);
720 			mutex_destroy(&clone_tab_mutex);
721 			mutex_destroy(&bofi_mutex);
722 			mutex_destroy(&bofi_low_mutex);
723 			return (DDI_FAILURE);
724 		}
725 		/*
726 		 * save dip for getinfo
727 		 */
728 		our_dip = dip;
729 		ddi_report_dev(dip);
730 		initialized = 1;
731 	}
732 	return (DDI_SUCCESS);
733 }
734 
735 
736 static int
737 bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
738 {
739 	char *name;
740 	char buf[80];
741 
742 	if (cmd != DDI_DETACH)
743 		return (DDI_FAILURE);
744 	if (ddi_get_instance(dip) > 0)
745 		return (DDI_FAILURE);
746 	if ((name = ddi_get_name(dip)) == NULL)
747 		return (DDI_FAILURE);
748 	(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
749 	mutex_enter(&bofi_low_mutex);
750 	mutex_enter(&bofi_mutex);
751 	/*
752 	 * make sure test bofi is no longer in use
753 	 */
754 	if (shadow_list.next != &shadow_list || errent_listp != NULL) {
755 		mutex_exit(&bofi_mutex);
756 		mutex_exit(&bofi_low_mutex);
757 		return (DDI_FAILURE);
758 	}
759 	mutex_exit(&bofi_mutex);
760 	mutex_exit(&bofi_low_mutex);
761 
762 	/*
763 	 * restore bus_ops structure
764 	 */
765 	if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
766 		return (DDI_FAILURE);
767 
768 	mutex_destroy(&clone_tab_mutex);
769 	mutex_destroy(&bofi_mutex);
770 	mutex_destroy(&bofi_low_mutex);
771 	ddi_remove_minor_node(dip, buf);
772 	our_dip = NULL;
773 	initialized = 0;
774 	return (DDI_SUCCESS);
775 }
776 
777 
778 /* ARGSUSED */
779 static int
780 bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
781 {
782 	dev_t	dev = (dev_t)arg;
783 	int	minor = (int)getminor(dev);
784 	int	retval;
785 
786 	switch (cmd) {
787 	case DDI_INFO_DEVT2DEVINFO:
788 		if (minor != 0 || our_dip == NULL) {
789 			*result = (void *)NULL;
790 			retval = DDI_FAILURE;
791 		} else {
792 			*result = (void *)our_dip;
793 			retval = DDI_SUCCESS;
794 		}
795 		break;
796 	case DDI_INFO_DEVT2INSTANCE:
797 		*result = (void *)0;
798 		retval = DDI_SUCCESS;
799 		break;
800 	default:
801 		retval = DDI_FAILURE;
802 	}
803 	return (retval);
804 }
805 
806 
807 /* ARGSUSED */
808 static int
809 bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
810 {
811 	int	minor = (int)getminor(*devp);
812 	struct bofi_errent *softc;
813 
814 	/*
815 	 * only allow open on minor=0 - the clone device
816 	 */
817 	if (minor != 0)
818 		return (ENXIO);
819 	/*
820 	 * fail if not attached
821 	 */
822 	if (!initialized)
823 		return (ENXIO);
824 	/*
825 	 * find a free slot and grab it
826 	 */
827 	mutex_enter(&clone_tab_mutex);
828 	for (minor = 1; minor < NCLONES; minor++) {
829 		if (clone_tab[minor] == 0) {
830 			clone_tab[minor] = 1;
831 			break;
832 		}
833 	}
834 	mutex_exit(&clone_tab_mutex);
835 	if (minor == NCLONES)
836 		return (EAGAIN);
837 	/*
838 	 * soft state structure for this clone is used to maintain a list
839 	 * of allocated errdefs so they can be freed on close
840 	 */
841 	if (ddi_soft_state_zalloc(statep, minor) != DDI_SUCCESS) {
842 		mutex_enter(&clone_tab_mutex);
843 		clone_tab[minor] = 0;
844 		mutex_exit(&clone_tab_mutex);
845 		return (EAGAIN);
846 	}
847 	softc = ddi_get_soft_state(statep, minor);
848 	softc->cnext = softc;
849 	softc->cprev = softc;
850 
851 	*devp = makedevice(getmajor(*devp), minor);
852 	return (0);
853 }
854 
855 
856 /* ARGSUSED */
857 static int
858 bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
859 {
860 	int	minor = (int)getminor(dev);
861 	struct bofi_errent *softc;
862 	struct bofi_errent *ep, *next_ep;
863 
864 	softc = ddi_get_soft_state(statep, minor);
865 	if (softc == NULL)
866 		return (ENXIO);
867 	/*
868 	 * find list of errdefs and free them off
869 	 */
870 	for (ep = softc->cnext; ep != softc; ) {
871 		next_ep = ep->cnext;
872 		(void) bofi_errdef_free(ep);
873 		ep = next_ep;
874 	}
875 	/*
876 	 * free clone tab slot
877 	 */
878 	mutex_enter(&clone_tab_mutex);
879 	clone_tab[minor] = 0;
880 	mutex_exit(&clone_tab_mutex);
881 
882 	ddi_soft_state_free(statep, minor);
883 	return (0);
884 }
885 
886 
887 /* ARGSUSED */
888 static int
889 bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
890 	int *rvalp)
891 {
892 	struct bofi_errent *softc;
893 	int	minor = (int)getminor(dev);
894 	struct bofi_errdef errdef;
895 	struct bofi_errctl errctl;
896 	struct bofi_errstate errstate;
897 	void *ed_handle;
898 	struct bofi_get_handles get_handles;
899 	struct bofi_get_hdl_info hdl_info;
900 	struct handle_info *hdlip;
901 	struct handle_info *hib;
902 
903 	char *buffer;
904 	char *bufptr;
905 	char *endbuf;
906 	int req_count, count, err;
907 	char *namep;
908 	struct bofi_shadow *hp;
909 	int retval;
910 	struct bofi_shadow *hhashp;
911 	int i;
912 
913 	switch (cmd) {
914 	case BOFI_ADD_DEF:
915 		/*
916 		 * add a new error definition
917 		 */
918 #ifdef _MULTI_DATAMODEL
919 		switch (ddi_model_convert_from(mode & FMODELS)) {
920 		case DDI_MODEL_ILP32:
921 		{
922 			/*
923 			 * For use when a 32 bit app makes a call into a
924 			 * 64 bit ioctl
925 			 */
926 			struct bofi_errdef32	errdef_32;
927 
928 			if (ddi_copyin((void *)arg, &errdef_32,
929 			    sizeof (struct bofi_errdef32), mode)) {
930 				return (EFAULT);
931 			}
932 			errdef.namesize = errdef_32.namesize;
933 			(void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
934 			errdef.instance = errdef_32.instance;
935 			errdef.rnumber = errdef_32.rnumber;
936 			errdef.offset = errdef_32.offset;
937 			errdef.len = errdef_32.len;
938 			errdef.access_type = errdef_32.access_type;
939 			errdef.access_count = errdef_32.access_count;
940 			errdef.fail_count = errdef_32.fail_count;
941 			errdef.acc_chk = errdef_32.acc_chk;
942 			errdef.optype = errdef_32.optype;
943 			errdef.operand = errdef_32.operand;
944 			errdef.log.logsize = errdef_32.log.logsize;
945 			errdef.log.entries = errdef_32.log.entries;
946 			errdef.log.flags = errdef_32.log.flags;
947 			errdef.log.wrapcnt = errdef_32.log.wrapcnt;
948 			errdef.log.start_time = errdef_32.log.start_time;
949 			errdef.log.stop_time = errdef_32.log.stop_time;
950 			errdef.log.logbase =
951 			    (caddr_t)(uintptr_t)errdef_32.log.logbase;
952 			errdef.errdef_handle = errdef_32.errdef_handle;
953 			break;
954 		}
955 		case DDI_MODEL_NONE:
956 			if (ddi_copyin((void *)arg, &errdef,
957 			    sizeof (struct bofi_errdef), mode))
958 				return (EFAULT);
959 			break;
960 		}
961 #else /* ! _MULTI_DATAMODEL */
962 		if (ddi_copyin((void *)arg, &errdef,
963 		    sizeof (struct bofi_errdef), mode) != 0)
964 			return (EFAULT);
965 #endif /* _MULTI_DATAMODEL */
966 		/*
967 		 * do some validation
968 		 */
969 		if (errdef.fail_count == 0)
970 			errdef.optype = 0;
971 		if (errdef.optype != 0) {
972 			if (errdef.access_type & BOFI_INTR &&
973 			    errdef.optype != BOFI_DELAY_INTR &&
974 			    errdef.optype != BOFI_LOSE_INTR &&
975 			    errdef.optype != BOFI_EXTRA_INTR)
976 				return (EINVAL);
977 			if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
978 			    errdef.optype == BOFI_NO_TRANSFER)
979 				return (EINVAL);
980 			if ((errdef.access_type & (BOFI_PIO_RW)) &&
981 			    errdef.optype != BOFI_EQUAL &&
982 			    errdef.optype != BOFI_OR &&
983 			    errdef.optype != BOFI_XOR &&
984 			    errdef.optype != BOFI_AND &&
985 			    errdef.optype != BOFI_NO_TRANSFER)
986 				return (EINVAL);
987 		}
988 		/*
989 		 * find softstate for this clone, so we can tag
990 		 * new errdef on to it
991 		 */
992 		softc = ddi_get_soft_state(statep, minor);
993 		if (softc == NULL)
994 			return (ENXIO);
995 		/*
996 		 * read in name
997 		 */
998 		if (errdef.namesize > NAMESIZE)
999 			return (EINVAL);
1000 		namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
1001 		(void) strncpy(namep, errdef.name, errdef.namesize);
1002 
1003 		if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
1004 			(void) bofi_errdef_free((struct bofi_errent *)
1005 			    (uintptr_t)errdef.errdef_handle);
1006 			kmem_free(namep, errdef.namesize+1);
1007 			return (EINVAL);
1008 		}
1009 		/*
1010 		 * copy out errdef again, including filled in errdef_handle
1011 		 */
1012 #ifdef _MULTI_DATAMODEL
1013 		switch (ddi_model_convert_from(mode & FMODELS)) {
1014 		case DDI_MODEL_ILP32:
1015 		{
1016 			/*
1017 			 * For use when a 32 bit app makes a call into a
1018 			 * 64 bit ioctl
1019 			 */
1020 			struct bofi_errdef32	errdef_32;
1021 
1022 			errdef_32.namesize = errdef.namesize;
1023 			(void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
1024 			errdef_32.instance = errdef.instance;
1025 			errdef_32.rnumber = errdef.rnumber;
1026 			errdef_32.offset = errdef.offset;
1027 			errdef_32.len = errdef.len;
1028 			errdef_32.access_type = errdef.access_type;
1029 			errdef_32.access_count = errdef.access_count;
1030 			errdef_32.fail_count = errdef.fail_count;
1031 			errdef_32.acc_chk = errdef.acc_chk;
1032 			errdef_32.optype = errdef.optype;
1033 			errdef_32.operand = errdef.operand;
1034 			errdef_32.log.logsize = errdef.log.logsize;
1035 			errdef_32.log.entries = errdef.log.entries;
1036 			errdef_32.log.flags = errdef.log.flags;
1037 			errdef_32.log.wrapcnt = errdef.log.wrapcnt;
1038 			errdef_32.log.start_time = errdef.log.start_time;
1039 			errdef_32.log.stop_time = errdef.log.stop_time;
1040 			errdef_32.log.logbase =
1041 			    (caddr32_t)(uintptr_t)errdef.log.logbase;
1042 			errdef_32.errdef_handle = errdef.errdef_handle;
1043 			if (ddi_copyout(&errdef_32, (void *)arg,
1044 			    sizeof (struct bofi_errdef32), mode) != 0) {
1045 				(void) bofi_errdef_free((struct bofi_errent *)
1046 				    (uintptr_t)errdef.errdef_handle);
1047 				kmem_free(namep, errdef.namesize+1);
1048 				return (EFAULT);
1049 			}
1050 			break;
1051 		}
1052 		case DDI_MODEL_NONE:
1053 			if (ddi_copyout(&errdef, (void *)arg,
1054 			    sizeof (struct bofi_errdef), mode) != 0) {
1055 				(void) bofi_errdef_free((struct bofi_errent *)
1056 				    (uintptr_t)errdef.errdef_handle);
1057 				kmem_free(namep, errdef.namesize+1);
1058 				return (EFAULT);
1059 			}
1060 			break;
1061 		}
1062 #else /* ! _MULTI_DATAMODEL */
1063 		if (ddi_copyout(&errdef, (void *)arg,
1064 		    sizeof (struct bofi_errdef), mode) != 0) {
1065 			(void) bofi_errdef_free((struct bofi_errent *)
1066 			    (uintptr_t)errdef.errdef_handle);
1067 			kmem_free(namep, errdef.namesize+1);
1068 			return (EFAULT);
1069 		}
1070 #endif /* _MULTI_DATAMODEL */
1071 		return (0);
1072 	case BOFI_DEL_DEF:
1073 		/*
1074 		 * delete existing errdef
1075 		 */
1076 		if (ddi_copyin((void *)arg, &ed_handle,
1077 		    sizeof (void *), mode) != 0)
1078 			return (EFAULT);
1079 		return (bofi_errdef_free((struct bofi_errent *)ed_handle));
1080 	case BOFI_START:
1081 		/*
1082 		 * start all errdefs corresponding to
1083 		 * this name and instance
1084 		 */
1085 		if (ddi_copyin((void *)arg, &errctl,
1086 		    sizeof (struct bofi_errctl), mode) != 0)
1087 			return (EFAULT);
1088 		/*
1089 		 * copy in name
1090 		 */
1091 		if (errctl.namesize > NAMESIZE)
1092 			return (EINVAL);
1093 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1094 		(void) strncpy(namep, errctl.name, errctl.namesize);
1095 		bofi_start(&errctl, namep);
1096 		kmem_free(namep, errctl.namesize+1);
1097 		return (0);
1098 	case BOFI_STOP:
1099 		/*
1100 		 * stop all errdefs corresponding to
1101 		 * this name and instance
1102 		 */
1103 		if (ddi_copyin((void *)arg, &errctl,
1104 		    sizeof (struct bofi_errctl), mode) != 0)
1105 			return (EFAULT);
1106 		/*
1107 		 * copy in name
1108 		 */
1109 		if (errctl.namesize > NAMESIZE)
1110 			return (EINVAL);
1111 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1112 		(void) strncpy(namep, errctl.name, errctl.namesize);
1113 		bofi_stop(&errctl, namep);
1114 		kmem_free(namep, errctl.namesize+1);
1115 		return (0);
1116 	case BOFI_BROADCAST:
1117 		/*
1118 		 * wakeup all errdefs corresponding to
1119 		 * this name and instance
1120 		 */
1121 		if (ddi_copyin((void *)arg, &errctl,
1122 		    sizeof (struct bofi_errctl), mode) != 0)
1123 			return (EFAULT);
1124 		/*
1125 		 * copy in name
1126 		 */
1127 		if (errctl.namesize > NAMESIZE)
1128 			return (EINVAL);
1129 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1130 		(void) strncpy(namep, errctl.name, errctl.namesize);
1131 		bofi_broadcast(&errctl, namep);
1132 		kmem_free(namep, errctl.namesize+1);
1133 		return (0);
1134 	case BOFI_CLEAR_ACC_CHK:
1135 		/*
1136 		 * clear "acc_chk" for all errdefs corresponding to
1137 		 * this name and instance
1138 		 */
1139 		if (ddi_copyin((void *)arg, &errctl,
1140 		    sizeof (struct bofi_errctl), mode) != 0)
1141 			return (EFAULT);
1142 		/*
1143 		 * copy in name
1144 		 */
1145 		if (errctl.namesize > NAMESIZE)
1146 			return (EINVAL);
1147 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1148 		(void) strncpy(namep, errctl.name, errctl.namesize);
1149 		bofi_clear_acc_chk(&errctl, namep);
1150 		kmem_free(namep, errctl.namesize+1);
1151 		return (0);
1152 	case BOFI_CLEAR_ERRORS:
1153 		/*
1154 		 * set "fail_count" to 0 for all errdefs corresponding to
1155 		 * this name and instance whose "access_count"
1156 		 * has expired.
1157 		 */
1158 		if (ddi_copyin((void *)arg, &errctl,
1159 		    sizeof (struct bofi_errctl), mode) != 0)
1160 			return (EFAULT);
1161 		/*
1162 		 * copy in name
1163 		 */
1164 		if (errctl.namesize > NAMESIZE)
1165 			return (EINVAL);
1166 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1167 		(void) strncpy(namep, errctl.name, errctl.namesize);
1168 		bofi_clear_errors(&errctl, namep);
1169 		kmem_free(namep, errctl.namesize+1);
1170 		return (0);
1171 	case BOFI_CLEAR_ERRDEFS:
1172 		/*
1173 		 * set "access_count" and "fail_count" to 0 for all errdefs
1174 		 * corresponding to this name and instance
1175 		 */
1176 		if (ddi_copyin((void *)arg, &errctl,
1177 		    sizeof (struct bofi_errctl), mode) != 0)
1178 			return (EFAULT);
1179 		/*
1180 		 * copy in name
1181 		 */
1182 		if (errctl.namesize > NAMESIZE)
1183 			return (EINVAL);
1184 		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1185 		(void) strncpy(namep, errctl.name, errctl.namesize);
1186 		bofi_clear_errdefs(&errctl, namep);
1187 		kmem_free(namep, errctl.namesize+1);
1188 		return (0);
1189 	case BOFI_CHK_STATE:
1190 	{
1191 		struct acc_log_elem *klg;
1192 		size_t uls;
1193 		/*
1194 		 * get state for this errdef - read in dummy errstate
1195 		 * with just the errdef_handle filled in
1196 		 */
1197 #ifdef _MULTI_DATAMODEL
1198 		switch (ddi_model_convert_from(mode & FMODELS)) {
1199 		case DDI_MODEL_ILP32:
1200 		{
1201 			/*
1202 			 * For use when a 32 bit app makes a call into a
1203 			 * 64 bit ioctl
1204 			 */
1205 			struct bofi_errstate32	errstate_32;
1206 
1207 			if (ddi_copyin((void *)arg, &errstate_32,
1208 			    sizeof (struct bofi_errstate32), mode) != 0) {
1209 				return (EFAULT);
1210 			}
1211 			errstate.fail_time = errstate_32.fail_time;
1212 			errstate.msg_time = errstate_32.msg_time;
1213 			errstate.access_count = errstate_32.access_count;
1214 			errstate.fail_count = errstate_32.fail_count;
1215 			errstate.acc_chk = errstate_32.acc_chk;
1216 			errstate.errmsg_count = errstate_32.errmsg_count;
1217 			(void) strncpy(errstate.buffer, errstate_32.buffer,
1218 			    ERRMSGSIZE);
1219 			errstate.severity = errstate_32.severity;
1220 			errstate.log.logsize = errstate_32.log.logsize;
1221 			errstate.log.entries = errstate_32.log.entries;
1222 			errstate.log.flags = errstate_32.log.flags;
1223 			errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1224 			errstate.log.start_time = errstate_32.log.start_time;
1225 			errstate.log.stop_time = errstate_32.log.stop_time;
1226 			errstate.log.logbase =
1227 			    (caddr_t)(uintptr_t)errstate_32.log.logbase;
1228 			errstate.errdef_handle = errstate_32.errdef_handle;
1229 			break;
1230 		}
1231 		case DDI_MODEL_NONE:
1232 			if (ddi_copyin((void *)arg, &errstate,
1233 			    sizeof (struct bofi_errstate), mode) != 0)
1234 				return (EFAULT);
1235 			break;
1236 		}
1237 #else /* ! _MULTI_DATAMODEL */
1238 		if (ddi_copyin((void *)arg, &errstate,
1239 		    sizeof (struct bofi_errstate), mode) != 0)
1240 			return (EFAULT);
1241 #endif /* _MULTI_DATAMODEL */
1242 		if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
1243 			return (EINVAL);
1244 		/*
1245 		 * copy out real errstate structure
1246 		 */
1247 		uls = errstate.log.logsize;
1248 		if (errstate.log.entries > uls && uls)
1249 			/* insufficient user memory */
1250 			errstate.log.entries = uls;
1251 		/* always pass back a time */
1252 		if (errstate.log.stop_time == 0ul)
1253 			(void) drv_getparm(TIME, &(errstate.log.stop_time));
1254 
1255 #ifdef _MULTI_DATAMODEL
1256 		switch (ddi_model_convert_from(mode & FMODELS)) {
1257 		case DDI_MODEL_ILP32:
1258 		{
1259 			/*
1260 			 * For use when a 32 bit app makes a call into a
1261 			 * 64 bit ioctl
1262 			 */
1263 			struct bofi_errstate32	errstate_32;
1264 
1265 			errstate_32.fail_time = errstate.fail_time;
1266 			errstate_32.msg_time = errstate.msg_time;
1267 			errstate_32.access_count = errstate.access_count;
1268 			errstate_32.fail_count = errstate.fail_count;
1269 			errstate_32.acc_chk = errstate.acc_chk;
1270 			errstate_32.errmsg_count = errstate.errmsg_count;
1271 			(void) strncpy(errstate_32.buffer, errstate.buffer,
1272 			    ERRMSGSIZE);
1273 			errstate_32.severity = errstate.severity;
1274 			errstate_32.log.logsize = errstate.log.logsize;
1275 			errstate_32.log.entries = errstate.log.entries;
1276 			errstate_32.log.flags = errstate.log.flags;
1277 			errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1278 			errstate_32.log.start_time = errstate.log.start_time;
1279 			errstate_32.log.stop_time = errstate.log.stop_time;
1280 			errstate_32.log.logbase =
1281 			    (caddr32_t)(uintptr_t)errstate.log.logbase;
1282 			errstate_32.errdef_handle = errstate.errdef_handle;
1283 			if (ddi_copyout(&errstate_32, (void *)arg,
1284 			    sizeof (struct bofi_errstate32), mode) != 0)
1285 				return (EFAULT);
1286 			break;
1287 		}
1288 		case DDI_MODEL_NONE:
1289 			if (ddi_copyout(&errstate, (void *)arg,
1290 			    sizeof (struct bofi_errstate), mode) != 0)
1291 				return (EFAULT);
1292 			break;
1293 		}
1294 #else /* ! _MULTI_DATAMODEL */
1295 		if (ddi_copyout(&errstate, (void *)arg,
1296 		    sizeof (struct bofi_errstate), mode) != 0)
1297 			return (EFAULT);
1298 #endif /* _MULTI_DATAMODEL */
1299 		if (uls && errstate.log.entries &&
1300 		    ddi_copyout(klg, errstate.log.logbase,
1301 		    errstate.log.entries * sizeof (struct acc_log_elem),
1302 		    mode) != 0) {
1303 			return (EFAULT);
1304 		}
1305 		return (retval);
1306 	}
1307 	case BOFI_CHK_STATE_W:
1308 	{
1309 		struct acc_log_elem *klg;
1310 		size_t uls;
1311 		/*
1312 		 * get state for this errdef - read in dummy errstate
1313 		 * with just the errdef_handle filled in. Then wait for
1314 		 * a ddi_report_fault message to come back
1315 		 */
1316 #ifdef _MULTI_DATAMODEL
1317 		switch (ddi_model_convert_from(mode & FMODELS)) {
1318 		case DDI_MODEL_ILP32:
1319 		{
1320 			/*
1321 			 * For use when a 32 bit app makes a call into a
1322 			 * 64 bit ioctl
1323 			 */
1324 			struct bofi_errstate32	errstate_32;
1325 
1326 			if (ddi_copyin((void *)arg, &errstate_32,
1327 			    sizeof (struct bofi_errstate32), mode) != 0) {
1328 				return (EFAULT);
1329 			}
1330 			errstate.fail_time = errstate_32.fail_time;
1331 			errstate.msg_time = errstate_32.msg_time;
1332 			errstate.access_count = errstate_32.access_count;
1333 			errstate.fail_count = errstate_32.fail_count;
1334 			errstate.acc_chk = errstate_32.acc_chk;
1335 			errstate.errmsg_count = errstate_32.errmsg_count;
1336 			(void) strncpy(errstate.buffer, errstate_32.buffer,
1337 			    ERRMSGSIZE);
1338 			errstate.severity = errstate_32.severity;
1339 			errstate.log.logsize = errstate_32.log.logsize;
1340 			errstate.log.entries = errstate_32.log.entries;
1341 			errstate.log.flags = errstate_32.log.flags;
1342 			errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1343 			errstate.log.start_time = errstate_32.log.start_time;
1344 			errstate.log.stop_time = errstate_32.log.stop_time;
1345 			errstate.log.logbase =
1346 			    (caddr_t)(uintptr_t)errstate_32.log.logbase;
1347 			errstate.errdef_handle = errstate_32.errdef_handle;
1348 			break;
1349 		}
1350 		case DDI_MODEL_NONE:
1351 			if (ddi_copyin((void *)arg, &errstate,
1352 			    sizeof (struct bofi_errstate), mode) != 0)
1353 				return (EFAULT);
1354 			break;
1355 		}
1356 #else /* ! _MULTI_DATAMODEL */
1357 		if (ddi_copyin((void *)arg, &errstate,
1358 		    sizeof (struct bofi_errstate), mode) != 0)
1359 			return (EFAULT);
1360 #endif /* _MULTI_DATAMODEL */
1361 		if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
1362 			return (EINVAL);
1363 		/*
1364 		 * copy out real errstate structure
1365 		 */
1366 		uls = errstate.log.logsize;
1368 		if (errstate.log.entries > uls && uls)
1369 			/* insufficient user memory */
1370 			errstate.log.entries = uls;
1371 		/* always pass back a time */
1372 		if (errstate.log.stop_time == 0ul)
1373 			(void) drv_getparm(TIME, &(errstate.log.stop_time));
1374 
1375 #ifdef _MULTI_DATAMODEL
1376 		switch (ddi_model_convert_from(mode & FMODELS)) {
1377 		case DDI_MODEL_ILP32:
1378 		{
1379 			/*
1380 			 * For use when a 32 bit app makes a call into a
1381 			 * 64 bit ioctl
1382 			 */
1383 			struct bofi_errstate32	errstate_32;
1384 
1385 			errstate_32.fail_time = errstate.fail_time;
1386 			errstate_32.msg_time = errstate.msg_time;
1387 			errstate_32.access_count = errstate.access_count;
1388 			errstate_32.fail_count = errstate.fail_count;
1389 			errstate_32.acc_chk = errstate.acc_chk;
1390 			errstate_32.errmsg_count = errstate.errmsg_count;
1391 			(void) strncpy(errstate_32.buffer, errstate.buffer,
1392 			    ERRMSGSIZE);
1393 			errstate_32.severity = errstate.severity;
1394 			errstate_32.log.logsize = errstate.log.logsize;
1395 			errstate_32.log.entries = errstate.log.entries;
1396 			errstate_32.log.flags = errstate.log.flags;
1397 			errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1398 			errstate_32.log.start_time = errstate.log.start_time;
1399 			errstate_32.log.stop_time = errstate.log.stop_time;
1400 			errstate_32.log.logbase =
1401 			    (caddr32_t)(uintptr_t)errstate.log.logbase;
1402 			errstate_32.errdef_handle = errstate.errdef_handle;
1403 			if (ddi_copyout(&errstate_32, (void *)arg,
1404 			    sizeof (struct bofi_errstate32), mode) != 0)
1405 				return (EFAULT);
1406 			break;
1407 		}
1408 		case DDI_MODEL_NONE:
1409 			if (ddi_copyout(&errstate, (void *)arg,
1410 			    sizeof (struct bofi_errstate), mode) != 0)
1411 				return (EFAULT);
1412 			break;
1413 		}
1414 #else /* ! _MULTI_DATAMODEL */
1415 		if (ddi_copyout(&errstate, (void *)arg,
1416 		    sizeof (struct bofi_errstate), mode) != 0)
1417 			return (EFAULT);
1418 #endif /* _MULTI_DATAMODEL */
1419 
1420 		if (uls && errstate.log.entries &&
1421 		    ddi_copyout(klg, errstate.log.logbase,
1422 		    errstate.log.entries * sizeof (struct acc_log_elem),
1423 		    mode) != 0) {
1424 			return (EFAULT);
1425 		}
1426 		return (retval);
1427 	}
1428 	case BOFI_GET_HANDLES:
1429 		/*
1430 		 * display existing handles
1431 		 */
1432 #ifdef _MULTI_DATAMODEL
1433 		switch (ddi_model_convert_from(mode & FMODELS)) {
1434 		case DDI_MODEL_ILP32:
1435 		{
1436 			/*
1437 			 * For use when a 32 bit app makes a call into a
1438 			 * 64 bit ioctl
1439 			 */
1440 			struct bofi_get_handles32	get_handles_32;
1441 
1442 			if (ddi_copyin((void *)arg, &get_handles_32,
1443 			    sizeof (get_handles_32), mode) != 0) {
1444 				return (EFAULT);
1445 			}
1446 			get_handles.namesize = get_handles_32.namesize;
1447 			(void) strncpy(get_handles.name, get_handles_32.name,
1448 			    NAMESIZE);
1449 			get_handles.instance = get_handles_32.instance;
1450 			get_handles.count = get_handles_32.count;
1451 			get_handles.buffer =
1452 			    (caddr_t)(uintptr_t)get_handles_32.buffer;
1453 			break;
1454 		}
1455 		case DDI_MODEL_NONE:
1456 			if (ddi_copyin((void *)arg, &get_handles,
1457 			    sizeof (get_handles), mode) != 0)
1458 				return (EFAULT);
1459 			break;
1460 		}
1461 #else /* ! _MULTI_DATAMODEL */
1462 		if (ddi_copyin((void *)arg, &get_handles,
1463 		    sizeof (get_handles), mode) != 0)
1464 			return (EFAULT);
1465 #endif /* _MULTI_DATAMODEL */
1466 		/*
1467 		 * read in name
1468 		 */
1469 		if (get_handles.namesize > NAMESIZE)
1470 			return (EINVAL);
1471 		namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
1472 		(void) strncpy(namep, get_handles.name, get_handles.namesize);
1473 		req_count = get_handles.count;
1474 		bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
1475 		endbuf = bufptr + req_count;
1476 		/*
1477 		 * display existing handles
1478 		 */
1479 		mutex_enter(&bofi_low_mutex);
1480 		mutex_enter(&bofi_mutex);
1481 		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1482 			hhashp = &hhash_table[i];
1483 			for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1484 				if (!driver_under_test(hp->dip))
1485 					continue;
1486 				if (ddi_name_to_major(ddi_get_name(hp->dip)) !=
1487 				    ddi_name_to_major(namep))
1488 					continue;
1489 				if (hp->instance != get_handles.instance)
1490 					continue;
1491 				/*
1492 				 * print information per handle - note that
1493 				 * DMA* means an unbound DMA handle
1494 				 */
1495 				(void) snprintf(bufptr, (size_t)(endbuf-bufptr),
1496 				    "  %s %d %s ", hp->name, hp->instance,
1497 				    (hp->type == BOFI_INT_HDL) ? "INTR" :
1498 				    (hp->type == BOFI_ACC_HDL) ? "PIO" :
1499 				    (hp->type == BOFI_DMA_HDL) ? "DMA" :
1500 				    (hp->hparrayp != NULL) ? "DVMA" : "DMA*");
1501 				bufptr += strlen(bufptr);
1502 				if (hp->type == BOFI_ACC_HDL) {
1503 					if (hp->len == INT_MAX - hp->offset)
1504 						(void) snprintf(bufptr,
1505 						    (size_t)(endbuf-bufptr),
1506 						    "reg set %d off 0x%llx\n",
1507 						    hp->rnumber, hp->offset);
1508 					else
1509 						(void) snprintf(bufptr,
1510 						    (size_t)(endbuf-bufptr),
1511 						    "reg set %d off 0x%llx"
1512 						    " len 0x%llx\n",
1513 						    hp->rnumber, hp->offset,
1514 						    hp->len);
1515 				} else if (hp->type == BOFI_DMA_HDL)
1516 					(void) snprintf(bufptr,
1517 					    (size_t)(endbuf-bufptr),
1518 					    "handle no %d len 0x%llx"
1519 					    " addr 0x%p\n", hp->rnumber,
1520 					    hp->len, (void *)hp->addr);
1521 				else if (hp->type == BOFI_NULL &&
1522 				    hp->hparrayp == NULL)
1523 					(void) snprintf(bufptr,
1524 					    (size_t)(endbuf-bufptr),
1525 					    "handle no %d\n", hp->rnumber);
1526 				else
1527 					(void) snprintf(bufptr,
1528 					    (size_t)(endbuf-bufptr), "\n");
1529 				bufptr += strlen(bufptr);
1530 			}
1531 		}
1532 		mutex_exit(&bofi_mutex);
1533 		mutex_exit(&bofi_low_mutex);
1534 		err = ddi_copyout(buffer, get_handles.buffer, req_count, mode);
1535 		kmem_free(namep, get_handles.namesize+1);
1536 		kmem_free(buffer, req_count);
1537 		if (err != 0)
1538 			return (EFAULT);
1539 		else
1540 			return (0);
1541 	case BOFI_GET_HANDLE_INFO:
1542 		/*
1543 		 * display existing handles
1544 		 */
1545 #ifdef _MULTI_DATAMODEL
1546 		switch (ddi_model_convert_from(mode & FMODELS)) {
1547 		case DDI_MODEL_ILP32:
1548 		{
1549 			/*
1550 			 * For use when a 32 bit app makes a call into a
1551 			 * 64 bit ioctl
1552 			 */
1553 			struct bofi_get_hdl_info32	hdl_info_32;
1554 
1555 			if (ddi_copyin((void *)arg, &hdl_info_32,
1556 			    sizeof (hdl_info_32), mode)) {
1557 				return (EFAULT);
1558 			}
1559 			hdl_info.namesize = hdl_info_32.namesize;
1560 			(void) strncpy(hdl_info.name, hdl_info_32.name,
1561 			    NAMESIZE);
1562 			hdl_info.count = hdl_info_32.count;
1563 			hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli;
1564 			break;
1565 		}
1566 		case DDI_MODEL_NONE:
1567 			if (ddi_copyin((void *)arg, &hdl_info,
1568 			    sizeof (hdl_info), mode))
1569 				return (EFAULT);
1570 			break;
1571 		}
1572 #else /* ! _MULTI_DATAMODEL */
1573 		if (ddi_copyin((void *)arg, &hdl_info,
1574 		    sizeof (hdl_info), mode))
1575 			return (EFAULT);
1576 #endif /* _MULTI_DATAMODEL */
1577 		if (hdl_info.namesize > NAMESIZE)
1578 			return (EINVAL);
1579 		namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
1580 		(void) strncpy(namep, hdl_info.name, hdl_info.namesize);
1581 		req_count = hdl_info.count;
1582 		count = hdl_info.count = 0; /* the actual number of handles */
1583 		if (req_count > 0) {
1584 			hib = hdlip =
1585 			    kmem_zalloc(req_count * sizeof (struct handle_info),
1586 			    KM_SLEEP);
1587 		} else {
1588 			hib = hdlip = 0;
1589 			req_count = hdl_info.count = 0;
1590 		}
1591 
1592 		/*
1593 		 * display existing handles
1594 		 */
1595 		mutex_enter(&bofi_low_mutex);
1596 		mutex_enter(&bofi_mutex);
1597 		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1598 			hhashp = &hhash_table[i];
1599 			for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1600 				if (!driver_under_test(hp->dip) ||
1601 				    ddi_name_to_major(ddi_get_name(hp->dip)) !=
1602 				    ddi_name_to_major(namep) ||
1603 				    ++(hdl_info.count) > req_count ||
1604 				    count == req_count)
1605 					continue;
1606 
1607 				hdlip->instance = hp->instance;
1608 				hdlip->rnumber = hp->rnumber;
1609 				switch (hp->type) {
1610 				case BOFI_ACC_HDL:
1611 					hdlip->access_type = BOFI_PIO_RW;
1612 					hdlip->offset = hp->offset;
1613 					hdlip->len = hp->len;
1614 					break;
1615 				case BOFI_DMA_HDL:
1616 					hdlip->access_type = 0;
1617 					if (hp->flags & DDI_DMA_WRITE)
1618 						hdlip->access_type |=
1619 						    BOFI_DMA_W;
1620 					if (hp->flags & DDI_DMA_READ)
1621 						hdlip->access_type |=
1622 						    BOFI_DMA_R;
1623 					hdlip->len = hp->len;
1624 					hdlip->addr_cookie =
1625 					    (uint64_t)(uintptr_t)hp->addr;
1626 					break;
1627 				case BOFI_INT_HDL:
1628 					hdlip->access_type = BOFI_INTR;
1629 					break;
1630 				default:
1631 					hdlip->access_type = 0;
1632 					break;
1633 				}
1634 				hdlip++;
1635 				count++;
1636 			}
1637 		}
1638 		mutex_exit(&bofi_mutex);
1639 		mutex_exit(&bofi_low_mutex);
1640 		err = 0;
1641 #ifdef _MULTI_DATAMODEL
1642 		switch (ddi_model_convert_from(mode & FMODELS)) {
1643 		case DDI_MODEL_ILP32:
1644 		{
1645 			/*
1646 			 * For use when a 32 bit app makes a call into a
1647 			 * 64 bit ioctl
1648 			 */
1649 			struct bofi_get_hdl_info32	hdl_info_32;
1650 
1651 			hdl_info_32.namesize = hdl_info.namesize;
1652 			(void) strncpy(hdl_info_32.name, hdl_info.name,
1653 			    NAMESIZE);
1654 			hdl_info_32.count = hdl_info.count;
1655 			hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
1656 			if (ddi_copyout(&hdl_info_32, (void *)arg,
1657 			    sizeof (hdl_info_32), mode) != 0) {
1658 				kmem_free(namep, hdl_info.namesize+1);
1659 				if (req_count > 0)
1660 					kmem_free(hib,
1661 					    req_count * sizeof (*hib));
1662 				return (EFAULT);
1663 			}
1664 			break;
1665 		}
1666 		case DDI_MODEL_NONE:
1667 			if (ddi_copyout(&hdl_info, (void *)arg,
1668 			    sizeof (hdl_info), mode) != 0) {
1669 				kmem_free(namep, hdl_info.namesize+1);
1670 				if (req_count > 0)
1671 					kmem_free(hib,
1672 					    req_count * sizeof (*hib));
1673 				return (EFAULT);
1674 			}
1675 			break;
1676 		}
1677 #else /* ! _MULTI_DATAMODEL */
1678 		if (ddi_copyout(&hdl_info, (void *)arg,
1679 		    sizeof (hdl_info), mode) != 0) {
1680 			kmem_free(namep, hdl_info.namesize+1);
1681 			if (req_count > 0)
1682 				kmem_free(hib, req_count * sizeof (*hib));
1683 			return (EFAULT);
1684 		}
1685 #endif /* ! _MULTI_DATAMODEL */
1686 		if (count > 0) {
1687 			if (ddi_copyout(hib, hdl_info.hdli,
1688 			    count * sizeof (*hib), mode) != 0) {
1689 				kmem_free(namep, hdl_info.namesize+1);
1690 				if (req_count > 0)
1691 					kmem_free(hib,
1692 					    req_count * sizeof (*hib));
1693 				return (EFAULT);
1694 			}
1695 		}
1696 		kmem_free(namep, hdl_info.namesize+1);
1697 		if (req_count > 0)
1698 			kmem_free(hib, req_count * sizeof (*hib));
1699 		return (err);
1700 	default:
1701 		return (ENOTTY);
1702 	}
1703 }
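
/*
 * To illustrate the ioctl interface above, a user-level test might do
 * something like the following (a minimal sketch: the device path and
 * the driver name "xyzzy" are hypothetical, error handling is omitted,
 * and the field semantics are those handled by BOFI_ADD_DEF and
 * BOFI_START above):
 *
 *	int fd = open("/devices/pseudo/bofi@0:bofi,ctl", O_RDWR);
 *	struct bofi_errdef ed;
 *	struct bofi_errctl ctl;
 *
 *	bzero(&ed, sizeof (ed));
 *	(void) strcpy(ed.name, "xyzzy");
 *	ed.namesize = strlen(ed.name);
 *	ed.instance = 0;
 *	ed.rnumber = -1;		(match any register set)
 *	ed.access_type = BOFI_PIO_R;
 *	ed.optype = BOFI_XOR;		(XOR the data read)
 *	ed.operand = 0xff;
 *	ed.access_count = 10;
 *	ed.fail_count = 1;
 *	(void) ioctl(fd, BOFI_ADD_DEF, &ed);
 *
 *	bzero(&ctl, sizeof (ctl));
 *	(void) strcpy(ctl.name, "xyzzy");
 *	ctl.namesize = strlen(ctl.name);
 *	ctl.instance = 0;
 *	(void) ioctl(fd, BOFI_START, &ctl);
 *
 * and, as bofi_close() above shows, simply closing fd frees any
 * errdefs created through it.
 */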
1704 
1705 
1706 /*
1707  * add a new error definition
1708  */
1709 static int
1710 bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
1711 	struct bofi_errent *softc)
1712 {
1713 	struct bofi_errent *ep;
1714 	struct bofi_shadow *hp;
1715 	struct bofi_link   *lp;
1716 
1717 	/*
1718 	 * allocate errdef structure and put on in-use list
1719 	 */
1720 	ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
1721 	ep->errdef = *errdefp;
1722 	ep->name = namep;
1723 	ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
1724 	ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
1725 	cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
1726 	/*
1727 	 * allocate space for logging
1728 	 */
1729 	ep->errdef.log.entries = 0;
1730 	ep->errdef.log.wrapcnt = 0;
1731 	if (ep->errdef.access_type & BOFI_LOG)
1732 		ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
1733 		    ep->errdef.log.logsize, KM_SLEEP);
1734 	else
1735 		ep->logbase = NULL;
1736 	/*
1737 	 * put on in-use list
1738 	 */
1739 	mutex_enter(&bofi_low_mutex);
1740 	mutex_enter(&bofi_mutex);
1741 	ep->next = errent_listp;
1742 	errent_listp = ep;
1743 	/*
1744 	 * and add it to the per-clone list
1745 	 */
1746 	ep->cnext = softc->cnext;
1747 	softc->cnext->cprev = ep;
1748 	ep->cprev = softc;
1749 	softc->cnext = ep;
1750 
1751 	/*
1752 	 * look for corresponding shadow handle structures and if we find any
1753 	 * tag this errdef structure on to their link lists.
1754 	 */
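	/*
	 * (roughly: a DMA errdef matches a bound DMA handle with the same
	 * rnumber - or -1 for any - whose errdef offset/len window covers
	 * at least one aligned 64-bit word; an interrupt errdef matches
	 * any interrupt handle; a PIO errdef matches an access handle with
	 * the same rnumber whose offset/len range overlaps the errdef's,
	 * len 0 meaning no upper bound)
	 */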
1755 	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1756 		if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
1757 		    hp->instance == errdefp->instance &&
1758 		    (((errdefp->access_type & BOFI_DMA_RW) &&
1759 		    (ep->errdef.rnumber == -1 ||
1760 		    hp->rnumber == ep->errdef.rnumber) &&
1761 		    hp->type == BOFI_DMA_HDL &&
1762 		    (((uintptr_t)(hp->addr + ep->errdef.offset +
1763 		    ep->errdef.len) & ~LLSZMASK) >
1764 		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
1765 		    LLSZMASK) & ~LLSZMASK))) ||
1766 		    ((errdefp->access_type & BOFI_INTR) &&
1767 		    hp->type == BOFI_INT_HDL) ||
1768 		    ((errdefp->access_type & BOFI_PIO_RW) &&
1769 		    hp->type == BOFI_ACC_HDL &&
1770 		    (errdefp->rnumber == -1 ||
1771 		    hp->rnumber == errdefp->rnumber) &&
1772 		    (errdefp->len == 0 ||
1773 		    hp->offset < errdefp->offset + errdefp->len) &&
1774 		    hp->offset + hp->len > errdefp->offset))) {
1775 			lp = bofi_link_freelist;
1776 			if (lp != NULL) {
1777 				bofi_link_freelist = lp->link;
1778 				lp->errentp = ep;
1779 				lp->link = hp->link;
1780 				hp->link = lp;
1781 			}
1782 		}
1783 	}
1784 	errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
1785 	mutex_exit(&bofi_mutex);
1786 	mutex_exit(&bofi_low_mutex);
1787 	ep->softintr_id = NULL;
1788 	return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
1789 	    NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
1790 }
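
/*
 * Note: the DMA-handle match above reduces to "does the errdef range
 * [addr + offset, addr + offset + len) contain at least one naturally
 * aligned 64-bit word?".  A minimal sketch of that predicate, assuming
 * LLSZMASK is sizeof (uint64_t) - 1 (names here are illustrative only):
 */
#if 0
static int
covers_aligned_word(uintptr_t start, off_t off, off_t len)
{
	uintptr_t first = (start + off + LLSZMASK) & ~LLSZMASK; /* round up */
	uintptr_t limit = (start + off + len) & ~LLSZMASK; /* round down */

	/* at least one whole 64-bit word fits iff limit > first */
	return (limit > first);
}
#endif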
1791 
1792 
1793 /*
1794  * delete existing errdef
1795  */
1796 static int
1797 bofi_errdef_free(struct bofi_errent *ep)
1798 {
1799 	struct bofi_errent *hep, *prev_hep;
1800 	struct bofi_link *lp, *prev_lp, *next_lp;
1801 	struct bofi_shadow *hp;
1802 
1803 	mutex_enter(&bofi_low_mutex);
1804 	mutex_enter(&bofi_mutex);
1805 	/*
1806 	 * don't just assume it's a valid ep - check that it's on the
1807 	 * in-use list
1808 	 */
1809 	prev_hep = NULL;
1810 	for (hep = errent_listp; hep != NULL; ) {
1811 		if (hep == ep)
1812 			break;
1813 		prev_hep = hep;
1814 		hep = hep->next;
1815 	}
1816 	if (hep == NULL) {
1817 		mutex_exit(&bofi_mutex);
1818 		mutex_exit(&bofi_low_mutex);
1819 		return (EINVAL);
1820 	}
1821 	/*
1822 	 * found it - delete from in-use list
1823 	 */
1824 
1825 	if (prev_hep)
1826 		prev_hep->next = hep->next;
1827 	else
1828 		errent_listp = hep->next;
1829 	/*
1830 	 * and take it off the per-clone list
1831 	 */
1832 	hep->cnext->cprev = hep->cprev;
1833 	hep->cprev->cnext = hep->cnext;
1834 	/*
1835 	 * see if we are on any shadow handle link lists - and if we
1836 	 * are then take us off
1837 	 */
1838 	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1839 		prev_lp = NULL;
1840 		for (lp = hp->link; lp != NULL; ) {
1841 			if (lp->errentp == ep) {
1842 				if (prev_lp)
1843 					prev_lp->link = lp->link;
1844 				else
1845 					hp->link = lp->link;
1846 				next_lp = lp->link;
1847 				lp->link = bofi_link_freelist;
1848 				bofi_link_freelist = lp;
1849 				lp = next_lp;
1850 			} else {
1851 				prev_lp = lp;
1852 				lp = lp->link;
1853 			}
1854 		}
1855 	}
1856 	mutex_exit(&bofi_mutex);
1857 	mutex_exit(&bofi_low_mutex);
1858 
1859 	cv_destroy(&ep->cv);
1860 	kmem_free(ep->name, ep->errdef.namesize+1);
1861 	if ((ep->errdef.access_type & BOFI_LOG) &&
1862 		ep->errdef.log.logsize && ep->logbase) /* double check */
1863 		kmem_free(ep->logbase,
1864 		    sizeof (struct acc_log_elem) * ep->errdef.log.logsize);
1865 
1866 	if (ep->softintr_id)
1867 		ddi_remove_softintr(ep->softintr_id);
1868 	kmem_free(ep, sizeof (struct bofi_errent));
1869 	return (0);
1870 }
1871 
1872 
1873 /*
1874  * start all errdefs corresponding to this name and instance
1875  */
1876 static void
1877 bofi_start(struct bofi_errctl *errctlp, char *namep)
1878 {
1879 	struct bofi_errent *ep;
1880 
1881 	/*
1882 	 * look for any errdefs with matching name and instance
1883 	 */
1884 	mutex_enter(&bofi_low_mutex);
1885 	for (ep = errent_listp; ep != NULL; ep = ep->next)
1886 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1887 		    errctlp->instance == ep->errdef.instance) {
1888 			ep->state |= BOFI_DEV_ACTIVE;
1889 			(void) drv_getparm(TIME, &(ep->errdef.log.start_time));
1890 			ep->errdef.log.stop_time = 0ul;
1891 		}
1892 	mutex_exit(&bofi_low_mutex);
1893 }
1894 
1895 
1896 /*
1897  * stop all errdefs corresponding to this name and instance
1898  */
1899 static void
1900 bofi_stop(struct bofi_errctl *errctlp, char *namep)
1901 {
1902 	struct bofi_errent *ep;
1903 
1904 	/*
1905 	 * look for any errdefs with matching name and instance
1906 	 */
1907 	mutex_enter(&bofi_low_mutex);
1908 	for (ep = errent_listp; ep != NULL; ep = ep->next)
1909 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1910 		    errctlp->instance == ep->errdef.instance) {
1911 			ep->state &= ~BOFI_DEV_ACTIVE;
1912 			if (ep->errdef.log.stop_time == 0ul)
1913 				(void) drv_getparm(TIME,
1914 				    &(ep->errdef.log.stop_time));
1915 		}
1916 	mutex_exit(&bofi_low_mutex);
1917 }
1918 
1919 
1920 /*
1921  * wake up any threads waiting on this errdef
1922  */
1923 static uint_t
1924 bofi_signal(caddr_t arg)
1925 {
1926 	struct bofi_errdef *edp = (struct bofi_errdef *)arg;
1927 	struct bofi_errent *hep;
1928 	struct bofi_errent *ep =
1929 	    (struct bofi_errent *)(uintptr_t)edp->errdef_handle;
1930 
1931 	mutex_enter(&bofi_low_mutex);
1932 	for (hep = errent_listp; hep != NULL; ) {
1933 		if (hep == ep)
1934 			break;
1935 		hep = hep->next;
1936 	}
1937 	if (hep == NULL) {
1938 		mutex_exit(&bofi_low_mutex);
1939 		return (DDI_INTR_UNCLAIMED);
1940 	}
1941 	if ((ep->errdef.access_type & BOFI_LOG) &&
1942 	    (edp->log.flags & BOFI_LOG_FULL)) {
1943 		edp->log.stop_time = bofi_gettime();
1944 		ep->state |= BOFI_NEW_MESSAGE;
1945 		if (ep->state & BOFI_MESSAGE_WAIT)
1946 			cv_broadcast(&ep->cv);
1947 		ep->state &= ~BOFI_MESSAGE_WAIT;
1948 	}
1949 	if (ep->errstate.msg_time != 0) {
1950 		ep->state |= BOFI_NEW_MESSAGE;
1951 		if (ep->state & BOFI_MESSAGE_WAIT)
1952 			cv_broadcast(&ep->cv);
1953 		ep->state &= ~BOFI_MESSAGE_WAIT;
1954 	}
1955 	mutex_exit(&bofi_low_mutex);
1956 	return (DDI_INTR_CLAIMED);
1957 }
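
/*
 * Note: bofi_signal() runs as the soft interrupt registered in
 * bofi_errdef_alloc() above; it is triggered (via ddi_trigger_softintr)
 * when the access log fills - see log_acc_event() - or when a fault
 * report is intercepted, and wakes any bofi_errdef_check_w() sleepers.
 */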
1958 
1959 
1960 /*
1961  * wake up all errdefs corresponding to this name and instance
1962  */
1963 static void
1964 bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
1965 {
1966 	struct bofi_errent *ep;
1967 
1968 	/*
1969 	 * look for any errdefs with matching name and instance
1970 	 */
1971 	mutex_enter(&bofi_low_mutex);
1972 	for (ep = errent_listp; ep != NULL; ep = ep->next)
1973 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1974 		    errctlp->instance == ep->errdef.instance) {
1975 			/*
1976 			 * wake up sleepers
1977 			 */
1978 			ep->state |= BOFI_NEW_MESSAGE;
1979 			if (ep->state & BOFI_MESSAGE_WAIT)
1980 				cv_broadcast(&ep->cv);
1981 			ep->state &= ~BOFI_MESSAGE_WAIT;
1982 		}
1983 	mutex_exit(&bofi_low_mutex);
1984 }
1985 
1986 
1987 /*
1988  * clear "acc_chk" for all errdefs corresponding to this name and instance
1989  * and wake them up.
1990  */
1991 static void
1992 bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep)
1993 {
1994 	struct bofi_errent *ep;
1995 
1996 	/*
1997 	 * look for any errdefs with matching name and instance
1998 	 */
1999 	mutex_enter(&bofi_low_mutex);
2000 	for (ep = errent_listp; ep != NULL; ep = ep->next)
2001 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2002 		    errctlp->instance == ep->errdef.instance) {
2003 			mutex_enter(&bofi_mutex);
2004 			if (ep->errdef.access_count == 0 &&
2005 			    ep->errdef.fail_count == 0)
2006 				ep->errdef.acc_chk = 0;
2007 			mutex_exit(&bofi_mutex);
2008 			/*
2009 			 * wake up sleepers
2010 			 */
2011 			ep->state |= BOFI_NEW_MESSAGE;
2012 			if (ep->state & BOFI_MESSAGE_WAIT)
2013 				cv_broadcast(&ep->cv);
2014 			ep->state &= ~BOFI_MESSAGE_WAIT;
2015 		}
2016 	mutex_exit(&bofi_low_mutex);
2017 }
2018 
2019 
2020 /*
2021  * set "fail_count" to 0 for all errdefs corresponding to this name and instance
2022  * whose "access_count" has expired, set "acc_chk" to 0 and wake them up.
2023  */
2024 static void
2025 bofi_clear_errors(struct bofi_errctl *errctlp, char *namep)
2026 {
2027 	struct bofi_errent *ep;
2028 
2029 	/*
2030 	 * look for any errdefs with matching name and instance
2031 	 */
2032 	mutex_enter(&bofi_low_mutex);
2033 	for (ep = errent_listp; ep != NULL; ep = ep->next)
2034 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2035 		    errctlp->instance == ep->errdef.instance) {
2036 			mutex_enter(&bofi_mutex);
2037 			if (ep->errdef.access_count == 0) {
2038 				ep->errdef.acc_chk = 0;
2039 				ep->errdef.fail_count = 0;
2040 				mutex_exit(&bofi_mutex);
2041 				if (ep->errdef.log.stop_time == 0ul)
2042 					(void) drv_getparm(TIME,
2043 					    &(ep->errdef.log.stop_time));
2044 			} else
2045 				mutex_exit(&bofi_mutex);
2046 			/*
2047 			 * wake up sleepers
2048 			 */
2049 			ep->state |= BOFI_NEW_MESSAGE;
2050 			if (ep->state & BOFI_MESSAGE_WAIT)
2051 				cv_broadcast(&ep->cv);
2052 			ep->state &= ~BOFI_MESSAGE_WAIT;
2053 		}
2054 	mutex_exit(&bofi_low_mutex);
2055 }
2056 
2057 
2058 /*
2059  * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2060  * this name and instance, set "acc_chk" to 0, and wake them up.
2061  */
2062 static void
2063 bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
2064 {
2065 	struct bofi_errent *ep;
2066 
2067 	/*
2068 	 * look for any errdefs with matching name and instance
2069 	 */
2070 	mutex_enter(&bofi_low_mutex);
2071 	for (ep = errent_listp; ep != NULL; ep = ep->next)
2072 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2073 		    errctlp->instance == ep->errdef.instance) {
2074 			mutex_enter(&bofi_mutex);
2075 			ep->errdef.acc_chk = 0;
2076 			ep->errdef.access_count = 0;
2077 			ep->errdef.fail_count = 0;
2078 			mutex_exit(&bofi_mutex);
2079 			if (ep->errdef.log.stop_time == 0ul)
2080 				(void) drv_getparm(TIME,
2081 				    &(ep->errdef.log.stop_time));
2082 			/*
2083 			 * wake up sleepers
2084 			 */
2085 			ep->state |= BOFI_NEW_MESSAGE;
2086 			if (ep->state & BOFI_MESSAGE_WAIT)
2087 				cv_broadcast(&ep->cv);
2088 			ep->state &= ~BOFI_MESSAGE_WAIT;
2089 		}
2090 	mutex_exit(&bofi_low_mutex);
2091 }
2092 
2093 
2094 /*
2095  * get state for this errdef
2096  */
2097 static int
2098 bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
2099 {
2100 	struct bofi_errent *hep;
2101 	struct bofi_errent *ep;
2102 
2103 	ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2104 	mutex_enter(&bofi_low_mutex);
2105 	/*
2106 	 * don't just assume it's a valid ep - check that it's on the
2107 	 * in-use list
2108 	 */
2109 	for (hep = errent_listp; hep != NULL; hep = hep->next)
2110 		if (hep == ep)
2111 			break;
2112 	if (hep == NULL) {
2113 		mutex_exit(&bofi_low_mutex);
2114 		return (EINVAL);
2115 	}
2116 	mutex_enter(&bofi_mutex);
2117 	ep->errstate.access_count = ep->errdef.access_count;
2118 	ep->errstate.fail_count = ep->errdef.fail_count;
2119 	ep->errstate.acc_chk = ep->errdef.acc_chk;
2120 	ep->errstate.log = ep->errdef.log;
2121 	*logpp = ep->logbase;
2122 	*errstatep = ep->errstate;
2123 	mutex_exit(&bofi_mutex);
2124 	mutex_exit(&bofi_low_mutex);
2125 	return (0);
2126 }
2127 
2128 
2129 /*
2130  * Wait for a ddi_report_fault message to come back for this errdef,
2131  * then return state for this errdef.
2132  * The fault report is intercepted by bofi_post_event, which triggers
2133  * bofi_signal via a softint, which will wake up this routine if
2134  * we are waiting.
2135  */
2136 static int
2137 bofi_errdef_check_w(struct bofi_errstate *errstatep,
2138     struct acc_log_elem **logpp)
2139 {
2140 	struct bofi_errent *hep;
2141 	struct bofi_errent *ep;
2142 	int rval = 0;
2143 
2144 	ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2145 	mutex_enter(&bofi_low_mutex);
2146 retry:
2147 	/*
2148 	 * don't just assume it's a valid ep - check that it's on the
2149 	 * in-use list
2150 	 */
2151 	for (hep = errent_listp; hep != NULL; hep = hep->next)
2152 		if (hep == ep)
2153 			break;
2154 	if (hep == NULL) {
2155 		mutex_exit(&bofi_low_mutex);
2156 		return (EINVAL);
2157 	}
2158 	/*
2159 	 * wait for ddi_report_fault for the devinfo corresponding
2160 	 * to this errdef
2161 	 */
2162 	if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
2163 		ep->state |= BOFI_MESSAGE_WAIT;
2164 		if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0)
2165 			rval = EINTR;
2166 		goto retry;
2167 	}
2168 	ep->state &= ~BOFI_NEW_MESSAGE;
2169 	/*
2170 	 * we either didn't need to sleep, we've been woken up, or we've been
2171 	 * signaled - either way return state now
2172 	 */
2173 	mutex_enter(&bofi_mutex);
2174 	ep->errstate.access_count = ep->errdef.access_count;
2175 	ep->errstate.fail_count = ep->errdef.fail_count;
2176 	ep->errstate.acc_chk = ep->errdef.acc_chk;
2177 	ep->errstate.log = ep->errdef.log;
2178 	*logpp = ep->logbase;
2179 	*errstatep = ep->errstate;
2180 	mutex_exit(&bofi_mutex);
2181 	mutex_exit(&bofi_low_mutex);
2182 	return (rval);
2183 }
2184 
2185 
2186 /*
2187  * support routine - check if requested driver is defined as under test in the
2188  * conf file.
2189  */
2190 static int
2191 driver_under_test(dev_info_t *rdip)
2192 {
2193 	int i;
2194 	char	*rname;
2195 	major_t rmaj;
2196 
2197 	rname = ddi_get_name(rdip);
2198 	rmaj = ddi_name_to_major(rname);
2199 
2200 	/*
2201 	 * Require the user to specifically request the following drivers.
2202 	 */
2203 	for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
2204 		if (driver_list_neg == 0) {
2205 			if (rmaj == ddi_name_to_major(&driver_list[i]))
2206 				return (1);
2207 		} else {
2208 			if (rmaj == ddi_name_to_major(&driver_list[i+1]))
2209 				return (0);
2210 		}
2211 	}
2212 	if (driver_list_neg == 0)
2213 		return (0);
2214 	else
2215 		return (1);
2216 
2217 }
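
/*
 * A minimal user-level sketch of the walk above, assuming driver_list
 * holds NUL-separated names packed back to back (the driver itself
 * compares major numbers rather than strings, and the negated form -
 * driver_list_neg - inverts the result to mean "all except these"):
 */
#if 0
static int
name_in_packed_list(const char *list, int listsz, const char *name)
{
	int i;

	for (i = 0; i < listsz; i += 1 + strlen(&list[i]))
		if (strcmp(&list[i], name) == 0)
			return (1);
	return (0);
}
#endif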
2218 
2219 
2220 static void
2221 log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
2222     size_t repcount, uint64_t *valuep)
2223 {
2224 	struct bofi_errdef *edp = &(ep->errdef);
2225 	struct acc_log *log = &edp->log;
2226 
2227 	ASSERT(log != NULL);
2228 	ASSERT(MUTEX_HELD(&bofi_mutex));
2229 
2230 	if (log->flags & BOFI_LOG_REPIO)
2231 		repcount = 1;
2232 	else if (repcount == 0 && edp->access_count > 0 &&
2233 	    (log->flags & BOFI_LOG_FULL) == 0)
2234 		edp->access_count += 1;
2235 
2236 	if (repcount && log->entries < log->logsize) {
2237 		struct acc_log_elem *elem = ep->logbase + log->entries;
2238 
2239 		if (log->flags & BOFI_LOG_TIMESTAMP)
2240 			elem->access_time = bofi_gettime();
2241 		elem->access_type = at;
2242 		elem->offset = offset;
2243 		elem->value = valuep ? *valuep : 0ll;
2244 		elem->size = len;
2245 		elem->repcount = repcount;
2246 		++log->entries;
2247 		if (log->entries == log->logsize) {
2248 			log->flags |= BOFI_LOG_FULL;
2249 			ddi_trigger_softintr(((struct bofi_errent *)
2250 			    (uintptr_t)edp->errdef_handle)->softintr_id);
2251 		}
2252 	}
2253 	if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
2254 		log->wrapcnt++;
2255 		edp->access_count = log->logsize;
2256 		log->entries = 0;	/* wrap back to the start */
2257 	}
2258 }
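
/*
 * Note on the logging flow above: entries accumulate in the fixed-size
 * array at ep->logbase until log->logsize is reached, at which point
 * BOFI_LOG_FULL is set and a softint fires bofi_signal() to wake any
 * waiters; with BOFI_LOG_WRAP set and the access countdown exhausted,
 * log->entries resets to 0 and log->wrapcnt counts the lap instead.
 */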
2259 
2260 
2261 /*
2262  * got a condition match on dma read/write - check counts and corrupt
2263  * data if necessary
2264  *
2265  * bofi_mutex always held when this is called.
2266  */
2267 static void
2268 do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
2269 	uint_t synctype, off_t off, off_t length)
2270 {
2271 	uint64_t operand;
2272 	int i;
2273 	off_t len;
2274 	caddr_t logaddr;
2275 	uint64_t *addr;
2276 	uint64_t *endaddr;
2277 
2278 	ASSERT(MUTEX_HELD(&bofi_mutex));
2279 	if ((ep->errdef.access_count ||
2280 		ep->errdef.fail_count) &&
2281 		(ep->errdef.access_type & BOFI_LOG)) {
2282 		uint_t atype;
2283 
2284 		if (synctype == DDI_DMA_SYNC_FORDEV)
2285 			atype = BOFI_DMA_W;
2286 		else if (synctype == DDI_DMA_SYNC_FORCPU ||
2287 			synctype == DDI_DMA_SYNC_FORKERNEL)
2288 			atype = BOFI_DMA_R;
2289 		else
2290 			atype = 0;
2291 		if ((off <= ep->errdef.offset &&
2292 			off + length > ep->errdef.offset) ||
2293 			(off > ep->errdef.offset &&
2294 			off < ep->errdef.offset + ep->errdef.len)) {
2295 			logaddr = (caddr_t)((uintptr_t)(hp->addr +
2296 			    off + LLSZMASK) & ~LLSZMASK);
2297 
2298 			log_acc_event(ep, atype, logaddr - hp->addr,
2299 			    length, 1, 0);
2300 		}
2301 	}
2302 	if (ep->errdef.access_count > 1) {
2303 		ep->errdef.access_count--;
2304 	} else if (ep->errdef.fail_count > 0) {
2305 		ep->errdef.fail_count--;
2306 		ep->errdef.access_count = 0;
2307 		/*
2308 		 * OK do the corruption
2309 		 */
2310 		if (ep->errstate.fail_time == 0)
2311 			ep->errstate.fail_time = bofi_gettime();
2312 		/*
2313 		 * work out how much to corrupt
2314 		 *
2315 		 * Make sure endaddr isn't greater than hp->addr + hp->len.
2316 		 * If endaddr becomes less than addr len becomes negative
2317 		 * and the following loop isn't entered.
2318 		 */
2319 		addr = (uint64_t *)((uintptr_t)((hp->addr +
2320 		    ep->errdef.offset) + LLSZMASK) & ~LLSZMASK);
2321 		endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len,
2322 		    ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK);
2323 		len = endaddr - addr;
2324 		operand = ep->errdef.operand;
2325 		switch (ep->errdef.optype) {
2326 		case BOFI_EQUAL :
2327 			for (i = 0; i < len; i++)
2328 				*(addr + i) = operand;
2329 			break;
2330 		case BOFI_AND :
2331 			for (i = 0; i < len; i++)
2332 				*(addr + i) &= operand;
2333 			break;
2334 		case BOFI_OR :
2335 			for (i = 0; i < len; i++)
2336 				*(addr + i) |= operand;
2337 			break;
2338 		case BOFI_XOR :
2339 			for (i = 0; i < len; i++)
2340 				*(addr + i) ^= operand;
2341 			break;
2342 		default:
2343 			/* do nothing */
2344 			break;
2345 		}
2346 	}
2347 }
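
/*
 * Each optype above applies the 64-bit operand to every aligned word in
 * the computed range; for BOFI_XOR, for instance, the loop is equivalent
 * to this minimal sketch (illustrative only):
 */
#if 0
static void
xor_words(uint64_t *addr, uint64_t *endaddr, uint64_t operand)
{
	while (addr < endaddr)
		*addr++ ^= operand;	/* flip the operand's set bits */
}
#endif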
2348 
2349 
2350 static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t);
2351 static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t);
2352 static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t);
2353 static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t);
2354 
2355 
2356 /*
2357  * check all errdefs linked to this shadow handle. If we've got a condition
2358  * match check counts and corrupt data if necessary
2359  *
2360  * bofi_mutex always held when this is called.
2361  *
2362  * because of possibility of BOFI_NO_TRANSFER, we couldn't get data
2363  * from io-space before calling this, so we pass in the func to do the
2364  * transfer as a parameter.
2365  */
2366 static uint64_t
2367 do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr,
2368 	uint64_t (*func)(), size_t repcount, size_t accsize)
2369 {
2370 	struct bofi_errent *ep;
2371 	struct bofi_link   *lp;
2372 	uint64_t operand;
2373 	uintptr_t minlen;
2374 	intptr_t base;
2375 	int done_get = 0;
2376 	uint64_t get_val, gv;
2377 
2378 	ASSERT(MUTEX_HELD(&bofi_mutex));
2379 	/*
2380 	 * check through all errdefs associated with this shadow handle
2381 	 */
2382 	for (lp = hp->link; lp != NULL; lp = lp->link) {
2383 		ep = lp->errentp;
2384 		if (ep->errdef.len == 0)
2385 			minlen = hp->len;
2386 		else
2387 			minlen = min(hp->len, ep->errdef.len);
2388 		base = addr - hp->addr - ep->errdef.offset + hp->offset;
2389 		if ((ep->errdef.access_type & BOFI_PIO_R) &&
2390 		    (ep->state & BOFI_DEV_ACTIVE) &&
2391 		    base >= 0 && base < minlen) {
2392 			/*
2393 			 * condition match for pio read
2394 			 */
2395 			if (ep->errdef.access_count > 1) {
2396 				ep->errdef.access_count--;
2397 				if (done_get == 0) {
2398 					done_get = 1;
2399 					gv = get_val = func(hp, addr);
2400 				}
2401 				if (ep->errdef.access_type & BOFI_LOG) {
2402 					log_acc_event(ep, BOFI_PIO_R,
2403 					    addr - hp->addr,
2404 					    accsize, repcount, &gv);
2405 				}
2406 			} else if (ep->errdef.fail_count > 0) {
2407 				ep->errdef.fail_count--;
2408 				ep->errdef.access_count = 0;
2409 				/*
2410 				 * OK do corruption
2411 				 */
2412 				if (ep->errstate.fail_time == 0)
2413 					ep->errstate.fail_time = bofi_gettime();
2414 				operand = ep->errdef.operand;
2415 				if (done_get == 0) {
2416 					if (ep->errdef.optype ==
2417 					    BOFI_NO_TRANSFER)
2418 						/*
2419 						 * no transfer - bomb out
2420 						 */
2421 						return (operand);
2422 					done_get = 1;
2423 					gv = get_val = func(hp, addr);
2424 
2425 				}
2426 				if (ep->errdef.access_type & BOFI_LOG) {
2427 					log_acc_event(ep, BOFI_PIO_R,
2428 					    addr - hp->addr,
2429 					    accsize, repcount, &gv);
2430 				}
2431 				switch (ep->errdef.optype) {
2432 				case BOFI_EQUAL :
2433 					get_val = operand;
2434 					break;
2435 				case BOFI_AND :
2436 					get_val &= operand;
2437 					break;
2438 				case BOFI_OR :
2439 					get_val |= operand;
2440 					break;
2441 				case BOFI_XOR :
2442 					get_val ^= operand;
2443 					break;
2444 				default:
2445 					/* do nothing */
2446 					break;
2447 				}
2448 			}
2449 		}
2450 	}
2451 	if (done_get == 0)
2452 		return (func(hp, addr));
2453 	else
2454 		return (get_val);
2455 }
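
/*
 * Note: for BOFI_NO_TRANSFER the operand itself is returned above and
 * the device is never read; otherwise the done_get latch ensures the
 * real access happens at most once even when several errdefs match the
 * same address.
 */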
2456 
2457 
2458 /*
2459  * check all errdefs linked to this shadow handle. If we've got a condition
2460  * match check counts and corrupt data if necessary
2461  *
2462  * bofi_mutex always held when this is called.
2463  *
2464  * because of possibility of BOFI_NO_TRANSFER, we return 0 if no data
2465  * is to be written out to io-space, 1 otherwise
2466  */
2467 static int
2468 do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
2469 				size_t size, size_t repcount)
2470 {
2471 	struct bofi_errent *ep;
2472 	struct bofi_link   *lp;
2473 	uintptr_t minlen;
2474 	intptr_t base;
2475 	uint64_t v = *valuep;
2476 
2477 	ASSERT(MUTEX_HELD(&bofi_mutex));
2478 	/*
2479 	 * check through all errdefs associated with this shadow handle
2480 	 */
2481 	for (lp = hp->link; lp != NULL; lp = lp->link) {
2482 		ep = lp->errentp;
2483 		if (ep->errdef.len == 0)
2484 			minlen = hp->len;
2485 		else
2486 			minlen = min(hp->len, ep->errdef.len);
2487 	base = (caddr_t)addr - hp->addr - ep->errdef.offset + hp->offset;
2488 		if ((ep->errdef.access_type & BOFI_PIO_W) &&
2489 		    (ep->state & BOFI_DEV_ACTIVE) &&
2490 		    base >= 0 && base < minlen) {
2491 			/*
2492 			 * condition match for pio write
2493 			 */
2494 
2495 			if (ep->errdef.access_count > 1) {
2496 				ep->errdef.access_count--;
2497 				if (ep->errdef.access_type & BOFI_LOG)
2498 					log_acc_event(ep, BOFI_PIO_W,
2499 					    addr - hp->addr, size,
2500 					    repcount, &v);
2501 			} else if (ep->errdef.fail_count > 0) {
2502 				ep->errdef.fail_count--;
2503 				ep->errdef.access_count = 0;
2504 				if (ep->errdef.access_type & BOFI_LOG)
2505 					log_acc_event(ep, BOFI_PIO_W,
2506 					    addr - hp->addr, size,
2507 					    repcount, &v);
2508 				/*
2509 				 * OK do corruption
2510 				 */
2511 				if (ep->errstate.fail_time == 0)
2512 					ep->errstate.fail_time = bofi_gettime();
2513 				switch (ep->errdef.optype) {
2514 				case BOFI_EQUAL :
2515 					*valuep = ep->errdef.operand;
2516 					break;
2517 				case BOFI_AND :
2518 					*valuep &= ep->errdef.operand;
2519 					break;
2520 				case BOFI_OR :
2521 					*valuep |= ep->errdef.operand;
2522 					break;
2523 				case BOFI_XOR :
2524 					*valuep ^= ep->errdef.operand;
2525 					break;
2526 				case BOFI_NO_TRANSFER :
2527 					/*
2528 					 * no transfer - bomb out
2529 					 */
2530 					return (0);
2531 				default:
2532 					/* do nothing */
2533 					break;
2534 				}
2535 			}
2536 		}
2537 	}
2538 	return (1);
2539 }
2540 
2541 
2542 static uint64_t
2543 do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
2544 {
2545 	return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr));
2546 }
2547 
2548 #define	BOFI_READ_CHECKS(type) \
2549 	if (bofi_ddi_check) \
2550 		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2551 	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2552 	    (caddr_t)addr - hp->addr >= hp->len)) { \
2553 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2554 		    "ddi_get() out of range addr %p not in %p/%llx", \
2555 		    (void *)addr, (void *)hp->addr, hp->len); \
2556 		return (0); \
2557 	}
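
/*
 * Note: the bofi_ddi_check adjustment above undoes the spurious base
 * address handed back by bofi_map() below (*vaddrp = (caddr_t)64), so
 * a driver that correctly goes through ddi_get/ddi_put still works,
 * while one that dereferences the mapped address directly gets caught
 * out.
 */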
2558 
2559 /*
2560  * our getb() routine - use tryenter
2561  */
2562 static uint8_t
2563 bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
2564 {
2565 	struct bofi_shadow *hp;
2566 	uint8_t retval;
2567 
2568 	hp = handle->ahi_common.ah_bus_private;
2569 	BOFI_READ_CHECKS(uint8_t)
2570 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2571 		return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
2572 	retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1,
2573 	    1);
2574 	mutex_exit(&bofi_mutex);
2575 	return (retval);
2576 }
2577 
2578 
2579 static uint64_t
2580 do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
2581 {
2582 	return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr));
2583 }
2584 
2585 
2586 /*
2587  * our getw() routine - use tryenter
2588  */
2589 static uint16_t
2590 bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
2591 {
2592 	struct bofi_shadow *hp;
2593 	uint16_t retval;
2594 
2595 	hp = handle->ahi_common.ah_bus_private;
2596 	BOFI_READ_CHECKS(uint16_t)
2597 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2598 		return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
2599 	retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1,
2600 	    2);
2601 	mutex_exit(&bofi_mutex);
2602 	return (retval);
2603 }
2604 
2605 
2606 static uint64_t
2607 do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr)
2608 {
2609 	return (hp->save.acc.ahi_get32(&hp->save.acc, (uint32_t *)addr));
2610 }
2611 
2612 
2613 /*
2614  * our getl() routine - use tryenter
2615  */
2616 static uint32_t
2617 bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
2618 {
2619 	struct bofi_shadow *hp;
2620 	uint32_t retval;
2621 
2622 	hp = handle->ahi_common.ah_bus_private;
2623 	BOFI_READ_CHECKS(uint32_t)
2624 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2625 		return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
2626 	retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1,
2627 	    4);
2628 	mutex_exit(&bofi_mutex);
2629 	return (retval);
2630 }
2631 
2632 
2633 static uint64_t
2634 do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
2635 {
2636 	return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr));
2637 }
2638 
2639 
2640 /*
2641  * our getll() routine - use tryenter
2642  */
2643 static uint64_t
2644 bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
2645 {
2646 	struct bofi_shadow *hp;
2647 	uint64_t retval;
2648 
2649 	hp = handle->ahi_common.ah_bus_private;
2650 	BOFI_READ_CHECKS(uint64_t)
2651 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2652 		return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
2653 	retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1,
2654 	    8);
2655 	mutex_exit(&bofi_mutex);
2656 	return (retval);
2657 }
2658 
2659 #define	BOFI_WRITE_TESTS(type) \
2660 	if (bofi_ddi_check) \
2661 		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2662 	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2663 	    (caddr_t)addr - hp->addr >= hp->len)) { \
2664 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2665 		    "ddi_put() out of range addr %p not in %p/%llx\n", \
2666 		    (void *)addr, (void *)hp->addr, hp->len); \
2667 		return; \
2668 	}
2669 
2670 /*
2671  * our putb() routine - use tryenter
2672  */
2673 static void
2674 bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
2675 {
2676 	struct bofi_shadow *hp;
2677 	uint64_t llvalue = value;
2678 
2679 	hp = handle->ahi_common.ah_bus_private;
2680 	BOFI_WRITE_TESTS(uint8_t)
2681 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2682 		hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2683 		return;
2684 	}
2685 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1))
2686 		hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2687 	mutex_exit(&bofi_mutex);
2688 }
2689 
2690 
2691 /*
2692  * our putw() routine - use tryenter
2693  */
2694 static void
2695 bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
2696 {
2697 	struct bofi_shadow *hp;
2698 	uint64_t llvalue = value;
2699 
2700 	hp = handle->ahi_common.ah_bus_private;
2701 	BOFI_WRITE_TESTS(uint16_t)
2702 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2703 		hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2704 		return;
2705 	}
2706 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1))
2707 		hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2708 	mutex_exit(&bofi_mutex);
2709 }
2710 
2711 
2712 /*
2713  * our putl() routine - use tryenter
2714  */
2715 static void
2716 bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
2717 {
2718 	struct bofi_shadow *hp;
2719 	uint64_t llvalue = value;
2720 
2721 	hp = handle->ahi_common.ah_bus_private;
2722 	BOFI_WRITE_TESTS(uint32_t)
2723 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2724 		hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2725 		return;
2726 	}
2727 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1))
2728 		hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2729 	mutex_exit(&bofi_mutex);
2730 }
2731 
2732 
2733 /*
2734  * our putll() routine - use tryenter
2735  */
2736 static void
2737 bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
2738 {
2739 	struct bofi_shadow *hp;
2740 	uint64_t llvalue = value;
2741 
2742 	hp = handle->ahi_common.ah_bus_private;
2743 	BOFI_WRITE_TESTS(uint64_t)
2744 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2745 		hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2746 		return;
2747 	}
2748 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1))
2749 		hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2750 	mutex_exit(&bofi_mutex);
2751 }
2752 
2753 #define	BOFI_REP_READ_TESTS(type) \
2754 	if (bofi_ddi_check) \
2755 		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2756 	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2757 	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2758 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2759 		    "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
2760 		    (void *)dev_addr, (void *)hp->addr, hp->len); \
2761 		if ((caddr_t)dev_addr < hp->addr || \
2762 		    (caddr_t)dev_addr - hp->addr >= hp->len) \
2763 			return; \
2764 		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2765 	}
2766 
2767 /*
2768  * our rep_getb() routine - use tryenter
2769  */
2770 static void
2771 bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2772 	size_t repcount, uint_t flags)
2773 {
2774 	struct bofi_shadow *hp;
2775 	int i;
2776 	uint8_t *addr;
2777 
2778 	hp = handle->ahi_common.ah_bus_private;
2779 	BOFI_REP_READ_TESTS(uint8_t)
2780 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2781 		hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
2782 		    repcount, flags);
2783 		return;
2784 	}
2785 	for (i = 0; i < repcount; i++) {
2786 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2787 		*(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
2788 		    do_bofi_rd8, i ? 0 : repcount, 1);
2789 	}
2790 	mutex_exit(&bofi_mutex);
2791 }
2792 
2793 
2794 /*
2795  * our rep_getw() routine - use tryenter
2796  */
2797 static void
2798 bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2799 	uint16_t *dev_addr, size_t repcount, uint_t flags)
2800 {
2801 	struct bofi_shadow *hp;
2802 	int i;
2803 	uint16_t *addr;
2804 
2805 	hp = handle->ahi_common.ah_bus_private;
2806 	BOFI_REP_READ_TESTS(uint16_t)
2807 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2808 		hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
2809 		    repcount, flags);
2810 		return;
2811 	}
2812 	for (i = 0; i < repcount; i++) {
2813 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2814 		*(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
2815 		    do_bofi_rd16, i ? 0 : repcount, 2);
2816 	}
2817 	mutex_exit(&bofi_mutex);
2818 }
2819 
2820 
2821 /*
2822  * our rep_getl() routine - use tryenter
2823  */
2824 static void
2825 bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
2826 	uint32_t *dev_addr, size_t repcount, uint_t flags)
2827 {
2828 	struct bofi_shadow *hp;
2829 	int i;
2830 	uint32_t *addr;
2831 
2832 	hp = handle->ahi_common.ah_bus_private;
2833 	BOFI_REP_READ_TESTS(uint32_t)
2834 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2835 		hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
2836 		    repcount, flags);
2837 		return;
2838 	}
2839 	for (i = 0; i < repcount; i++) {
2840 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2841 		*(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
2842 		    do_bofi_rd32, i ? 0 : repcount, 4);
2843 	}
2844 	mutex_exit(&bofi_mutex);
2845 }
2846 
2847 
2848 /*
2849  * our rep_getll() routine - use tryenter
2850  */
2851 static void
2852 bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
2853 	uint64_t *dev_addr, size_t repcount, uint_t flags)
2854 {
2855 	struct bofi_shadow *hp;
2856 	int i;
2857 	uint64_t *addr;
2858 
2859 	hp = handle->ahi_common.ah_bus_private;
2860 	BOFI_REP_READ_TESTS(uint64_t)
2861 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2862 		hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
2863 		    repcount, flags);
2864 		return;
2865 	}
2866 	for (i = 0; i < repcount; i++) {
2867 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2868 		*(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
2869 		    do_bofi_rd64, i ? 0 : repcount, 8);
2870 	}
2871 	mutex_exit(&bofi_mutex);
2872 }
2873 
2874 #define	BOFI_REP_WRITE_TESTS(type) \
2875 	if (bofi_ddi_check) \
2876 		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2877 	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2878 	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2879 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2880 		    "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
2881 		    (void *)dev_addr, (void *)hp->addr, hp->len); \
2882 		if ((caddr_t)dev_addr < hp->addr || \
2883 		    (caddr_t)dev_addr - hp->addr >= hp->len) \
2884 			return; \
2885 		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2886 	}
2887 
2888 /*
2889  * our rep_putb() routine - use tryenter
2890  */
2891 static void
2892 bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2893 	size_t repcount, uint_t flags)
2894 {
2895 	struct bofi_shadow *hp;
2896 	int i;
2897 	uint64_t llvalue;
2898 	uint8_t *addr;
2899 
2900 	hp = handle->ahi_common.ah_bus_private;
2901 	BOFI_REP_WRITE_TESTS(uint8_t)
2902 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2903 		hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
2904 		    repcount, flags);
2905 		return;
2906 	}
2907 	for (i = 0; i < repcount; i++) {
2908 		llvalue = *(host_addr + i);
2909 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2910 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 :
2911 		    repcount))
2912 			hp->save.acc.ahi_put8(&hp->save.acc, addr,
2913 			    (uint8_t)llvalue);
2914 	}
2915 	mutex_exit(&bofi_mutex);
2916 }
2917 
2918 
2919 /*
2920  * our rep_putw() routine - use tryenter
2921  */
2922 static void
2923 bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2924 	uint16_t *dev_addr, size_t repcount, uint_t flags)
2925 {
2926 	struct bofi_shadow *hp;
2927 	int i;
2928 	uint64_t llvalue;
2929 	uint16_t *addr;
2930 
2931 	hp = handle->ahi_common.ah_bus_private;
2932 	BOFI_REP_WRITE_TESTS(uint16_t)
2933 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2934 		hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
2935 		    repcount, flags);
2936 		return;
2937 	}
2938 	for (i = 0; i < repcount; i++) {
2939 		llvalue = *(host_addr + i);
2940 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2941 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 :
2942 		    repcount))
2943 			hp->save.acc.ahi_put16(&hp->save.acc, addr,
2944 			    (uint16_t)llvalue);
2945 	}
2946 	mutex_exit(&bofi_mutex);
2947 }
2948 
2949 
2950 /*
2951  * our rep_putl() routine - use tryenter
2952  */
2953 static void
2954 bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
2955 	uint32_t *dev_addr, size_t repcount, uint_t flags)
2956 {
2957 	struct bofi_shadow *hp;
2958 	int i;
2959 	uint64_t llvalue;
2960 	uint32_t *addr;
2961 
2962 	hp = handle->ahi_common.ah_bus_private;
2963 	BOFI_REP_WRITE_TESTS(uint32_t)
2964 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2965 		hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
2966 		    repcount, flags);
2967 		return;
2968 	}
2969 	for (i = 0; i < repcount; i++) {
2970 		llvalue = *(host_addr + i);
2971 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2972 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 0 :
2973 		    repcount))
2974 			hp->save.acc.ahi_put32(&hp->save.acc, addr,
2975 			    (uint32_t)llvalue);
2976 	}
2977 	mutex_exit(&bofi_mutex);
2978 }
2979 
2980 
2981 /*
2982  * our rep_putll() routine - use tryenter
2983  */
2984 static void
2985 bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
2986 	uint64_t *dev_addr, size_t repcount, uint_t flags)
2987 {
2988 	struct bofi_shadow *hp;
2989 	int i;
2990 	uint64_t llvalue;
2991 	uint64_t *addr;
2992 
2993 	hp = handle->ahi_common.ah_bus_private;
2994 	BOFI_REP_WRITE_TESTS(uint64_t)
2995 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2996 		hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
2997 		    repcount, flags);
2998 		return;
2999 	}
3000 	for (i = 0; i < repcount; i++) {
3001 		llvalue = *(host_addr + i);
3002 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3003 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 :
3004 		    repcount))
3005 			hp->save.acc.ahi_put64(&hp->save.acc, addr,
3006 			    (uint64_t)llvalue);
3007 	}
3008 	mutex_exit(&bofi_mutex);
3009 }
3010 
3011 
3012 /*
3013  * our ddi_map routine
3014  */
3015 static int
3016 bofi_map(dev_info_t *dip, dev_info_t *rdip,
3017 	ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
3018 {
3019 	ddi_acc_impl_t *ap;
3020 	struct bofi_shadow *hp;
3021 	struct bofi_errent *ep;
3022 	struct bofi_link   *lp, *next_lp;
3023 	int retval;
3024 	struct bofi_shadow *dhashp;
3025 	struct bofi_shadow *hhashp;
3026 
3027 	switch (reqp->map_op) {
3028 	case DDI_MO_MAP_LOCKED:
3029 		/*
3030 		 * for this case get nexus to do real work first
3031 		 */
3032 		retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
3033 		    vaddrp);
3034 		if (retval != DDI_SUCCESS)
3035 			return (retval);
3036 
3037 		ap = (ddi_acc_impl_t *)reqp->map_handlep;
3038 		if (ap == NULL)
3039 			return (DDI_SUCCESS);
3040 		/*
3041 		 * if driver_list is set, only intercept those drivers
3042 		 */
3043 		if (!driver_under_test(ap->ahi_common.ah_dip))
3044 			return (DDI_SUCCESS);
3045 
3046 		/*
3047 		 * support for ddi_regs_map_setup()
3048 		 * - allocate shadow handle structure and fill it in
3049 		 */
3050 		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
3051 		(void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
3052 		    NAMESIZE);
3053 		hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
3054 		hp->dip = ap->ahi_common.ah_dip;
3055 		hp->addr = *vaddrp;
3056 		/*
3057 		 * return spurious value to catch direct access to registers
3058 		 */
3059 		if (bofi_ddi_check)
3060 			*vaddrp = (caddr_t)64;
3061 		hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
3062 		hp->offset = offset;
3063 		if (len == 0)
3064 			hp->len = INT_MAX - offset;
3065 		else
3066 			hp->len = min(len, INT_MAX - offset);
3067 		hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
3068 		hp->link = NULL;
3069 		hp->type = BOFI_ACC_HDL;
3070 		/*
3071 		 * save existing function pointers and plug in our own
3072 		 */
3073 		hp->save.acc = *ap;
3074 		ap->ahi_get8 = bofi_rd8;
3075 		ap->ahi_get16 = bofi_rd16;
3076 		ap->ahi_get32 = bofi_rd32;
3077 		ap->ahi_get64 = bofi_rd64;
3078 		ap->ahi_put8 = bofi_wr8;
3079 		ap->ahi_put16 = bofi_wr16;
3080 		ap->ahi_put32 = bofi_wr32;
3081 		ap->ahi_put64 = bofi_wr64;
3082 		ap->ahi_rep_get8 = bofi_rep_rd8;
3083 		ap->ahi_rep_get16 = bofi_rep_rd16;
3084 		ap->ahi_rep_get32 = bofi_rep_rd32;
3085 		ap->ahi_rep_get64 = bofi_rep_rd64;
3086 		ap->ahi_rep_put8 = bofi_rep_wr8;
3087 		ap->ahi_rep_put16 = bofi_rep_wr16;
3088 		ap->ahi_rep_put32 = bofi_rep_wr32;
3089 		ap->ahi_rep_put64 = bofi_rep_wr64;
3090 		ap->ahi_fault_check = bofi_check_acc_hdl;
3091 #if defined(__sparc)
3092 #else
3093 		ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
3094 #endif
3095 		/*
3096 		 * stick in a pointer to our shadow handle
3097 		 */
3098 		ap->ahi_common.ah_bus_private = hp;
3099 		/*
3100 		 * add to dhash, hhash and inuse lists
3101 		 */
3102 		mutex_enter(&bofi_low_mutex);
3103 		mutex_enter(&bofi_mutex);
3104 		hp->next = shadow_list.next;
3105 		shadow_list.next->prev = hp;
3106 		hp->prev = &shadow_list;
3107 		shadow_list.next = hp;
3108 		hhashp = HDL_HHASH(ap);
3109 		hp->hnext = hhashp->hnext;
3110 		hhashp->hnext->hprev = hp;
3111 		hp->hprev = hhashp;
3112 		hhashp->hnext = hp;
3113 		dhashp = HDL_DHASH(hp->dip);
3114 		hp->dnext = dhashp->dnext;
3115 		dhashp->dnext->dprev = hp;
3116 		hp->dprev = dhashp;
3117 		dhashp->dnext = hp;
3118 		/*
3119 		 * chain on any pre-existing errdefs that apply to this
3120 		 * acc_handle
3121 		 */
3122 		for (ep = errent_listp; ep != NULL; ep = ep->next) {
3123 			if (ddi_name_to_major(hp->name) ==
3124 			    ddi_name_to_major(ep->name) &&
3125 			    hp->instance == ep->errdef.instance &&
3126 			    (ep->errdef.access_type & BOFI_PIO_RW) &&
3127 			    (ep->errdef.rnumber == -1 ||
3128 			    hp->rnumber == ep->errdef.rnumber) &&
3129 			    (ep->errdef.len == 0 ||
3130 			    offset < ep->errdef.offset + ep->errdef.len) &&
3131 			    offset + hp->len > ep->errdef.offset) {
3132 				lp = bofi_link_freelist;
3133 				if (lp != NULL) {
3134 					bofi_link_freelist = lp->link;
3135 					lp->errentp = ep;
3136 					lp->link = hp->link;
3137 					hp->link = lp;
3138 				}
3139 			}
3140 		}
3141 		mutex_exit(&bofi_mutex);
3142 		mutex_exit(&bofi_low_mutex);
3143 		return (DDI_SUCCESS);
3144 	case DDI_MO_UNMAP:
3145 
3146 		ap = (ddi_acc_impl_t *)reqp->map_handlep;
3147 		if (ap == NULL)
3148 			break;
3149 		/*
3150 		 * support for ddi_regs_map_free()
3151 		 * - check we really have a shadow handle for this one
3152 		 */
3153 		mutex_enter(&bofi_low_mutex);
3154 		mutex_enter(&bofi_mutex);
3155 		hhashp = HDL_HHASH(ap);
3156 		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3157 			if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
3158 				break;
3159 		if (hp == hhashp) {
3160 			mutex_exit(&bofi_mutex);
3161 			mutex_exit(&bofi_low_mutex);
3162 			break;
3163 		}
3164 		/*
3165 		 * got a shadow handle - restore original pointers
3166 		 */
3167 		*ap = hp->save.acc;
3168 		*vaddrp = hp->addr;
3169 		/*
3170 		 * remove from dhash, hhash and inuse lists
3171 		 */
3172 		hp->hnext->hprev = hp->hprev;
3173 		hp->hprev->hnext = hp->hnext;
3174 		hp->dnext->dprev = hp->dprev;
3175 		hp->dprev->dnext = hp->dnext;
3176 		hp->next->prev = hp->prev;
3177 		hp->prev->next = hp->next;
3178 		/*
3179 		 * free any errdef link structures tagged onto the shadow handle
3180 		 */
3181 		for (lp = hp->link; lp != NULL; ) {
3182 			next_lp = lp->link;
3183 			lp->link = bofi_link_freelist;
3184 			bofi_link_freelist = lp;
3185 			lp = next_lp;
3186 		}
3187 		hp->link = NULL;
3188 		mutex_exit(&bofi_mutex);
3189 		mutex_exit(&bofi_low_mutex);
3190 		/*
3191 		 * finally delete shadow handle
3192 		 */
3193 		kmem_free(hp, sizeof (struct bofi_shadow));
3194 		break;
3195 	default:
3196 		break;
3197 	}
3198 	return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
3199 }
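
/*
 * Once bofi_map() has plugged in the routines above, an ordinary
 * register access in a driver under test goes through bofi_rd32()/
 * bofi_wr32() etc. with no change to the driver.  Hypothetical driver
 * fragment (REG_OFFSET and dev_attr are placeholders, not names from
 * this file):
 */
#if 0
	ddi_acc_handle_t regs;
	caddr_t base;
	uint32_t status;

	if (ddi_regs_map_setup(dip, 1, &base, 0, 0, &dev_attr,
	    &regs) == DDI_SUCCESS) {
		/* routed through bofi_rd32() when bofi is attached */
		status = ddi_get32(regs, (uint32_t *)(base + REG_OFFSET));
		ddi_regs_map_free(&regs);
	}
#endif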
3200 
3201 
3202 /*
3203  * chain any pre-existing errdefs on to newly created dma handle
3204  * if required call do_dma_corrupt() to corrupt data
3205  */
3206 static void
3207 chain_on_errdefs(struct bofi_shadow *hp)
3208 {
3209 	struct bofi_errent *ep;
3210 	struct bofi_link   *lp;
3211 
3212 	ASSERT(MUTEX_HELD(&bofi_mutex));
3213 	/*
3214 	 * chain on any pre-existing errdefs that apply to this dma_handle
3215 	 */
3216 	for (ep = errent_listp; ep != NULL; ep = ep->next) {
3217 		if (ddi_name_to_major(hp->name) ==
3218 		    ddi_name_to_major(ep->name) &&
3219 		    hp->instance == ep->errdef.instance &&
3220 		    (ep->errdef.rnumber == -1 ||
3221 		    hp->rnumber == ep->errdef.rnumber) &&
3222 		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
3223 		    (((uintptr_t)(hp->addr + ep->errdef.offset +
3224 		    ep->errdef.len) & ~LLSZMASK) >
3225 		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
3226 		    LLSZMASK) & ~LLSZMASK)))) {
3227 			/*
3228 			 * got a match - link it on
3229 			 */
3230 			lp = bofi_link_freelist;
3231 			if (lp != NULL) {
3232 				bofi_link_freelist = lp->link;
3233 				lp->errentp = ep;
3234 				lp->link = hp->link;
3235 				hp->link = lp;
3236 				if ((ep->errdef.access_type & BOFI_DMA_W) &&
3237 				    (hp->flags & DDI_DMA_WRITE) &&
3238 				    (ep->state & BOFI_DEV_ACTIVE)) {
3239 					do_dma_corrupt(hp, ep,
3240 					    DDI_DMA_SYNC_FORDEV,
3241 					    0, hp->len);
3242 				}
3243 			}
3244 		}
3245 	}
3246 }
3247 
3248 
3249 /*
3250  * need to do the copy byte-by-byte in case one of the pages is little-endian
3251  */
3252 static void
3253 xbcopy(void *from, void *to, u_longlong_t len)
3254 {
3255 	uchar_t *f = from;
3256 	uchar_t *t = to;
3257 
3258 	while (len--)
3259 		*t++ = *f++;
3260 }
3261 
3262 
3263 /*
3264  * our ddi_dma_map routine
3265  */
3266 static int
3267 bofi_dma_map(dev_info_t *dip, dev_info_t *rdip,
3268 		struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
3269 {
3270 	struct bofi_shadow *hp, *xhp;
3271 	int maxrnumber = 0;
3272 	int retval = DDI_DMA_NORESOURCES;
3273 	auto struct ddi_dma_req dmareq;
3274 	int sleep;
3275 	struct bofi_shadow *dhashp;
3276 	struct bofi_shadow *hhashp;
3277 	ddi_dma_impl_t *mp;
3278 	unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3279 
3280 	/*
3281 	 * if driver_list is set, only intercept those drivers
3282 	 */
3283 	if (handlep == NULL || !driver_under_test(rdip))
3284 		return (save_bus_ops.bus_dma_map(dip, rdip, dmareqp, handlep));
3285 
3286 	sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
3287 	/*
3288 	 * allocate shadow handle structure and fill it in
3289 	 */
3290 	hp = kmem_zalloc(sizeof (struct bofi_shadow), sleep);
3291 	if (hp == NULL)
3292 		goto error;
3293 	(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3294 	hp->instance = ddi_get_instance(rdip);
3295 	hp->dip = rdip;
3296 	hp->flags = dmareqp->dmar_flags;
3297 	hp->link = NULL;
3298 	hp->type = BOFI_DMA_HDL;
3299 	/*
3300 	 * get a kernel virtual mapping
3301 	 */
3302 	hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3303 	if (hp->addr == NULL)
3304 		goto error;
3305 	if (bofi_sync_check) {
3306 		/*
3307 		 * Take a copy and pass pointers to this up to nexus instead.
3308 		 * Data will be copied from the original on explicit
3309 		 * and implicit ddi_dma_sync()
3310 		 *
3311 		 * - maintain page alignment because some devices assume it.
3312 		 */
3313 		hp->origaddr = hp->addr;
3314 		hp->allocaddr = ddi_umem_alloc(
3315 		    ((uintptr_t)hp->addr & pagemask) + hp->len, sleep,
3316 		    &hp->umem_cookie);
3317 		if (hp->allocaddr == NULL)
3318 			goto error;
3319 		hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3320 		if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3321 			xbcopy(hp->origaddr, hp->addr, hp->len);
3322 		dmareq = *dmareqp;
3323 		dmareq.dmar_object.dmao_size = hp->len;
3324 		dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3325 		dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3326 		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3327 		dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3328 		dmareqp = &dmareq;
3329 	}
3330 	/*
3331 	 * call nexus to do the real work
3332 	 */
3333 	retval = save_bus_ops.bus_dma_map(dip, rdip, dmareqp, handlep);
3334 	if (retval != DDI_SUCCESS)
3335 		goto error2;
3336 	/*
3337 	 * now set dma_handle to point to real handle
3338 	 */
3339 	hp->hdl.dma_handle = *handlep;
3340 	/*
3341 	 * unset DMP_NOSYNC
3342 	 */
3343 	mp = (ddi_dma_impl_t *)*handlep;
3344 	mp->dmai_rflags &= ~DMP_NOSYNC;
3345 	mp->dmai_fault_check = bofi_check_dma_hdl;
3346 	/*
3347 	 * bind and unbind are cached in devinfo - must overwrite them
3348 	 * - note that our bind and unbind are quite happy dealing with
3349 	 * any handles for this devinfo that were previously allocated
3350 	 */
3351 	if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3352 		DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3353 	if (save_bus_ops.bus_dma_unbindhdl ==
3354 	    DEVI(rdip)->devi_bus_dma_unbindfunc)
3355 		DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3356 	mutex_enter(&bofi_low_mutex);
3357 	mutex_enter(&bofi_mutex);
3358 	/*
3359 	 * get an "rnumber" for this handle - really just seeking to
3360 	 * get a unique number - generally we only care about early-allocated
3361 	 * handles - so if we get as far as INT_MAX, just stay there
3362 	 */
3363 	dhashp = HDL_DHASH(hp->dip);
3364 	for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3365 		if (ddi_name_to_major(xhp->name) ==
3366 		    ddi_name_to_major(hp->name) &&
3367 		    xhp->instance == hp->instance &&
3368 		    xhp->type == BOFI_DMA_HDL)
3369 			if (xhp->rnumber >= maxrnumber) {
3370 				if (xhp->rnumber == INT_MAX)
3371 					maxrnumber = INT_MAX;
3372 				else
3373 					maxrnumber = xhp->rnumber + 1;
3374 			}
3375 	hp->rnumber = maxrnumber;
3376 	/*
3377 	 * add to dhash, hhash and inuse lists
3378 	 */
3379 	hp->next = shadow_list.next;
3380 	shadow_list.next->prev = hp;
3381 	hp->prev = &shadow_list;
3382 	shadow_list.next = hp;
3383 	hhashp = HDL_HHASH(*handlep);
3384 	hp->hnext = hhashp->hnext;
3385 	hhashp->hnext->hprev = hp;
3386 	hp->hprev = hhashp;
3387 	hhashp->hnext = hp;
3388 	dhashp = HDL_DHASH(hp->dip);
3389 	hp->dnext = dhashp->dnext;
3390 	dhashp->dnext->dprev = hp;
3391 	hp->dprev = dhashp;
3392 	dhashp->dnext = hp;
3393 	/*
3394 	 * chain on any pre-existing errdefs that apply to this
3395 	 * dma_handle and corrupt if required (as there is an implicit
3396 	 * ddi_dma_sync() in this call)
3397 	 */
3398 	chain_on_errdefs(hp);
3399 	mutex_exit(&bofi_mutex);
3400 	mutex_exit(&bofi_low_mutex);
3401 	return (retval);
3402 error:
3403 	if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3404 		/*
3405 		 * what to do here? Wait a bit and try again
3406 		 */
3407 		(void) timeout((void (*)())dmareqp->dmar_fp,
3408 		    dmareqp->dmar_arg, 10);
3409 	}
3410 error2:
3411 	if (hp) {
3412 		ddi_dmareq_mapout(hp->mapaddr, hp->len);
3413 		if (bofi_sync_check && hp->allocaddr)
3414 			ddi_umem_free(hp->umem_cookie);
3415 		kmem_free(hp, sizeof (struct bofi_shadow));
3416 	}
3417 	return (retval);
3418 }
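
/*
 * With bofi_sync_check set, the nexus is handed a private copy whose
 * offset within the page matches the original, so device alignment
 * assumptions still hold.  Schematically (variable names illustrative):
 */
#if 0
	pagemask = ddi_ptob(rdip, 1) - 1;
	allocaddr = ddi_umem_alloc(((uintptr_t)orig & pagemask) + len,
	    KM_SLEEP, &cookie);
	copy = allocaddr + ((uintptr_t)orig & pagemask);
#endif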
3419 
3420 
3421 /*
3422  * our ddi_dma_allochdl routine
3423  */
3424 static int
3425 bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
3426 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
3427 {
3428 	int retval = DDI_DMA_NORESOURCES;
3429 	struct bofi_shadow *hp, *xhp;
3430 	int maxrnumber = 0;
3431 	struct bofi_shadow *dhashp;
3432 	struct bofi_shadow *hhashp;
3433 	ddi_dma_impl_t *mp;
3434 
3435 	/*
3436 	 * if driver_list is set, only intercept those drivers
3437 	 */
3438 	if (!driver_under_test(rdip))
3439 		return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
3440 		    waitfp, arg, handlep));
3441 
3442 	/*
3443 	 * allocate shadow handle structure and fill it in
3444 	 */
3445 	hp = kmem_zalloc(sizeof (struct bofi_shadow),
3446 	    ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
3447 	if (hp == NULL) {
3448 		/*
3449 		 * what to do here? Wait a bit and try again
3450 		 */
3451 		if (waitfp != DDI_DMA_DONTWAIT)
3452 			(void) timeout((void (*)())waitfp, arg, 10);
3453 		return (retval);
3454 	}
3455 	(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3456 	hp->instance = ddi_get_instance(rdip);
3457 	hp->dip = rdip;
3458 	hp->link = NULL;
3459 	hp->type = BOFI_NULL;
3460 	/*
3461 	 * call nexus to do the real work
3462 	 */
3463 	retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
3464 	    handlep);
3465 	if (retval != DDI_SUCCESS) {
3466 		kmem_free(hp, sizeof (struct bofi_shadow));
3467 		return (retval);
3468 	}
3469 	/*
3470 	 * now set dma_handle to point to the real handle
3471 	 */
3472 	hp->hdl.dma_handle = *handlep;
3473 	mp = (ddi_dma_impl_t *)*handlep;
3474 	mp->dmai_fault_check = bofi_check_dma_hdl;
3475 	/*
3476 	 * bind and unbind are cached in devinfo - must overwrite them
3477 	 * - note that our bind and unbind are quite happy dealing with
3478 	 * any handles for this devinfo that were previously allocated
3479 	 */
3480 	if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3481 		DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3482 	if (save_bus_ops.bus_dma_unbindhdl ==
3483 	    DEVI(rdip)->devi_bus_dma_unbindfunc)
3484 		DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3485 	mutex_enter(&bofi_low_mutex);
3486 	mutex_enter(&bofi_mutex);
3487 	/*
3488 	 * get an "rnumber" for this handle - really just seeking to
3489 	 * get a unique number - generally we only care about early-allocated
3490 	 * handles - so if we get as far as INT_MAX, just stay there
3491 	 */
3492 	dhashp = HDL_DHASH(hp->dip);
3493 	for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3494 		if (ddi_name_to_major(xhp->name) ==
3495 		    ddi_name_to_major(hp->name) &&
3496 		    xhp->instance == hp->instance &&
3497 		    (xhp->type == BOFI_DMA_HDL ||
3498 		    xhp->type == BOFI_NULL))
3499 			if (xhp->rnumber >= maxrnumber) {
3500 				if (xhp->rnumber == INT_MAX)
3501 					maxrnumber = INT_MAX;
3502 				else
3503 					maxrnumber = xhp->rnumber + 1;
3504 			}
3505 	hp->rnumber = maxrnumber;
3506 	/*
3507 	 * add to dhash, hhash and inuse lists
3508 	 */
3509 	hp->next = shadow_list.next;
3510 	shadow_list.next->prev = hp;
3511 	hp->prev = &shadow_list;
3512 	shadow_list.next = hp;
3513 	hhashp = HDL_HHASH(*handlep);
3514 	hp->hnext = hhashp->hnext;
3515 	hhashp->hnext->hprev = hp;
3516 	hp->hprev = hhashp;
3517 	hhashp->hnext = hp;
3518 	dhashp = HDL_DHASH(hp->dip);
3519 	hp->dnext = dhashp->dnext;
3520 	dhashp->dnext->dprev = hp;
3521 	hp->dprev = dhashp;
3522 	dhashp->dnext = hp;
3523 	mutex_exit(&bofi_mutex);
3524 	mutex_exit(&bofi_low_mutex);
3525 	return (retval);
3526 }
3527 
3528 
3529 /*
3530  * our ddi_dma_freehdl routine
3531  */
3532 static int
3533 bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3534 {
3535 	int retval;
3536 	struct bofi_shadow *hp;
3537 	struct bofi_shadow *hhashp;
3538 
3539 	/*
3540 	 * find shadow for this handle
3541 	 */
3542 	mutex_enter(&bofi_low_mutex);
3543 	mutex_enter(&bofi_mutex);
3544 	hhashp = HDL_HHASH(handle);
3545 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3546 		if (hp->hdl.dma_handle == handle)
3547 			break;
3548 	mutex_exit(&bofi_mutex);
3549 	mutex_exit(&bofi_low_mutex);
3550 	/*
3551 	 * call nexus to do the real work
3552 	 */
3553 	retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
3554 	if (retval != DDI_SUCCESS) {
3555 		return (retval);
3556 	}
3557 	/*
3558 	 * did we really have a shadow for this handle
3559 	 */
3560 	if (hp == hhashp)
3561 		return (retval);
3562 	/*
3563 	 * yes we have - see if it's still bound
3564 	 */
3565 	mutex_enter(&bofi_low_mutex);
3566 	mutex_enter(&bofi_mutex);
3567 	if (hp->type != BOFI_NULL)
3568 		panic("driver freeing bound dma_handle");
3569 	/*
3570 	 * remove from dhash, hhash and inuse lists
3571 	 */
3572 	hp->hnext->hprev = hp->hprev;
3573 	hp->hprev->hnext = hp->hnext;
3574 	hp->dnext->dprev = hp->dprev;
3575 	hp->dprev->dnext = hp->dnext;
3576 	hp->next->prev = hp->prev;
3577 	hp->prev->next = hp->next;
3578 	mutex_exit(&bofi_mutex);
3579 	mutex_exit(&bofi_low_mutex);
3580 
3581 	kmem_free(hp, sizeof (struct bofi_shadow));
3582 	return (retval);
3583 }
3584 
3585 
3586 /*
3587  * our ddi_dma_bindhdl routine
3588  */
3589 static int
3590 bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
3591 	ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
3592 	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3593 {
3594 	int retval = DDI_DMA_NORESOURCES;
3595 	auto struct ddi_dma_req dmareq;
3596 	struct bofi_shadow *hp;
3597 	struct bofi_shadow *hhashp;
3598 	ddi_dma_impl_t *mp;
3599 	unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3600 
3601 	/*
3602 	 * check we really have a shadow for this handle
3603 	 */
3604 	mutex_enter(&bofi_low_mutex);
3605 	mutex_enter(&bofi_mutex);
3606 	hhashp = HDL_HHASH(handle);
3607 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3608 		if (hp->hdl.dma_handle == handle)
3609 			break;
3610 	mutex_exit(&bofi_mutex);
3611 	mutex_exit(&bofi_low_mutex);
3612 	if (hp == hhashp) {
3613 		/*
3614 		 * no we don't - just call nexus to do the real work
3615 		 */
3616 		return (save_bus_ops.bus_dma_bindhdl(dip, rdip, handle,
3617 		    dmareqp, cookiep, ccountp));
3618 	}
3619 	/*
3620 	 * yes we have - see if it's already bound
3621 	 */
3622 	if (hp->type != BOFI_NULL)
3623 		return (DDI_DMA_INUSE);
3624 
3625 	hp->flags = dmareqp->dmar_flags;
3626 	/*
3627 	 * get a kernel virtual mapping
3628 	 */
3629 	hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3630 	if (hp->addr == NULL)
3631 		goto error;
3632 	if (bofi_sync_check) {
3633 		/*
3634 		 * Take a copy and pass pointers to this up to nexus instead.
3635 		 * Data will be copied from the original on explicit
3636 		 * and implicit ddi_dma_sync()
3637 		 *
3638 		 * - maintain page alignment because some devices assume it.
3639 		 */
3640 		hp->origaddr = hp->addr;
3641 		hp->allocaddr = ddi_umem_alloc(
3642 		    ((uintptr_t)hp->addr & pagemask) + hp->len,
3643 		    (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
3644 		    &hp->umem_cookie);
3645 		if (hp->allocaddr == NULL)
3646 			goto error;
3647 		hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3648 		if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3649 			xbcopy(hp->origaddr, hp->addr, hp->len);
3650 		dmareq = *dmareqp;
3651 		dmareq.dmar_object.dmao_size = hp->len;
3652 		dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3653 		dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3654 		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3655 		dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3656 		dmareqp = &dmareq;
3657 	}
3658 	/*
3659 	 * call nexus to do the real work
3660 	 */
3661 	retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3662 	    cookiep, ccountp);
3663 	if (retval != DDI_SUCCESS)
3664 		goto error2;
3665 	/*
3666 	 * unset DMP_NOSYNC
3667 	 */
3668 	mp = (ddi_dma_impl_t *)handle;
3669 	mp->dmai_rflags &= ~DMP_NOSYNC;
3670 	/*
3671 	 * chain on any pre-existing errdefs that apply to this
3672 	 * acc_handle and corrupt if required (as there is an implicit
3673 	 * ddi_dma_sync() in this call)
3674 	 */
3675 	mutex_enter(&bofi_low_mutex);
3676 	mutex_enter(&bofi_mutex);
3677 	hp->type = BOFI_DMA_HDL;
3678 	chain_on_errdefs(hp);
3679 	mutex_exit(&bofi_mutex);
3680 	mutex_exit(&bofi_low_mutex);
3681 	return (retval);
3682 
3683 error:
3684 	if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3685 		/*
3686 		 * what to do here? Wait a bit and try again
3687 		 */
3688 		(void) timeout((void (*)())dmareqp->dmar_fp,
3689 		    dmareqp->dmar_arg, 10);
3690 	}
3691 error2:
3692 	if (hp) {
3693 		ddi_dmareq_mapout(hp->mapaddr, hp->len);
3694 		if (bofi_sync_check && hp->allocaddr)
3695 			ddi_umem_free(hp->umem_cookie);
3696 		hp->mapaddr = NULL;
3697 		hp->allocaddr = NULL;
3698 		hp->origaddr = NULL;
3699 	}
3700 	return (retval);
3701 }
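
/*
 * A sketch of the driver-side call that lands in bofi_dma_bindhdl
 * above (hypothetical driver code; "buf" and "len" are placeholders):
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	if (ddi_dma_addr_bind_handle(dh, NULL, buf, len,
 *	    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) != DDI_DMA_MAPPED)
 *		return (DDI_FAILURE);
 *
 * for a DDI_DMA_WRITE binding with bofi_sync_check set, the data is
 * copied into the shadow buffer at bind time, and matching errdefs are
 * chained on (with corruption if required, since the bind carries an
 * implicit ddi_dma_sync for the device).
 */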
3702 
3703 
3704 /*
3705  * our ddi_dma_unbindhdl routine
3706  */
3707 static int
3708 bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3709 {
3710 	struct bofi_link *lp, *next_lp;
3711 	struct bofi_errent *ep;
3712 	int retval;
3713 	struct bofi_shadow *hp;
3714 	struct bofi_shadow *hhashp;
3715 
3716 	/*
3717 	 * call nexus to do the real work
3718 	 */
3719 	retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
3720 	if (retval != DDI_SUCCESS)
3721 		return (retval);
3722 	/*
3723 	 * check we really have a shadow for this handle
3724 	 */
3725 	mutex_enter(&bofi_low_mutex);
3726 	mutex_enter(&bofi_mutex);
3727 	hhashp = HDL_HHASH(handle);
3728 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3729 		if (hp->hdl.dma_handle == handle)
3730 			break;
3731 	if (hp == hhashp) {
3732 		mutex_exit(&bofi_mutex);
3733 		mutex_exit(&bofi_low_mutex);
3734 		return (retval);
3735 	}
3736 	/*
3737 	 * yes we have - see if it's already unbound
3738 	 */
3739 	if (hp->type == BOFI_NULL)
3740 		panic("driver unbinding unbound dma_handle");
3741 	/*
3742 	 * free any errdef link structures tagged on to this
3743 	 * shadow handle
3744 	 */
3745 	for (lp = hp->link; lp != NULL; ) {
3746 		next_lp = lp->link;
3747 		/*
3748 		 * there is an implicit sync_for_cpu on free -
3749 		 * may need to corrupt
3750 		 */
3751 		ep = lp->errentp;
3752 		if ((ep->errdef.access_type & BOFI_DMA_R) &&
3753 		    (hp->flags & DDI_DMA_READ) &&
3754 		    (ep->state & BOFI_DEV_ACTIVE)) {
3755 			do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
3756 		}
3757 		lp->link = bofi_link_freelist;
3758 		bofi_link_freelist = lp;
3759 		lp = next_lp;
3760 	}
3761 	hp->link = NULL;
3762 	hp->type = BOFI_NULL;
3763 	mutex_exit(&bofi_mutex);
3764 	mutex_exit(&bofi_low_mutex);
3765 
3766 	if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
3767 		/*
3768 		 * implicit sync_for_cpu - copy data back
3769 		 */
3770 		if (hp->allocaddr)
3771 			xbcopy(hp->addr, hp->origaddr, hp->len);
3772 	ddi_dmareq_mapout(hp->mapaddr, hp->len);
3773 	if (bofi_sync_check && hp->allocaddr)
3774 		ddi_umem_free(hp->umem_cookie);
3775 	hp->mapaddr = NULL;
3776 	hp->allocaddr = NULL;
3777 	hp->origaddr = NULL;
3778 	return (retval);
3779 }
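
/*
 * The matching teardown in the driver is simply (hypothetical sketch):
 *
 *	(void) ddi_dma_unbind_handle(dh);
 *
 * note that for a DDI_DMA_READ binding the unbind carries an implicit
 * sync-for-cpu, which is why the loop above corrupts BOFI_DMA_R
 * errdefs before the driver gets to look at the received data.
 */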
3780 
3781 
3782 /*
3783  * our ddi_dma_sync routine
3784  */
3785 static int
3786 bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
3787 		ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
3788 {
3789 	struct bofi_link *lp;
3790 	struct bofi_errent *ep;
3791 	struct bofi_shadow *hp;
3792 	struct bofi_shadow *hhashp;
3793 	int retval;
3794 
3795 	if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
3796 		/*
3797 		 * in this case get nexus driver to do sync first
3798 		 */
3799 		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3800 		    len, flags);
3801 		if (retval != DDI_SUCCESS)
3802 			return (retval);
3803 	}
3804 	/*
3805 	 * check we really have a shadow for this handle
3806 	 */
3807 	mutex_enter(&bofi_low_mutex);
3808 	mutex_enter(&bofi_mutex);
3809 	hhashp = HDL_HHASH(handle);
3810 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3811 		if (hp->hdl.dma_handle == handle &&
3812 		    hp->type == BOFI_DMA_HDL)
3813 			break;
3814 	mutex_exit(&bofi_mutex);
3815 	mutex_exit(&bofi_low_mutex);
3816 	if (hp != hhashp) {
3817 		/*
3818 		 * yes - do we need to copy data from original
3819 		 */
3820 		if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
3821 			if (hp->allocaddr)
3822 				xbcopy(hp->origaddr+off, hp->addr+off,
3823 				    len ? len : (hp->len - off));
3824 		/*
3825 		 * yes - check if we need to corrupt the data
3826 		 */
3827 		mutex_enter(&bofi_low_mutex);
3828 		mutex_enter(&bofi_mutex);
3829 		for (lp = hp->link; lp != NULL; lp = lp->link) {
3830 			ep = lp->errentp;
3831 			if ((((ep->errdef.access_type & BOFI_DMA_R) &&
3832 			    (flags == DDI_DMA_SYNC_FORCPU ||
3833 			    flags == DDI_DMA_SYNC_FORKERNEL)) ||
3834 			    ((ep->errdef.access_type & BOFI_DMA_W) &&
3835 			    (flags == DDI_DMA_SYNC_FORDEV))) &&
3836 			    (ep->state & BOFI_DEV_ACTIVE)) {
3837 				do_dma_corrupt(hp, ep, flags, off,
3838 				    len ? len : (hp->len - off));
3839 			}
3840 		}
3841 		mutex_exit(&bofi_mutex);
3842 		mutex_exit(&bofi_low_mutex);
3843 		/*
3844 		 *  do we need to copy data to original
3845 		 */
3846 		if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
3847 		    flags == DDI_DMA_SYNC_FORKERNEL))
3848 			if (hp->allocaddr)
3849 				xbcopy(hp->addr+off, hp->origaddr+off,
3850 				    len ? len : (hp->len - off));
3851 	}
3852 	if (flags == DDI_DMA_SYNC_FORDEV)
3853 		/*
3854 		 * in this case get nexus driver to do sync last
3855 		 */
3856 		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3857 		    len, flags);
3858 	return (retval);
3859 }
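
/*
 * Driver-visible behaviour of the intercept above, as a sketch
 * (hypothetical driver code):
 *
 *	before reading device-written data:
 *		(void) ddi_dma_sync(dh, 0, len, DDI_DMA_SYNC_FORCPU);
 *	after writing data for the device:
 *		(void) ddi_dma_sync(dh, 0, len, DDI_DMA_SYNC_FORDEV);
 *
 * the ordering in the intercept matters: for FORCPU/FORKERNEL the real
 * nexus sync must happen first so we corrupt what the cpu will
 * actually read; for FORDEV we corrupt first and sync last so the
 * device sees the corrupted data.
 */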
3860 
3861 
3862 /*
3863  * our dma_win routine
3864  */
3865 static int
3866 bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
3867 	ddi_dma_handle_t handle, uint_t win, off_t *offp,
3868 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3869 {
3870 	struct bofi_shadow *hp;
3871 	struct bofi_shadow *hhashp;
3872 	int retval;
3873 	ddi_dma_impl_t *mp;
3874 
3875 	/*
3876 	 * call nexus to do the real work
3877 	 */
3878 	retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
3879 	    cookiep, ccountp);
3880 	if (retval != DDI_SUCCESS)
3881 		return (retval);
3882 	/*
3883 	 * check we really have a shadow for this handle
3884 	 */
3885 	mutex_enter(&bofi_low_mutex);
3886 	mutex_enter(&bofi_mutex);
3887 	hhashp = HDL_HHASH(handle);
3888 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3889 		if (hp->hdl.dma_handle == handle)
3890 			break;
3891 	if (hp != hhashp) {
3892 		/*
3893 		 * yes - make sure DMP_NOSYNC is unset
3894 		 */
3895 		mp = (ddi_dma_impl_t *)handle;
3896 		mp->dmai_rflags &= ~DMP_NOSYNC;
3897 	}
3898 	mutex_exit(&bofi_mutex);
3899 	mutex_exit(&bofi_low_mutex);
3900 	return (retval);
3901 }
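
/*
 * A sketch of the window-walking loop that exercises this intercept
 * (hypothetical driver code; "nwin" would come from ddi_dma_numwin()):
 *
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(dh, win, &off, &winlen,
 *		    &cookie, &ccount) != DDI_SUCCESS)
 *			break;
 *		... program device from cookie ...
 *	}
 *
 * clearing DMP_NOSYNC on each window change keeps the implicit syncs
 * (and hence our corruption opportunities) from being optimized away.
 */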
3902 
3903 
3904 /*
3905  * our dma_ctl routine
3906  */
3907 static int
3908 bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
3909 		ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
3910 		off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
3911 {
3912 	struct bofi_link *lp, *next_lp;
3913 	struct bofi_errent *ep;
3914 	struct bofi_shadow *hp;
3915 	struct bofi_shadow *hhashp;
3916 	int retval;
3917 	int i;
3918 	struct bofi_shadow *dummyhp;
3919 	ddi_dma_impl_t *mp;
3920 
3921 	/*
3922 	 * get nexus to do real work
3923 	 */
3924 	retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
3925 	    lenp, objp, flags);
3926 	if (retval != DDI_SUCCESS)
3927 		return (retval);
3928 	/*
3929 	 * if driver_list is set, only intercept those drivers
3930 	 */
3931 	if (!driver_under_test(rdip))
3932 		return (DDI_SUCCESS);
3933 
3934 #if defined(__sparc)
3935 	/*
3936 	 * check if this is a dvma_reserve - that one's like a
3937 	 * dma_allochdl and needs to be handled separately
3938 	 */
3939 	if (request == DDI_DMA_RESERVE) {
3940 		bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
3941 		return (DDI_SUCCESS);
3942 	}
3943 #endif
3944 	/*
3945 	 * check we really have a shadow for this handle
3946 	 */
3947 	mutex_enter(&bofi_low_mutex);
3948 	mutex_enter(&bofi_mutex);
3949 	hhashp = HDL_HHASH(handle);
3950 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3951 		if (hp->hdl.dma_handle == handle)
3952 			break;
3953 	if (hp == hhashp) {
3954 		mutex_exit(&bofi_mutex);
3955 		mutex_exit(&bofi_low_mutex);
3956 		return (retval);
3957 	}
3958 	/*
3959 	 * yes we have - see what kind of command this is
3960 	 */
3961 	switch (request) {
3962 	case DDI_DMA_RELEASE:
3963 		/*
3964 		 * dvma release - release dummy handle and all the index handles
3965 		 */
3966 		dummyhp = hp;
3967 		dummyhp->hnext->hprev = dummyhp->hprev;
3968 		dummyhp->hprev->hnext = dummyhp->hnext;
3969 		mutex_exit(&bofi_mutex);
3970 		mutex_exit(&bofi_low_mutex);
3971 		for (i = 0; i < dummyhp->len; i++) {
3972 			hp = dummyhp->hparrayp[i];
3973 			/*
3974 			 * check none of the index handles are still loaded
3975 			 */
3976 			if (hp->type != BOFI_NULL)
3977 				panic("driver releasing loaded dvma");
3978 			/*
3979 			 * remove from dhash and inuse lists
3980 			 */
3981 			mutex_enter(&bofi_low_mutex);
3982 			mutex_enter(&bofi_mutex);
3983 			hp->dnext->dprev = hp->dprev;
3984 			hp->dprev->dnext = hp->dnext;
3985 			hp->next->prev = hp->prev;
3986 			hp->prev->next = hp->next;
3987 			mutex_exit(&bofi_mutex);
3988 			mutex_exit(&bofi_low_mutex);
3989 
3990 			if (bofi_sync_check && hp->allocaddr)
3991 				ddi_umem_free(hp->umem_cookie);
3992 			kmem_free(hp, sizeof (struct bofi_shadow));
3993 		}
3994 		kmem_free(dummyhp->hparrayp, dummyhp->len *
3995 		    sizeof (struct bofi_shadow *));
3996 		kmem_free(dummyhp, sizeof (struct bofi_shadow));
3997 		return (retval);
3998 	case DDI_DMA_FREE:
3999 		/*
4000 		 * ddi_dma_free case - remove from dhash, hhash and inuse lists
4001 		 */
4002 		hp->hnext->hprev = hp->hprev;
4003 		hp->hprev->hnext = hp->hnext;
4004 		hp->dnext->dprev = hp->dprev;
4005 		hp->dprev->dnext = hp->dnext;
4006 		hp->next->prev = hp->prev;
4007 		hp->prev->next = hp->next;
4008 		/*
4009 		 * free any errdef link structures tagged on to this
4010 		 * shadow handle
4011 		 */
4012 		for (lp = hp->link; lp != NULL; ) {
4013 			next_lp = lp->link;
4014 			/*
4015 			 * there is an implicit sync_for_cpu on free -
4016 			 * may need to corrupt
4017 			 */
4018 			ep = lp->errentp;
4019 			if ((ep->errdef.access_type & BOFI_DMA_R) &&
4020 			    (hp->flags & DDI_DMA_READ) &&
4021 			    (ep->state & BOFI_DEV_ACTIVE)) {
4022 				do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU,
4023 				    0, hp->len);
4024 			}
4025 			lp->link = bofi_link_freelist;
4026 			bofi_link_freelist = lp;
4027 			lp = next_lp;
4028 		}
4029 		hp->link = NULL;
4030 		mutex_exit(&bofi_mutex);
4031 		mutex_exit(&bofi_low_mutex);
4032 
4033 		if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
4034 			if (hp->allocaddr)
4035 				xbcopy(hp->addr, hp->origaddr, hp->len);
4036 		ddi_dmareq_mapout(hp->mapaddr, hp->len);
4037 		if (bofi_sync_check && hp->allocaddr)
4038 			ddi_umem_free(hp->umem_cookie);
4039 		kmem_free(hp, sizeof (struct bofi_shadow));
4040 		return (retval);
4041 	case DDI_DMA_MOVWIN:
4042 		mp = (ddi_dma_impl_t *)handle;
4043 		mp->dmai_rflags &= ~DMP_NOSYNC;
4044 		break;
4045 	case DDI_DMA_NEXTWIN:
4046 		mp = (ddi_dma_impl_t *)handle;
4047 		mp->dmai_rflags &= ~DMP_NOSYNC;
4048 		break;
4049 	default:
4050 		break;
4051 	}
4052 	mutex_exit(&bofi_mutex);
4053 	mutex_exit(&bofi_low_mutex);
4054 	return (retval);
4055 }
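
/*
 * The DDI_DMA_FREE and DDI_DMA_RELEASE cases above are reached through
 * the old-style interfaces, roughly (hypothetical sketch):
 *
 *	(void) ddi_dma_free(dh);	(DDI_DMA_FREE, legacy drivers)
 *	dvma_release(dvh);		(DDI_DMA_RESERVE teardown, sparc)
 *
 * both are legacy paths; newer drivers use ddi_dma_unbind_handle() and
 * ddi_dma_free_handle(), which go through the bindhdl/freehdl
 * intercepts earlier in this file.
 */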
4056 
4057 #if defined(__sparc)
4058 /*
4059  * dvma reserve case from bofi_dma_ctl()
4060  */
4061 static void
4062 bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle)
4063 {
4064 	struct bofi_shadow *hp;
4065 	struct bofi_shadow *dummyhp;
4066 	struct bofi_shadow *dhashp;
4067 	struct bofi_shadow *hhashp;
4068 	ddi_dma_impl_t *mp;
4069 	struct fast_dvma *nexus_private;
4070 	int i, count;
4071 
4072 	mp = (ddi_dma_impl_t *)handle;
4073 	count = mp->dmai_ndvmapages;
4074 	/*
4075 	 * allocate dummy shadow handle structure
4076 	 */
4077 	dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP);
4078 	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
4079 		/*
4080 		 * overlay our routines over the nexus's dvma routines
4081 		 */
4082 		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
4083 		dummyhp->save.dvma_ops = *(nexus_private->ops);
4084 		nexus_private->ops = &bofi_dvma_ops;
4085 	}
4086 	/*
4087 	 * now fill in the dummy handle. This just gets put on the hhash
4088 	 * queue so our dvma routines can find it and index off to the
4089 	 * handle they really want.
4090 	 */
4091 	(void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE);
4092 	dummyhp->instance = ddi_get_instance(rdip);
4093 	dummyhp->rnumber = -1;
4094 	dummyhp->dip = rdip;
4095 	dummyhp->len = count;
4096 	dummyhp->hdl.dma_handle = handle;
4097 	dummyhp->link = NULL;
4098 	dummyhp->type = BOFI_NULL;
4099 	/*
4100 	 * allocate space for real handles
4101 	 */
4102 	dummyhp->hparrayp = kmem_alloc(count *
4103 	    sizeof (struct bofi_shadow *), KM_SLEEP);
4104 	for (i = 0; i < count; i++) {
4105 		/*
4106 		 * allocate shadow handle structures and fill them in
4107 		 */
4108 		hp = kmem_zalloc(sizeof (*hp), KM_SLEEP);
4109 		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
4110 		hp->instance = ddi_get_instance(rdip);
4111 		hp->rnumber = -1;
4112 		hp->dip = rdip;
4113 		hp->hdl.dma_handle = 0;
4114 		hp->link = NULL;
4115 		hp->type = BOFI_NULL;
4116 		if (bofi_sync_check) {
4117 			unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
4118 			/*
4119 			 * Take a copy and set this to be hp->addr
4120 			 * Data will be copied to and from the original on
4121 			 * explicit and implicit ddi_dma_sync()
4122 			 *
4123 			 * - maintain page alignment because some devices
4124 			 * assume it.
4125 			 */
4126 			hp->allocaddr = ddi_umem_alloc(
4127 			    ((uintptr_t)hp->addr & pagemask) + pagemask + 1,
4128 			    KM_SLEEP, &hp->umem_cookie);
4129 			hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
4130 		}
4131 		/*
4132 		 * add to dhash and inuse lists.
4133 		 * these don't go on hhash queue.
4134 		 */
4135 		mutex_enter(&bofi_low_mutex);
4136 		mutex_enter(&bofi_mutex);
4137 		hp->next = shadow_list.next;
4138 		shadow_list.next->prev = hp;
4139 		hp->prev = &shadow_list;
4140 		shadow_list.next = hp;
4141 		dhashp = HDL_DHASH(hp->dip);
4142 		hp->dnext = dhashp->dnext;
4143 		dhashp->dnext->dprev = hp;
4144 		hp->dprev = dhashp;
4145 		dhashp->dnext = hp;
4146 		dummyhp->hparrayp[i] = hp;
4147 		mutex_exit(&bofi_mutex);
4148 		mutex_exit(&bofi_low_mutex);
4149 	}
4150 	/*
4151 	 * add dummy handle to hhash list only
4152 	 */
4153 	mutex_enter(&bofi_low_mutex);
4154 	mutex_enter(&bofi_mutex);
4155 	hhashp = HDL_HHASH(handle);
4156 	dummyhp->hnext = hhashp->hnext;
4157 	hhashp->hnext->hprev = dummyhp;
4158 	dummyhp->hprev = hhashp;
4159 	hhashp->hnext = dummyhp;
4160 	mutex_exit(&bofi_mutex);
4161 	mutex_exit(&bofi_low_mutex);
4162 }
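
/*
 * A sketch of the sparc fast-dvma sequence that creates the dummy
 * shadow above (hypothetical driver code; "xx_lim" is a placeholder
 * ddi_dma_lim_t):
 *
 *	ddi_dma_handle_t dvh;
 *
 *	if (dvma_reserve(dip, &xx_lim, npages, &dvh) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * the reserve itself arrives via bofi_dma_ctl() as DDI_DMA_RESERVE;
 * the per-page shadow handles allocated here are then indexed by the
 * dvma_kaddr_load()/dvma_unload() intercepts below.
 */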
4163 
4164 /*
4165  * our dvma_kaddr_load()
4166  */
4167 static void
4168 bofi_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
4169 	ddi_dma_cookie_t *cp)
4170 {
4171 	struct bofi_shadow *dummyhp;
4172 	struct bofi_shadow *hp;
4173 	struct bofi_shadow *hhashp;
4174 	struct bofi_errent *ep;
4175 	struct bofi_link   *lp;
4176 
4177 	/*
4178 	 * check we really have a dummy shadow for this handle
4179 	 */
4180 	mutex_enter(&bofi_low_mutex);
4181 	mutex_enter(&bofi_mutex);
4182 	hhashp = HDL_HHASH(h);
4183 	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4184 	    dummyhp = dummyhp->hnext)
4185 		if (dummyhp->hdl.dma_handle == h)
4186 			break;
4187 	mutex_exit(&bofi_mutex);
4188 	mutex_exit(&bofi_low_mutex);
4189 	if (dummyhp == hhashp) {
4190 		/*
4191 		 * no dummy shadow - panic
4192 		 */
4193 		panic("driver dvma_kaddr_load with no reserve");
4194 	}
4195 
4196 	/*
4197 	 * find real hp
4198 	 */
4199 	hp = dummyhp->hparrayp[index];
4200 	/*
4201 	 * check it's not already loaded
4202 	 */
4203 	if (hp->type != BOFI_NULL)
4204 		panic("driver loading loaded dvma");
4205 	/*
4206 	 * if we're doing copying, we just need to change origaddr and get
4207 	 * the nexus to map hp->addr again;
4208 	 * if not, set hp->addr to the new address.
4209 	 * - note these are always kernel virtual addresses - no need to map
4210 	 */
4211 	if (bofi_sync_check && hp->allocaddr) {
4212 		hp->origaddr = a;
4213 		a = hp->addr;
4214 	} else
4215 		hp->addr = a;
4216 	hp->len = len;
4217 	/*
4218 	 * get nexus to do the real work
4219 	 */
4220 	dummyhp->save.dvma_ops.dvma_kaddr_load(h, a, len, index, cp);
4221 	/*
4222 	 * chain on any pre-existing errdefs that apply to this dma_handle
4223 	 * no need to corrupt - there's no implicit dma_sync on this one
4224 	 */
4225 	mutex_enter(&bofi_low_mutex);
4226 	mutex_enter(&bofi_mutex);
4227 	hp->type = BOFI_DMA_HDL;
4228 	for (ep = errent_listp; ep != NULL; ep = ep->next) {
4229 		if (ddi_name_to_major(hp->name) ==
4230 		    ddi_name_to_major(ep->name) &&
4231 		    hp->instance == ep->errdef.instance &&
4232 		    (ep->errdef.rnumber == -1 ||
4233 		    hp->rnumber == ep->errdef.rnumber) &&
4234 		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
4235 		    (((uintptr_t)(hp->addr + ep->errdef.offset +
4236 		    ep->errdef.len) & ~LLSZMASK) >
4237 		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
4238 		    LLSZMASK) & ~LLSZMASK)))) {
4239 			lp = bofi_link_freelist;
4240 			if (lp != NULL) {
4241 				bofi_link_freelist = lp->link;
4242 				lp->errentp = ep;
4243 				lp->link = hp->link;
4244 				hp->link = lp;
4245 			}
4246 		}
4247 	}
4248 	mutex_exit(&bofi_mutex);
4249 	mutex_exit(&bofi_low_mutex);
4250 }
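
/*
 * The alignment test in the errdef-matching loop above deserves a
 * worked example.  Corruption is done a longlong at a time, so an
 * errdef is only chained on if its range covers at least one aligned
 * 8-byte word (assuming LLSZMASK is sizeof (longlong) - 1, i.e. 7):
 *
 *	start = addr + offset, end = start + len
 *	rounded-up start:	(start + LLSZMASK) & ~LLSZMASK
 *	rounded-down end:	end & ~LLSZMASK
 *
 * e.g. start 0x1003, len 0x20: rounded-up start is 0x1008 and
 * rounded-down end is 0x1020, so there is a whole longlong to corrupt
 * and the errdef is linked; with len 4 the rounded-down end (0x1000)
 * does not exceed the rounded-up start and the errdef is skipped.
 */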
4251 
4252 /*
4253  * our dvma_unload()
4254  */
4255 static void
4256 bofi_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
4257 {
4258 	struct bofi_link *lp, *next_lp;
4259 	struct bofi_errent *ep;
4260 	struct bofi_shadow *dummyhp;
4261 	struct bofi_shadow *hp;
4262 	struct bofi_shadow *hhashp;
4263 
4264 	/*
4265 	 * check we really have a dummy shadow for this handle
4266 	 */
4267 	mutex_enter(&bofi_low_mutex);
4268 	mutex_enter(&bofi_mutex);
4269 	hhashp = HDL_HHASH(h);
4270 	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4271 	    dummyhp = dummyhp->hnext)
4272 		if (dummyhp->hdl.dma_handle == h)
4273 			break;
4274 	mutex_exit(&bofi_mutex);
4275 	mutex_exit(&bofi_low_mutex);
4276 	if (dummyhp == hhashp) {
4277 		/*
4278 		 * no dummy shadow - panic
4279 		 */
4280 		panic("driver dvma_unload with no reserve");
4281 	}
4282 	dummyhp->save.dvma_ops.dvma_unload(h, index, view);
4283 	/*
4284 	 * find real hp
4285 	 */
4286 	hp = dummyhp->hparrayp[index];
4287 	/*
4288 	 * check its not already unloaded
4289 	 */
4290 	if (hp->type == BOFI_NULL)
4291 		panic("driver unloading unloaded dvma");
4292 	/*
4293 	 * free any errdef link structures tagged on to this
4294 	 * shadow handle - do corruption if necessary
4295 	 */
4296 	mutex_enter(&bofi_low_mutex);
4297 	mutex_enter(&bofi_mutex);
4298 	for (lp = hp->link; lp != NULL; ) {
4299 		next_lp = lp->link;
4300 		ep = lp->errentp;
4301 		if ((ep->errdef.access_type & BOFI_DMA_R) &&
4302 		    (view == DDI_DMA_SYNC_FORCPU ||
4303 		    view == DDI_DMA_SYNC_FORKERNEL) &&
4304 		    (ep->state & BOFI_DEV_ACTIVE)) {
4305 			do_dma_corrupt(hp, ep, view, 0, hp->len);
4306 		}
4307 		lp->link = bofi_link_freelist;
4308 		bofi_link_freelist = lp;
4309 		lp = next_lp;
4310 	}
4311 	hp->link = NULL;
4312 	hp->type = BOFI_NULL;
4313 	mutex_exit(&bofi_mutex);
4314 	mutex_exit(&bofi_low_mutex);
4315 	/*
4316 	 * if there is an explicit sync_for_cpu, then do copy to original
4317 	 */
4318 	if (bofi_sync_check &&
4319 	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL))
4320 		if (hp->allocaddr)
4321 			xbcopy(hp->addr, hp->origaddr, hp->len);
4322 }
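
/*
 * Putting the dvma intercepts together, a typical fast-dvma transfer
 * looks like this from the driver (hypothetical sketch):
 *
 *	ddi_dma_cookie_t cookie;
 *
 *	dvma_kaddr_load(dvh, kaddr, len, index, &cookie);
 *	... program device from cookie, wait for completion ...
 *	dvma_sync(dvh, index, DDI_DMA_SYNC_FORCPU);
 *	... read the data ...
 *	dvma_unload(dvh, index, DDI_DMA_SYNC_FORCPU);
 *
 * the load chains errdefs on; the sync and the unload are where
 * BOFI_DMA_R corruption is injected into the received data.
 */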
4323 
4324 /*
4325  * our dvma_sync()
4326  */
4327 static void
4328 bofi_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
4329 {
4330 	struct bofi_link *lp;
4331 	struct bofi_errent *ep;
4332 	struct bofi_shadow *hp;
4333 	struct bofi_shadow *dummyhp;
4334 	struct bofi_shadow *hhashp;
4335 
4336 	/*
4337 	 * check we really have a dummy shadow for this handle
4338 	 */
4339 	mutex_enter(&bofi_low_mutex);
4340 	mutex_enter(&bofi_mutex);
4341 	hhashp = HDL_HHASH(h);
4342 	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4343 	    dummyhp = dummyhp->hnext)
4344 		if (dummyhp->hdl.dma_handle == h)
4345 			break;
4346 	mutex_exit(&bofi_mutex);
4347 	mutex_exit(&bofi_low_mutex);
4348 	if (dummyhp == hhashp) {
4349 		/*
4350 		 * no dummy shadow - panic
4351 		 */
4352 		panic("driver dvma_sync with no reserve");
4353 	}
4354 	/*
4355 	 * find real hp
4356 	 */
4357 	hp = dummyhp->hparrayp[index];
4358 	/*
4359 	 * check its already loaded
4360 	 */
4361 	if (hp->type == BOFI_NULL)
4362 		panic("driver syncing unloaded dvma");
4363 	if (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)
4364 		/*
4365 		 * in this case do sync first
4366 		 */
4367 		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
4368 	/*
4369 	 * if there is an explicit sync_for_dev, then do copy from original
4370 	 */
4371 	if (bofi_sync_check && view == DDI_DMA_SYNC_FORDEV) {
4372 		if (hp->allocaddr)
4373 			xbcopy(hp->origaddr, hp->addr, hp->len);
4374 	}
4375 	/*
4376 	 * do corruption if necessary
4377 	 */
4378 	mutex_enter(&bofi_low_mutex);
4379 	mutex_enter(&bofi_mutex);
4380 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4381 		ep = lp->errentp;
4382 		if ((((ep->errdef.access_type & BOFI_DMA_R) &&
4383 		    (view == DDI_DMA_SYNC_FORCPU ||
4384 		    view == DDI_DMA_SYNC_FORKERNEL)) ||
4385 		    ((ep->errdef.access_type & BOFI_DMA_W) &&
4386 		    (view == DDI_DMA_SYNC_FORDEV))) &&
4387 		    (ep->state & BOFI_DEV_ACTIVE)) {
4388 			do_dma_corrupt(hp, ep, view, 0, hp->len);
4389 		}
4390 	}
4391 	mutex_exit(&bofi_mutex);
4392 	mutex_exit(&bofi_low_mutex);
4393 	/*
4394 	 * if there is an explicit sync_for_cpu, then do copy to original
4395 	 */
4396 	if (bofi_sync_check &&
4397 	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)) {
4398 		if (hp->allocaddr)
4399 			xbcopy(hp->addr, hp->origaddr, hp->len);
4400 	}
4401 	if (view == DDI_DMA_SYNC_FORDEV)
4402 		/*
4403 		 * in this case do sync last
4404 		 */
4405 		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
4406 }
4407 #endif
4408 
4409 /*
4410  * bofi intercept routine - gets called instead of the user's interrupt routine
4411  */
4412 static uint_t
4413 bofi_intercept_intr(caddr_t xp)
4414 {
4415 	struct bofi_errent *ep;
4416 	struct bofi_link   *lp;
4417 	struct bofi_shadow *hp;
4418 	int intr_count = 1;
4419 	int i;
4420 	uint_t retval = DDI_INTR_UNCLAIMED;
4421 	uint_t result;
4422 	int unclaimed_counter = 0;
4423 	int jabber_detected = 0;
4424 
4425 	hp = (struct bofi_shadow *)xp;
4426 	/*
4427 	 * check if nothing to do
4428 	 */
4429 	if (hp->link == NULL)
4430 		return (hp->save.intr.int_handler
4431 		    (hp->save.intr.int_handler_arg1, NULL));
4432 	mutex_enter(&bofi_mutex);
4433 	/*
4434 	 * look for any errdefs
4435 	 */
4436 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4437 		ep = lp->errentp;
4438 		if (ep->state & BOFI_DEV_ACTIVE) {
4439 			/*
4440 			 * got one
4441 			 */
4442 			if ((ep->errdef.access_count ||
4443 			    ep->errdef.fail_count) &&
4444 			    (ep->errdef.access_type & BOFI_LOG))
4445 				log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
4446 			if (ep->errdef.access_count > 1) {
4447 				ep->errdef.access_count--;
4448 			} else if (ep->errdef.fail_count > 0) {
4449 				ep->errdef.fail_count--;
4450 				ep->errdef.access_count = 0;
4451 				/*
4452 				 * OK do "corruption"
4453 				 */
4454 				if (ep->errstate.fail_time == 0)
4455 					ep->errstate.fail_time = bofi_gettime();
4456 				switch (ep->errdef.optype) {
4457 				case BOFI_DELAY_INTR:
4458 					if (!hp->hilevel) {
4459 						drv_usecwait
4460 						    (ep->errdef.operand);
4461 					}
4462 					break;
4463 				case BOFI_LOSE_INTR:
4464 					intr_count = 0;
4465 					break;
4466 				case BOFI_EXTRA_INTR:
4467 					intr_count += ep->errdef.operand;
4468 					break;
4469 				default:
4470 					break;
4471 				}
4472 			}
4473 		}
4474 	}
4475 	mutex_exit(&bofi_mutex);
4476 	/*
4477 	 * send extra or fewer interrupts as requested
4478 	 */
4479 	for (i = 0; i < intr_count; i++) {
4480 		result = hp->save.intr.int_handler
4481 		    (hp->save.intr.int_handler_arg1, NULL);
4482 		if (result == DDI_INTR_CLAIMED)
4483 			unclaimed_counter >>= 1;
4484 		else if (++unclaimed_counter >= 20)
4485 			jabber_detected = 1;
4486 		if (i == 0)
4487 			retval = result;
4488 	}
4489 	/*
4490 	 * if more than 1000 spurious interrupts were requested and
4491 	 * jabber was not detected - panic
4492 	 */
4493 	if (intr_count > 1000 && !jabber_detected)
4494 		panic("undetected interrupt jabber: %s%d",
4495 		    hp->name, hp->instance);
4496 	/*
4497 	 * return first response - or "unclaimed" if none
4498 	 */
4499 	return (retval);
4500 }
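
/*
 * Interrupt errdefs driving the intercept above are created from
 * userland through the bofi control device.  A rough sketch (field
 * names as defined in <sys/bofi.h>; "fd" is assumed open on the
 * control device and the BOFI_START step is elided):
 *
 *	struct bofi_errdef ed;
 *
 *	bzero(&ed, sizeof (ed));
 *	(void) strcpy(ed.name, "xx");
 *	ed.namesize = strlen(ed.name);
 *	ed.instance = 0;
 *	ed.access_type = BOFI_INTR;
 *	ed.optype = BOFI_LOSE_INTR;
 *	ed.fail_count = 1;
 *	(void) ioctl(fd, BOFI_ADD_DEF, &ed);
 *
 * once activated, one interrupt for instance 0 of driver "xx" is
 * swallowed (intr_count set to 0 above); BOFI_EXTRA_INTR and
 * BOFI_DELAY_INTR are handled analogously.
 */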
4501 
4502 
4503 /*
4504  * our ddi_check_acc_hdl
4505  */
4506 /* ARGSUSED */
4507 static int
4508 bofi_check_acc_hdl(ddi_acc_impl_t *handle)
4509 {
4510 	struct bofi_shadow *hp;
4511 	struct bofi_link   *lp;
4512 	uint_t result = 0;
4513 
4514 	hp = handle->ahi_common.ah_bus_private;
4515 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
4516 		return (0);
4517 	}
4518 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4519 		/*
4520 		 * OR in error state from all associated
4521 		 * errdef structures
4522 		 */
4523 		if (lp->errentp->errdef.access_count == 0 &&
4524 		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4525 			result = (lp->errentp->errdef.acc_chk & 1);
4526 		}
4527 	}
4528 	mutex_exit(&bofi_mutex);
4529 	return (result);
4530 }
4531 
4532 /*
4533  * our ddi_check_dma_hdl
4534  */
4535 /* ARGSUSED */
4536 static int
4537 bofi_check_dma_hdl(ddi_dma_impl_t *handle)
4538 {
4539 	struct bofi_shadow *hp;
4540 	struct bofi_link   *lp;
4541 	struct bofi_shadow *hhashp;
4542 	uint_t result = 0;
4543 
4544 	if (!mutex_tryenter(&bofi_mutex)) {
4545 		return (0);
4546 	}
4547 	hhashp = HDL_HHASH(handle);
4548 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
4549 		if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
4550 			break;
4551 	if (hp == hhashp) {
4552 		mutex_exit(&bofi_mutex);
4553 		return (0);
4554 	}
4555 	if (!hp->link) {
4556 		mutex_exit(&bofi_mutex);
4557 		return (0);
4558 	}
4559 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4560 		/*
4561 		 * OR in error state from all associated
4562 		 * errdef structures
4563 		 */
4564 		if (lp->errentp->errdef.access_count == 0 &&
4565 		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4566 			result = ((lp->errentp->errdef.acc_chk & 2) ? 1 : 0);
4567 		}
4568 	}
4569 	mutex_exit(&bofi_mutex);
4570 	return (result);
4571 }
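
/*
 * These two routines back ddi_check_acc_handle()/ddi_check_dma_handle()
 * for shadowed handles: an errdef whose acc_chk has 1 set fails access
 * handle checks, and one with 2 set fails dma handle checks, once its
 * access_count has run out.  A hardened driver polls them after io,
 * roughly:
 *
 *	if (ddi_check_acc_handle(acc_hdl) != DDI_SUCCESS ||
 *	    ddi_check_dma_handle(dma_hdl) != DDI_SUCCESS) {
 *		... fault detected - retry or take device offline ...
 *	}
 *
 * so an errdef with acc_chk set lets the harness verify that the
 * driver actually notices a faulted handle.
 */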
4572 
4573 
4574 /* ARGSUSED */
4575 static int
4576 bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
4577 		    ddi_eventcookie_t eventhdl, void *impl_data)
4578 {
4579 	ddi_eventcookie_t ec;
4580 	struct ddi_fault_event_data *arg;
4581 	struct bofi_errent *ep;
4582 	struct bofi_shadow *hp;
4583 	struct bofi_shadow *dhashp;
4584 	struct bofi_link   *lp;
4585 
4586 	ASSERT(eventhdl);
4587 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
4588 		return (DDI_FAILURE);
4589 
4590 	if (ec != eventhdl)
4591 		return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
4592 		    impl_data));
4593 
4594 	arg = (struct ddi_fault_event_data *)impl_data;
4595 	mutex_enter(&bofi_mutex);
4596 	/*
4597 	 * find shadow handles with appropriate dev_infos
4598 	 * and set error reported on all associated errdef structures
4599 	 */
4600 	dhashp = HDL_DHASH(arg->f_dip);
4601 	for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
4602 		if (hp->dip == arg->f_dip) {
4603 			for (lp = hp->link; lp != NULL; lp = lp->link) {
4604 				ep = lp->errentp;
4605 				ep->errstate.errmsg_count++;
4606 				if ((ep->errstate.msg_time == NULL ||
4607 				    ep->errstate.severity > arg->f_impact) &&
4608 				    (ep->state & BOFI_DEV_ACTIVE)) {
4609 					ep->errstate.msg_time = bofi_gettime();
4610 					ep->errstate.severity = arg->f_impact;
4611 					(void) strncpy(ep->errstate.buffer,
4612 					    arg->f_message, ERRMSGSIZE);
4613 					ddi_trigger_softintr(ep->softintr_id);
4614 				}
4615 			}
4616 		}
4617 	}
4618 	mutex_exit(&bofi_mutex);
4619 	return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
4620 }
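
/*
 * The fault events intercepted above originate from hardened drivers
 * calling ddi_dev_report_fault(), e.g. (hypothetical sketch):
 *
 *	ddi_dev_report_fault(dip, DDI_SERVICE_LOST,
 *	    DDI_DEVICE_FAULT, "xx: device failed self-test");
 *
 * bofi snoops the resulting DDI_DEVI_FAULT_EVENT, records the message
 * and impact in every matching errdef's errstate, wakes any waiting
 * test program via the soft interrupt, and then passes the event on to
 * the real nexus routine.
 */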
4621 
4622 /*
4623  * our intr_ops routine
4624  */
4625 static int
4626 bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
4627     ddi_intr_handle_impl_t *hdlp, void *result)
4628 {
4629 	int retval;
4630 	struct bofi_shadow *hp;
4631 	struct bofi_shadow *dhashp;
4632 	struct bofi_shadow *hhashp;
4633 	struct bofi_errent *ep;
4634 	struct bofi_link   *lp, *next_lp;
4635 
4636 	switch (intr_op) {
4637 	case DDI_INTROP_ADDISR:
4638 		/*
4639 		 * if driver_list is set, only intercept those drivers
4640 		 */
4641 		if (!driver_under_test(rdip))
4642 			return (save_bus_ops.bus_intr_op(dip, rdip,
4643 			    intr_op, hdlp, result));
4644 		/*
4645 		 * allocate shadow handle structure and fill in
4646 		 */
4647 		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
4648 		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
4649 		hp->instance = ddi_get_instance(rdip);
4650 		hp->save.intr.int_handler = hdlp->ih_cb_func;
4651 		hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
4652 		hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
4653 		hdlp->ih_cb_arg1 = (caddr_t)hp;
4654 		hp->bofi_inum = hdlp->ih_inum;
4655 		hp->dip = rdip;
4656 		hp->link = NULL;
4657 		hp->type = BOFI_INT_HDL;
4658 		/*
4659 		 * save whether hilevel or not
4660 		 */
4661 
4662 		if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
4663 			hp->hilevel = 1;
4664 		else
4665 			hp->hilevel = 0;
4666 
4667 		/*
4668 		 * call nexus to do real work, but specifying our handler, and
4669 		 * our shadow handle as argument
4670 		 */
4671 		retval = save_bus_ops.bus_intr_op(dip, rdip,
4672 		    intr_op, hdlp, result);
4673 		if (retval != DDI_SUCCESS) {
4674 			kmem_free(hp, sizeof (struct bofi_shadow));
4675 			return (retval);
4676 		}
4677 		/*
4678 		 * add to dhash, hhash and inuse lists
4679 		 */
4680 		mutex_enter(&bofi_low_mutex);
4681 		mutex_enter(&bofi_mutex);
4682 		hp->next = shadow_list.next;
4683 		shadow_list.next->prev = hp;
4684 		hp->prev = &shadow_list;
4685 		shadow_list.next = hp;
4686 		hhashp = HDL_HHASH(hdlp->ih_inum);
4687 		hp->hnext = hhashp->hnext;
4688 		hhashp->hnext->hprev = hp;
4689 		hp->hprev = hhashp;
4690 		hhashp->hnext = hp;
4691 		dhashp = HDL_DHASH(hp->dip);
4692 		hp->dnext = dhashp->dnext;
4693 		dhashp->dnext->dprev = hp;
4694 		hp->dprev = dhashp;
4695 		dhashp->dnext = hp;
4696 		/*
4697 		 * chain on any pre-existing errdefs that apply to this
4698 		 * acc_handle
4699 		 */
4700 		for (ep = errent_listp; ep != NULL; ep = ep->next) {
4701 			if (ddi_name_to_major(hp->name) ==
4702 			    ddi_name_to_major(ep->name) &&
4703 			    hp->instance == ep->errdef.instance &&
4704 			    (ep->errdef.access_type & BOFI_INTR)) {
4705 				lp = bofi_link_freelist;
4706 				if (lp != NULL) {
4707 					bofi_link_freelist = lp->link;
4708 					lp->errentp = ep;
4709 					lp->link = hp->link;
4710 					hp->link = lp;
4711 				}
4712 			}
4713 		}
4714 		mutex_exit(&bofi_mutex);
4715 		mutex_exit(&bofi_low_mutex);
4716 		return (retval);
4717 	case DDI_INTROP_REMISR:
4718 		/*
4719 		 * call nexus routine first
4720 		 */
4721 		retval = save_bus_ops.bus_intr_op(dip, rdip,
4722 		    intr_op, hdlp, result);
4723 		/*
4724 		 * find shadow handle
4725 		 */
4726 		mutex_enter(&bofi_low_mutex);
4727 		mutex_enter(&bofi_mutex);
4728 		hhashp = HDL_HHASH(hdlp->ih_inum);
4729 		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
4730 			if (hp->dip == rdip &&
4731 			    hp->type == BOFI_INT_HDL &&
4732 			    hp->bofi_inum == hdlp->ih_inum) {
4733 				break;
4734 			}
4735 		}
4736 		if (hp == hhashp) {
4737 			mutex_exit(&bofi_mutex);
4738 			mutex_exit(&bofi_low_mutex);
4739 			return (retval);
4740 		}
4741 		/*
4742 		 * found one - remove from dhash, hhash and inuse lists
4743 		 */
4744 		hp->hnext->hprev = hp->hprev;
4745 		hp->hprev->hnext = hp->hnext;
4746 		hp->dnext->dprev = hp->dprev;
4747 		hp->dprev->dnext = hp->dnext;
4748 		hp->next->prev = hp->prev;
4749 		hp->prev->next = hp->next;
4750 		/*
4751 		 * free any errdef link structures
4752 		 * tagged on to this shadow handle
4753 		 */
4754 		for (lp = hp->link; lp != NULL; ) {
4755 			next_lp = lp->link;
4756 			lp->link = bofi_link_freelist;
4757 			bofi_link_freelist = lp;
4758 			lp = next_lp;
4759 		}
4760 		hp->link = NULL;
4761 		mutex_exit(&bofi_mutex);
4762 		mutex_exit(&bofi_low_mutex);
4763 		kmem_free(hp, sizeof (struct bofi_shadow));
4764 		return (retval);
4765 	default:
4766 		return (save_bus_ops.bus_intr_op(dip, rdip,
4767 		    intr_op, hdlp, result));
4768 	}
4769 }
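
/*
 * The DDI_INTROP_ADDISR case above is what a leaf driver's interrupt
 * registration ultimately funnels into.  A sketch of the modern path
 * (hypothetical driver code, error handling elided):
 *
 *	ddi_intr_handle_t ih;
 *	int nactual;
 *
 *	(void) ddi_intr_alloc(dip, &ih, DDI_INTR_TYPE_FIXED, 0, 1,
 *	    &nactual, DDI_INTR_ALLOC_NORMAL);
 *	(void) ddi_intr_add_handler(ih, xx_intr, (caddr_t)xxp, NULL);
 *	(void) ddi_intr_enable(ih);
 *
 * at add_handler time bofi swaps in bofi_intercept_intr() with the
 * shadow handle as its argument, so every interrupt for the driver can
 * be delayed, lost or multiplied according to the active errdefs.
 */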
4770