/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/debug.h>
#include <sys/bofi.h>
#include <sys/dvma.h>
#include <sys/bofi_impl.h>

/*
 * Testing the resilience of a hardened device driver requires a suitably wide
 * range of different types of "typical" hardware faults to be injected,
 * preferably in a controlled and repeatable fashion. This is not in general
 * possible via hardware, so this "fault injection test harness" is provided.
 * It works by intercepting calls from the driver to various DDI routines,
 * and then corrupting the result of those DDI routine calls as if the
 * hardware had caused the corruption.
 *
 * Conceptually, the bofi driver consists of two parts:
 *
 * A driver interface that supports a number of ioctls which allow error
 * definitions ("errdefs") to be defined and subsequently managed. The
 * driver is a clone driver, so each open will create a separate
 * invocation. Any errdefs created by using ioctls on that invocation
 * will automatically be deleted when that invocation is closed.
 *
 * Intercept routines: when the bofi driver is attached, it edits the
 * bus_ops structure of the bus nexus specified by the "bofi-nexus"
 * field in the "bofi.conf" file, thus allowing the
 * bofi driver to intercept various ddi functions. These intercept
 * routines primarily carry out fault injections based on the errdefs
 * created for that device.
 *
 * Faults can be injected into:
 *
 * DMA (corrupting data for DMA to/from memory areas defined by
 * ddi_dma_setup(), ddi_dma_bind_handle(), etc.)
 *
 * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
 * etc.)
 *
 * Interrupts (generating spurious interrupts, losing interrupts,
 * delaying interrupts).
 *
 * By default, ddi routines called from all drivers will be intercepted
 * and faults potentially injected. However, the "bofi-to-test" field in
 * the "bofi.conf" file can be set to a space-separated list of drivers to
 * test (or, by preceding each driver name in the list with an "!", a list
 * of drivers not to test).
 *
 * In addition to fault injection, the bofi driver does a number of static
 * checks which are controlled by properties in the "bofi.conf" file.
 *
 * "bofi-ddi-check" - if set, validates that no PIO accesses are made
 * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc.).
 *
 * "bofi-range-check" - if set to "warn" or "panic", validates that calls
 * to ddi_get8(), ddi_put8(), etc. do not specify addresses outside the
 * range of the access handle, warning or panicking respectively on a
 * violation.
 *
 * "bofi-sync-check" - if set, validates that calls to ddi_dma_sync()
 * are being made correctly.
 */
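
/*
 * For illustration, a bofi.conf using the properties above might look like
 * the following sketch (the nexus and driver names here are placeholders,
 * not taken from any real configuration):
 *
 *	name="bofi" parent="pseudo" instance=0
 *	    bofi-nexus="pcisch" bofi-to-test="foo bar"
 *	    bofi-range-check="warn" bofi-ddi-check="on" bofi-sync-check="on";
 *
 * With these settings only accesses by the (hypothetical) "foo" and "bar"
 * drivers are intercepted, out-of-range PIO accesses produce warnings, and
 * both the ddi-access and ddi_dma_sync() checks are enabled.
 */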

extern void *bp_mapin_common(struct buf *, int);

static int bofi_ddi_check;
static int bofi_sync_check;
static int bofi_range_check;

static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;

#define	LLSZMASK (sizeof (uint64_t)-1)

#define	HDL_HASH_TBL_SIZE 64
static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
#define	HDL_DHASH(x) \
	(&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
#define	HDL_HHASH(x) \
	(&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
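
/*
 * Both hash macros above key on a kernel pointer: the right-shifts discard
 * low-order bits that carry little information (such pointers are at least
 * 8-byte aligned), and the result is masked down to an index. The differing
 * shift amounts (3 and 5) are heuristic choices to spread the entries.
 */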

static struct bofi_shadow shadow_list;
static struct bofi_errent *errent_listp;

static char driver_list[NAMESIZE];
static int driver_list_size;
static int driver_list_neg;
static char nexus_name[NAMESIZE];

static int initialized = 0;

#define	NCLONES 256
static int clone_tab[NCLONES];

static dev_info_t *our_dip;

static kmutex_t bofi_mutex;
static kmutex_t clone_tab_mutex;
static kmutex_t bofi_low_mutex;
static ddi_iblock_cookie_t bofi_low_cookie;
static uint_t	bofi_signal(caddr_t arg);
static int	bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int	bofi_attach(dev_info_t *, ddi_attach_cmd_t);
static int	bofi_detach(dev_info_t *, ddi_detach_cmd_t);
static int	bofi_open(dev_t *, int, int, cred_t *);
static int	bofi_close(dev_t, int, int, cred_t *);
static int	bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int	bofi_errdef_alloc(struct bofi_errdef *, char *,
		    struct bofi_errent *);
static int	bofi_errdef_free(struct bofi_errent *);
static void	bofi_start(struct bofi_errctl *, char *);
static void	bofi_stop(struct bofi_errctl *, char *);
static void	bofi_broadcast(struct bofi_errctl *, char *);
static void	bofi_clear_acc_chk(struct bofi_errctl *, char *);
static void	bofi_clear_errors(struct bofi_errctl *, char *);
static void	bofi_clear_errdefs(struct bofi_errctl *, char *);
static int	bofi_errdef_check(struct bofi_errstate *,
		    struct acc_log_elem **);
static int	bofi_errdef_check_w(struct bofi_errstate *,
		    struct acc_log_elem **);
static int	bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
		    off_t, off_t, caddr_t *);
static int	bofi_dma_map(dev_info_t *, dev_info_t *,
		    struct ddi_dma_req *, ddi_dma_handle_t *);
static int	bofi_dma_allochdl(dev_info_t *, dev_info_t *,
		    ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
		    ddi_dma_handle_t *);
static int	bofi_dma_freehdl(dev_info_t *, dev_info_t *,
		    ddi_dma_handle_t);
static int	bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
		    ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
		    uint_t *);
static int	bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
		    ddi_dma_handle_t);
static int	bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
		    off_t, size_t, uint_t);
static int	bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
		    enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
static int	bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
		    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
static int	bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
		    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
		    void *result);
static int	bofi_fm_ereport_callback(sysevent_t *ev, void *cookie);

evchan_t *bofi_error_chan;

#define	FM_SIMULATED_DMA "simulated.dma"
#define	FM_SIMULATED_PIO "simulated.pio"

#if defined(__sparc)
static void	bofi_dvma_kaddr_load(ddi_dma_handle_t, caddr_t, uint_t,
		    uint_t, ddi_dma_cookie_t *);
static void	bofi_dvma_unload(ddi_dma_handle_t, uint_t, uint_t);
static void	bofi_dvma_sync(ddi_dma_handle_t, uint_t, uint_t);
static void	bofi_dvma_reserve(dev_info_t *, ddi_dma_handle_t);
#endif
static int	driver_under_test(dev_info_t *);
static int	bofi_check_acc_hdl(ddi_acc_impl_t *);
static int	bofi_check_dma_hdl(ddi_dma_impl_t *);
static int	bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
		    ddi_eventcookie_t eventhdl, void *impl_data);

static struct bus_ops bofi_bus_ops = {
	BUSO_REV,
	bofi_map,
	NULL,
	NULL,
	NULL,
	i_ddi_map_fault,
	bofi_dma_map,
	bofi_dma_allochdl,
	bofi_dma_freehdl,
	bofi_dma_bindhdl,
	bofi_dma_unbindhdl,
	bofi_dma_flush,
	bofi_dma_win,
	bofi_dma_ctl,
	NULL,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,
	ndi_busop_add_eventcall,
	ndi_busop_remove_eventcall,
	bofi_post_event,
	NULL,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	bofi_intr_ops
};
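
/*
 * Note: of the initializers above, only the members that modify_bus_ops()
 * copies (bus_intr_op, bus_post_event, bus_map and the bus_dma_* entries)
 * are ever installed in the target nexus; the remaining slots are
 * placeholders for bus_ops entries bofi does not intercept.
 */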

static struct cb_ops bofi_cb_ops = {
	bofi_open,		/* open */
	bofi_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	bofi_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* for STREAMS drivers */
	D_MP,			/* driver compatibility flag */
	CB_REV,			/* cb_ops revision */
	nodev,			/* aread */
	nodev			/* awrite */
};

static struct dev_ops bofi_ops = {
	DEVO_REV,		/* driver build version */
	0,			/* device reference count */
	bofi_getinfo,
	nulldev,
	nulldev,		/* probe */
	bofi_attach,
	bofi_detach,
	nulldev,		/* reset */
	&bofi_cb_ops,
	(struct bus_ops *)NULL,
	nulldev			/* power */
};

/* module configuration stuff */
static void    *statep;

static struct modldrv modldrv = {
	&mod_driverops,
	"bofi driver %I%",
	&bofi_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0
};

static struct bus_ops save_bus_ops;

#if defined(__sparc)
static struct dvma_ops bofi_dvma_ops = {
	DVMAO_REV,
	bofi_dvma_kaddr_load,
	bofi_dvma_unload,
	bofi_dvma_sync
};
#endif

/*
 * support routine - map user page into kernel virtual
 */
static caddr_t
dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
{
	struct buf buf;
	struct proc proc;

	/*
	 * mock up a buf structure so we can call bp_mapin_common()
	 */
	buf.b_flags = B_PHYS;
	buf.b_un.b_addr = (caddr_t)addr;
	buf.b_bcount = (size_t)len;
	proc.p_as = as;
	buf.b_proc = &proc;
	return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map page chain into kernel virtual
 */
static caddr_t
dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
{
	struct buf buf;

	/*
	 * mock up a buf structure so we can call bp_mapin_common()
	 */
	buf.b_flags = B_PAGEIO;
	buf.b_un.b_addr = (caddr_t)(uintptr_t)offset;
	buf.b_bcount = (size_t)len;
	buf.b_pages = pp;
	return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map page array into kernel virtual
 */
static caddr_t
dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
    int flag)
{
	struct buf buf;
	struct proc proc;

	/*
	 * mock up a buf structure so we can call bp_mapin_common()
	 */
	buf.b_flags = B_PHYS|B_SHADOW;
	buf.b_un.b_addr = addr;
	buf.b_bcount = len;
	buf.b_shadow = pplist;
	proc.p_as = as;
	buf.b_proc = &proc;
	return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map dmareq into kernel virtual if not already mapped;
 * fills in *lenp with the length.
 * *mapaddrp will be the new kernel virtual address - or NULL if no
 * mapping was needed
 */
static caddr_t
ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
	offset_t *lenp)
{
	int sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP: VM_NOSLEEP;

	*lenp = dmareqp->dmar_object.dmao_size;
	if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
		*mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, sleep);
		return (*mapaddrp);
	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
		*mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
		return (*mapaddrp);
	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas) {
		*mapaddrp = NULL;
		return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
	} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
		*mapaddrp = NULL;
		return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
	} else {
		*mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
		return (*mapaddrp);
	}
}
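
/*
 * A sketch of the intended pairing (not a call sequence taken from this
 * file): the caller saves both return values and later undoes any mapping
 * that was created, e.g.
 *
 *	caddr_t kaddr, mapaddr;
 *	offset_t len;
 *
 *	kaddr = ddi_dmareq_mapin(dmareqp, &mapaddr, &len);
 *	...access the object through kaddr...
 *	ddi_dmareq_mapout(mapaddr, len);
 *
 * A NULL mapaddr records that no new mapping was created, and
 * ddi_dmareq_mapout() below treats it as a no-op.
 */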


/*
 * support routine - free off kernel virtual mapping as allocated by
 * ddi_dmareq_mapin()
 */
static void
ddi_dmareq_mapout(caddr_t addr, offset_t len)
{
	struct buf buf;

	if (addr == NULL)
		return;
	/*
	 * mock up a buf structure
	 */
	buf.b_flags = B_REMAPPED;
	buf.b_un.b_addr = addr;
	buf.b_bcount = (size_t)len;
	bp_mapout(&buf);
}

static time_t
bofi_gettime()
{
	timestruc_t ts;

	gethrestime(&ts);
	return (ts.tv_sec);
}

/*
 * reset the bus_ops structure of the specified nexus to point to
 * the original values in the save_bus_ops structure.
 *
 * Note that both this routine and modify_bus_ops() rely on the current
 * behavior of the framework in that nexus drivers are not unloadable.
 */

static int
reset_bus_ops(char *name, struct bus_ops *bop)
{
	struct modctl *modp;
	struct modldrv *mp;
	struct bus_ops *bp;
	struct dev_ops *ops;

	mutex_enter(&mod_lock);
	/*
	 * find specified module
	 */
	modp = &modules;
	do {
		if (strcmp(name, modp->mod_modname) == 0) {
			if (!modp->mod_linkage) {
				mutex_exit(&mod_lock);
				return (0);
			}
			mp = modp->mod_linkage->ml_linkage[0];
			if (!mp || !mp->drv_dev_ops) {
				mutex_exit(&mod_lock);
				return (0);
			}
			ops = mp->drv_dev_ops;
			bp = ops->devo_bus_ops;
			if (!bp) {
				mutex_exit(&mod_lock);
				return (0);
			}
			if (ops->devo_refcnt > 0) {
				/*
				 * As long as devices are active with modified
				 * bus ops bofi must not go away. There may be
				 * drivers with modified access or dma handles.
				 */
				mutex_exit(&mod_lock);
				return (0);
			}
			cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
			    mp->drv_linkinfo);
			bp->bus_intr_op = bop->bus_intr_op;
			bp->bus_post_event = bop->bus_post_event;
			bp->bus_map = bop->bus_map;
			bp->bus_dma_map = bop->bus_dma_map;
			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
			bp->bus_dma_flush = bop->bus_dma_flush;
			bp->bus_dma_win = bop->bus_dma_win;
			bp->bus_dma_ctl = bop->bus_dma_ctl;
			mutex_exit(&mod_lock);
			return (1);
		}
	} while ((modp = modp->mod_next) != &modules);
	mutex_exit(&mod_lock);
	return (0);
}

/*
 * modify the bus_ops structure of the specified nexus to point to bofi
 * routines, saving the original values in the save_bus_ops structure
 */

static int
modify_bus_ops(char *name, struct bus_ops *bop)
{
	struct modctl *modp;
	struct modldrv *mp;
	struct bus_ops *bp;
	struct dev_ops *ops;

	if (ddi_name_to_major(name) == -1)
		return (0);

	mutex_enter(&mod_lock);
	/*
	 * find specified module
	 */
	modp = &modules;
	do {
		if (strcmp(name, modp->mod_modname) == 0) {
			if (!modp->mod_linkage) {
				mutex_exit(&mod_lock);
				return (0);
			}
			mp = modp->mod_linkage->ml_linkage[0];
			if (!mp || !mp->drv_dev_ops) {
				mutex_exit(&mod_lock);
				return (0);
			}
			ops = mp->drv_dev_ops;
			bp = ops->devo_bus_ops;
			if (!bp) {
				mutex_exit(&mod_lock);
				return (0);
			}
			if (ops->devo_refcnt == 0) {
				/*
				 * If there is no device active for this
				 * module then there is nothing to do for bofi.
				 */
				mutex_exit(&mod_lock);
				return (0);
			}
			cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
			    mp->drv_linkinfo);
			save_bus_ops = *bp;
			bp->bus_intr_op = bop->bus_intr_op;
			bp->bus_post_event = bop->bus_post_event;
			bp->bus_map = bop->bus_map;
			bp->bus_dma_map = bop->bus_dma_map;
			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
			bp->bus_dma_flush = bop->bus_dma_flush;
			bp->bus_dma_win = bop->bus_dma_win;
			bp->bus_dma_ctl = bop->bus_dma_ctl;
			mutex_exit(&mod_lock);
			return (1);
		}
	} while ((modp = modp->mod_next) != &modules);
	mutex_exit(&mod_lock);
	return (0);
}


int
_init(void)
{
	int    e;

	e = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
	if (e != 0)
		return (e);
	if ((e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&statep);
	return (e);
}


int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);
	ddi_soft_state_fini(&statep);
	return (e);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


static int
bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	char *name;
	char buf[80];
	int i;
	int s, ss;
	int size = NAMESIZE;
	int new_string;
	char *ptr;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	/*
	 * only one instance - but we clone using the open routine
	 */
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);

	if (!initialized) {
		if ((name = ddi_get_name(dip)) == NULL)
			return (DDI_FAILURE);
		(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
		if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
		    DDI_PSEUDO, NULL) == DDI_FAILURE)
			return (DDI_FAILURE);

		if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
		    &bofi_low_cookie) != DDI_SUCCESS) {
			ddi_remove_minor_node(dip, buf);
			return (DDI_FAILURE); /* fail attach */
		}
		/*
		 * get nexus name (from conf file)
		 */
		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
		    "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
			ddi_remove_minor_node(dip, buf);
			return (DDI_FAILURE);
		}
		/*
		 * get range-check level ("warn" or "panic") from conf file
		 */
		if ((bofi_range_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
		    dip, 0, "bofi-range-check", &ptr)) != DDI_PROP_SUCCESS) {
			bofi_range_check = 0;
		} else {
			if (strcmp(ptr, "panic") == 0)
				bofi_range_check = 2;
			else if (strcmp(ptr, "warn") == 0)
				bofi_range_check = 1;
			else
				bofi_range_check = 0;
			ddi_prop_free(ptr);
		}

		/*
		 * get whether to prevent direct access to registers
		 */
		if ((bofi_ddi_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
		    dip, 0, "bofi-ddi-check", &ptr)) != DDI_PROP_SUCCESS) {
			bofi_ddi_check = 0;
		} else {
			if (strcmp(ptr, "on") == 0)
				bofi_ddi_check = 1;
			else
				bofi_ddi_check = 0;
			ddi_prop_free(ptr);
		}

		/*
		 * get whether to do copy on ddi_dma_sync
		 */
		if ((bofi_sync_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
		    dip, 0, "bofi-sync-check", &ptr)) != DDI_PROP_SUCCESS) {
			bofi_sync_check = 0;
		} else {
			if (strcmp(ptr, "on") == 0)
				bofi_sync_check = 1;
			else
				bofi_sync_check = 0;
			ddi_prop_free(ptr);
		}

		/*
		 * get driver-under-test names (from conf file)
		 */
		size = NAMESIZE;
		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
		    "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
			driver_list[0] = 0;
		/*
		 * and convert into a sequence of strings
		 */
		driver_list_neg = 1;
		new_string = 1;
		driver_list_size = strlen(driver_list);
		for (i = 0; i < driver_list_size; i++) {
			if (driver_list[i] == ' ') {
				driver_list[i] = '\0';
				new_string = 1;
			} else if (new_string) {
				if (driver_list[i] != '!')
					driver_list_neg = 0;
				new_string = 0;
			}
		}
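		/*
		 * For example (illustrative names only):
		 * bofi-to-test="foo bar" tests just foo and bar, while
		 * bofi-to-test="!foo !bar" tests every driver except foo
		 * and bar; the loop above has split the property into
		 * NUL-separated strings within driver_list.
		 */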
		/*
		 * initialize mutex, lists
		 */
		mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
		    NULL);
		/*
		 * fake up iblock cookie - need to protect ourselves
		 * against drivers that use hilevel interrupts
		 */
		ss = spl8();
		s = spl8();
		splx(ss);
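		/*
		 * (s now holds the priority value returned while already at
		 * level 8; below it serves as the interrupt-block cookie for
		 * the spin mutex.)
		 */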
		mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
		mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
		    (void *)bofi_low_cookie);
		shadow_list.next = &shadow_list;
		shadow_list.prev = &shadow_list;
		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
			hhash_table[i].hnext = &hhash_table[i];
			hhash_table[i].hprev = &hhash_table[i];
			dhash_table[i].dnext = &dhash_table[i];
			dhash_table[i].dprev = &dhash_table[i];
		}
		for (i = 1; i < BOFI_NLINKS; i++)
			bofi_link_array[i].link = &bofi_link_array[i-1];
		bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
		/*
		 * overlay bus_ops structure
		 */
		if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
			ddi_remove_minor_node(dip, buf);
			mutex_destroy(&clone_tab_mutex);
			mutex_destroy(&bofi_mutex);
			mutex_destroy(&bofi_low_mutex);
			return (DDI_FAILURE);
		}
		if (sysevent_evc_bind(FM_ERROR_CHAN, &bofi_error_chan, 0) == 0)
			(void) sysevent_evc_subscribe(bofi_error_chan, "bofi",
			    EC_FM, bofi_fm_ereport_callback, NULL, 0);

		/*
		 * save dip for getinfo
		 */
		our_dip = dip;
		ddi_report_dev(dip);
		initialized = 1;
	}
	return (DDI_SUCCESS);
}


static int
bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	char *name;
	char buf[80];

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);
	if ((name = ddi_get_name(dip)) == NULL)
		return (DDI_FAILURE);
	(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	/*
	 * make sure bofi is no longer in use
	 */
	if (shadow_list.next != &shadow_list || errent_listp != NULL) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	/*
	 * restore bus_ops structure
	 */
	if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
		return (DDI_FAILURE);

	sysevent_evc_unbind(bofi_error_chan);

	mutex_destroy(&clone_tab_mutex);
	mutex_destroy(&bofi_mutex);
	mutex_destroy(&bofi_low_mutex);
	ddi_remove_minor_node(dip, buf);
	our_dip = NULL;
	initialized = 0;
	return (DDI_SUCCESS);
}


/* ARGSUSED */
static int
bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	dev_t	dev = (dev_t)arg;
	int	minor = (int)getminor(dev);
	int	retval;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (minor != 0 || our_dip == NULL) {
			*result = (void *)NULL;
			retval = DDI_FAILURE;
		} else {
			*result = (void *)our_dip;
			retval = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		retval = DDI_SUCCESS;
		break;
	default:
		retval = DDI_FAILURE;
	}
	return (retval);
}


/* ARGSUSED */
static int
bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	int	minor = (int)getminor(*devp);
	struct bofi_errent *softc;

	/*
	 * only allow open on minor=0 - the clone device
	 */
	if (minor != 0)
		return (ENXIO);
	/*
	 * fail if not attached
	 */
	if (!initialized)
		return (ENXIO);
	/*
	 * find a free slot and grab it
	 */
	mutex_enter(&clone_tab_mutex);
	for (minor = 1; minor < NCLONES; minor++) {
		if (clone_tab[minor] == 0) {
			clone_tab[minor] = 1;
			break;
		}
	}
	mutex_exit(&clone_tab_mutex);
	if (minor == NCLONES)
		return (EAGAIN);
	/*
	 * soft state structure for this clone is used to maintain a list
	 * of allocated errdefs so they can be freed on close
	 */
	if (ddi_soft_state_zalloc(statep, minor) != DDI_SUCCESS) {
		mutex_enter(&clone_tab_mutex);
		clone_tab[minor] = 0;
		mutex_exit(&clone_tab_mutex);
		return (EAGAIN);
	}
	softc = ddi_get_soft_state(statep, minor);
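	/*
	 * cnext/cprev form the clone's circular errdef list; an empty list
	 * is denoted by the sentinel pointing at itself.
	 */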
	softc->cnext = softc;
	softc->cprev = softc;

	*devp = makedevice(getmajor(*devp), minor);
	return (0);
}


/* ARGSUSED */
static int
bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int	minor = (int)getminor(dev);
	struct bofi_errent *softc;
	struct bofi_errent *ep, *next_ep;

	softc = ddi_get_soft_state(statep, minor);
	if (softc == NULL)
		return (ENXIO);
	/*
	 * find list of errdefs and free them off
	 */
	for (ep = softc->cnext; ep != softc; ) {
		next_ep = ep->cnext;
		(void) bofi_errdef_free(ep);
		ep = next_ep;
	}
	/*
	 * free clone tab slot
	 */
	mutex_enter(&clone_tab_mutex);
	clone_tab[minor] = 0;
	mutex_exit(&clone_tab_mutex);

	ddi_soft_state_free(statep, minor);
	return (0);
}


/* ARGSUSED */
static int
bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
	int *rvalp)
{
	struct bofi_errent *softc;
	int	minor = (int)getminor(dev);
	struct bofi_errdef errdef;
	struct bofi_errctl errctl;
	struct bofi_errstate errstate;
	void *ed_handle;
	struct bofi_get_handles get_handles;
	struct bofi_get_hdl_info hdl_info;
	struct handle_info *hdlip;
	struct handle_info *hib;

	char *buffer;
	char *bufptr;
	char *endbuf;
	int req_count, count, err;
	char *namep;
	struct bofi_shadow *hp;
	int retval;
	struct bofi_shadow *hhashp;
	int i;

	switch (cmd) {
	case BOFI_ADD_DEF:
		/*
		 * add a new error definition
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errdef32	errdef_32;

			if (ddi_copyin((void *)arg, &errdef_32,
			    sizeof (struct bofi_errdef32), mode)) {
				return (EFAULT);
			}
			errdef.namesize = errdef_32.namesize;
			(void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
			errdef.instance = errdef_32.instance;
			errdef.rnumber = errdef_32.rnumber;
			errdef.offset = errdef_32.offset;
			errdef.len = errdef_32.len;
			errdef.access_type = errdef_32.access_type;
			errdef.access_count = errdef_32.access_count;
			errdef.fail_count = errdef_32.fail_count;
			errdef.acc_chk = errdef_32.acc_chk;
			errdef.optype = errdef_32.optype;
			errdef.operand = errdef_32.operand;
			errdef.log.logsize = errdef_32.log.logsize;
			errdef.log.entries = errdef_32.log.entries;
			errdef.log.flags = errdef_32.log.flags;
			errdef.log.wrapcnt = errdef_32.log.wrapcnt;
			errdef.log.start_time = errdef_32.log.start_time;
			errdef.log.stop_time = errdef_32.log.stop_time;
			errdef.log.logbase =
			    (caddr_t)(uintptr_t)errdef_32.log.logbase;
			errdef.errdef_handle = errdef_32.errdef_handle;
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)arg, &errdef,
			    sizeof (struct bofi_errdef), mode))
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((void *)arg, &errdef,
		    sizeof (struct bofi_errdef), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		/*
		 * do some validation
		 */
		if (errdef.fail_count == 0)
			errdef.optype = 0;
		if (errdef.optype != 0) {
			if (errdef.access_type & BOFI_INTR &&
			    errdef.optype != BOFI_DELAY_INTR &&
			    errdef.optype != BOFI_LOSE_INTR &&
			    errdef.optype != BOFI_EXTRA_INTR)
				return (EINVAL);
			if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
			    errdef.optype == BOFI_NO_TRANSFER)
				return (EINVAL);
			if ((errdef.access_type & (BOFI_PIO_RW)) &&
			    errdef.optype != BOFI_EQUAL &&
			    errdef.optype != BOFI_OR &&
			    errdef.optype != BOFI_XOR &&
			    errdef.optype != BOFI_AND &&
			    errdef.optype != BOFI_NO_TRANSFER)
				return (EINVAL);
		}
		/*
		 * find softstate for this clone, so we can tag
		 * new errdef on to it
		 */
		softc = ddi_get_soft_state(statep, minor);
		if (softc == NULL)
			return (ENXIO);
		/*
		 * read in name
		 */
		if (errdef.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errdef.name, errdef.namesize);

		if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
			(void) bofi_errdef_free((struct bofi_errent *)
			    (uintptr_t)errdef.errdef_handle);
			kmem_free(namep, errdef.namesize+1);
			return (EINVAL);
		}
		/*
		 * copy out errdef again, including filled in errdef_handle
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errdef32	errdef_32;

			errdef_32.namesize = errdef.namesize;
			(void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
			errdef_32.instance = errdef.instance;
			errdef_32.rnumber = errdef.rnumber;
			errdef_32.offset = errdef.offset;
			errdef_32.len = errdef.len;
			errdef_32.access_type = errdef.access_type;
			errdef_32.access_count = errdef.access_count;
			errdef_32.fail_count = errdef.fail_count;
			errdef_32.acc_chk = errdef.acc_chk;
			errdef_32.optype = errdef.optype;
			errdef_32.operand = errdef.operand;
			errdef_32.log.logsize = errdef.log.logsize;
			errdef_32.log.entries = errdef.log.entries;
			errdef_32.log.flags = errdef.log.flags;
			errdef_32.log.wrapcnt = errdef.log.wrapcnt;
			errdef_32.log.start_time = errdef.log.start_time;
			errdef_32.log.stop_time = errdef.log.stop_time;
			errdef_32.log.logbase =
			    (caddr32_t)(uintptr_t)errdef.log.logbase;
			errdef_32.errdef_handle = errdef.errdef_handle;
			if (ddi_copyout(&errdef_32, (void *)arg,
			    sizeof (struct bofi_errdef32), mode) != 0) {
				(void) bofi_errdef_free((struct bofi_errent *)
				    (uintptr_t)errdef.errdef_handle);
				kmem_free(namep, errdef.namesize+1);
				return (EFAULT);
			}
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyout(&errdef, (void *)arg,
			    sizeof (struct bofi_errdef), mode) != 0) {
				(void) bofi_errdef_free((struct bofi_errent *)
				    (uintptr_t)errdef.errdef_handle);
				kmem_free(namep, errdef.namesize+1);
				return (EFAULT);
			}
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyout(&errdef, (void *)arg,
		    sizeof (struct bofi_errdef), mode) != 0) {
			(void) bofi_errdef_free((struct bofi_errent *)
			    (uintptr_t)errdef.errdef_handle);
			kmem_free(namep, errdef.namesize+1);
			return (EFAULT);
		}
#endif /* _MULTI_DATAMODEL */
		return (0);
	case BOFI_DEL_DEF:
		/*
		 * delete existing errdef
		 */
		if (ddi_copyin((void *)arg, &ed_handle,
		    sizeof (void *), mode) != 0)
			return (EFAULT);
		return (bofi_errdef_free((struct bofi_errent *)ed_handle));
	case BOFI_START:
		/*
		 * start all errdefs corresponding to
		 * this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_start(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_STOP:
		/*
		 * stop all errdefs corresponding to
		 * this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_stop(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_BROADCAST:
		/*
		 * wakeup all errdefs corresponding to
		 * this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_broadcast(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_CLEAR_ACC_CHK:
		/*
		 * clear "acc_chk" for all errdefs corresponding to
		 * this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_clear_acc_chk(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_CLEAR_ERRORS:
		/*
		 * set "fail_count" to 0 for all errdefs corresponding to
		 * this name and instance whose "access_count"
		 * has expired.
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_clear_errors(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_CLEAR_ERRDEFS:
		/*
		 * set "access_count" and "fail_count" to 0 for all errdefs
		 * corresponding to this name and instance
		 */
		if (ddi_copyin((void *)arg, &errctl,
		    sizeof (struct bofi_errctl), mode) != 0)
			return (EFAULT);
		/*
		 * copy in name
		 */
		if (errctl.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
		(void) strncpy(namep, errctl.name, errctl.namesize);
		bofi_clear_errdefs(&errctl, namep);
		kmem_free(namep, errctl.namesize+1);
		return (0);
	case BOFI_CHK_STATE:
	{
		struct acc_log_elem *klg;
		size_t uls;
		/*
		 * get state for this errdef - read in dummy errstate
		 * with just the errdef_handle filled in
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errstate32	errstate_32;

			if (ddi_copyin((void *)arg, &errstate_32,
			    sizeof (struct bofi_errstate32), mode) != 0) {
				return (EFAULT);
			}
			errstate.fail_time = errstate_32.fail_time;
			errstate.msg_time = errstate_32.msg_time;
			errstate.access_count = errstate_32.access_count;
			errstate.fail_count = errstate_32.fail_count;
			errstate.acc_chk = errstate_32.acc_chk;
			errstate.errmsg_count = errstate_32.errmsg_count;
			(void) strncpy(errstate.buffer, errstate_32.buffer,
			    ERRMSGSIZE);
			errstate.severity = errstate_32.severity;
			errstate.log.logsize = errstate_32.log.logsize;
			errstate.log.entries = errstate_32.log.entries;
			errstate.log.flags = errstate_32.log.flags;
			errstate.log.wrapcnt = errstate_32.log.wrapcnt;
			errstate.log.start_time = errstate_32.log.start_time;
			errstate.log.stop_time = errstate_32.log.stop_time;
			errstate.log.logbase =
			    (caddr_t)(uintptr_t)errstate_32.log.logbase;
			errstate.errdef_handle = errstate_32.errdef_handle;
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)arg, &errstate,
			    sizeof (struct bofi_errstate), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((void *)arg, &errstate,
		    sizeof (struct bofi_errstate), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
			return (EINVAL);
		/*
		 * copy out real errstate structure
		 */
		uls = errstate.log.logsize;
		if (errstate.log.entries > uls && uls)
			/* insufficient user memory */
			errstate.log.entries = uls;
		/* always pass back a time */
		if (errstate.log.stop_time == 0ul)
			(void) drv_getparm(TIME, &(errstate.log.stop_time));

#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errstate32	errstate_32;

			errstate_32.fail_time = errstate.fail_time;
			errstate_32.msg_time = errstate.msg_time;
			errstate_32.access_count = errstate.access_count;
			errstate_32.fail_count = errstate.fail_count;
			errstate_32.acc_chk = errstate.acc_chk;
			errstate_32.errmsg_count = errstate.errmsg_count;
			(void) strncpy(errstate_32.buffer, errstate.buffer,
			    ERRMSGSIZE);
			errstate_32.severity = errstate.severity;
			errstate_32.log.logsize = errstate.log.logsize;
			errstate_32.log.entries = errstate.log.entries;
			errstate_32.log.flags = errstate.log.flags;
			errstate_32.log.wrapcnt = errstate.log.wrapcnt;
			errstate_32.log.start_time = errstate.log.start_time;
			errstate_32.log.stop_time = errstate.log.stop_time;
			errstate_32.log.logbase =
			    (caddr32_t)(uintptr_t)errstate.log.logbase;
			errstate_32.errdef_handle = errstate.errdef_handle;
			if (ddi_copyout(&errstate_32, (void *)arg,
			    sizeof (struct bofi_errstate32), mode) != 0)
				return (EFAULT);
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyout(&errstate, (void *)arg,
			    sizeof (struct bofi_errstate), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyout(&errstate, (void *)arg,
		    sizeof (struct bofi_errstate), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		if (uls && errstate.log.entries &&
		    ddi_copyout(klg, errstate.log.logbase,
		    errstate.log.entries * sizeof (struct acc_log_elem),
		    mode) != 0) {
			return (EFAULT);
		}
		return (retval);
	}
	case BOFI_CHK_STATE_W:
	{
		struct acc_log_elem *klg;
		size_t uls;
		/*
		 * get state for this errdef - read in dummy errstate
		 * with just the errdef_handle filled in. Then wait for
		 * a ddi_report_fault message to come back
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errstate32	errstate_32;

			if (ddi_copyin((void *)arg, &errstate_32,
			    sizeof (struct bofi_errstate32), mode) != 0) {
				return (EFAULT);
			}
			errstate.fail_time = errstate_32.fail_time;
			errstate.msg_time = errstate_32.msg_time;
			errstate.access_count = errstate_32.access_count;
			errstate.fail_count = errstate_32.fail_count;
			errstate.acc_chk = errstate_32.acc_chk;
			errstate.errmsg_count = errstate_32.errmsg_count;
			(void) strncpy(errstate.buffer, errstate_32.buffer,
			    ERRMSGSIZE);
			errstate.severity = errstate_32.severity;
			errstate.log.logsize = errstate_32.log.logsize;
			errstate.log.entries = errstate_32.log.entries;
			errstate.log.flags = errstate_32.log.flags;
			errstate.log.wrapcnt = errstate_32.log.wrapcnt;
			errstate.log.start_time = errstate_32.log.start_time;
			errstate.log.stop_time = errstate_32.log.stop_time;
			errstate.log.logbase =
			    (caddr_t)(uintptr_t)errstate_32.log.logbase;
			errstate.errdef_handle = errstate_32.errdef_handle;
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)arg, &errstate,
			    sizeof (struct bofi_errstate), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((void *)arg, &errstate,
		    sizeof (struct bofi_errstate), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
			return (EINVAL);
		/*
		 * copy out real errstate structure
		 */
		uls = errstate.log.logsize;
		if (errstate.log.entries > uls && uls)
			/* insufficient user memory */
			errstate.log.entries = uls;
		/* always pass back a time */
		if (errstate.log.stop_time == 0ul)
			(void) drv_getparm(TIME, &(errstate.log.stop_time));

#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_errstate32	errstate_32;

			errstate_32.fail_time = errstate.fail_time;
			errstate_32.msg_time = errstate.msg_time;
			errstate_32.access_count = errstate.access_count;
			errstate_32.fail_count = errstate.fail_count;
			errstate_32.acc_chk = errstate.acc_chk;
			errstate_32.errmsg_count = errstate.errmsg_count;
			(void) strncpy(errstate_32.buffer, errstate.buffer,
			    ERRMSGSIZE);
			errstate_32.severity = errstate.severity;
			errstate_32.log.logsize = errstate.log.logsize;
			errstate_32.log.entries = errstate.log.entries;
			errstate_32.log.flags = errstate.log.flags;
			errstate_32.log.wrapcnt = errstate.log.wrapcnt;
			errstate_32.log.start_time = errstate.log.start_time;
			errstate_32.log.stop_time = errstate.log.stop_time;
			errstate_32.log.logbase =
			    (caddr32_t)(uintptr_t)errstate.log.logbase;
			errstate_32.errdef_handle = errstate.errdef_handle;
			if (ddi_copyout(&errstate_32, (void *)arg,
			    sizeof (struct bofi_errstate32), mode) != 0)
				return (EFAULT);
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyout(&errstate, (void *)arg,
			    sizeof (struct bofi_errstate), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyout(&errstate, (void *)arg,
		    sizeof (struct bofi_errstate), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */

		if (uls && errstate.log.entries &&
		    ddi_copyout(klg, errstate.log.logbase,
		    errstate.log.entries * sizeof (struct acc_log_elem),
		    mode) != 0) {
			return (EFAULT);
		}
		return (retval);
	}
	case BOFI_GET_HANDLES:
		/*
		 * display existing handles
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_get_handles32	get_handles_32;

			if (ddi_copyin((void *)arg, &get_handles_32,
			    sizeof (get_handles_32), mode) != 0) {
				return (EFAULT);
			}
			get_handles.namesize = get_handles_32.namesize;
			(void) strncpy(get_handles.name, get_handles_32.name,
			    NAMESIZE);
			get_handles.instance = get_handles_32.instance;
			get_handles.count = get_handles_32.count;
			get_handles.buffer =
			    (caddr_t)(uintptr_t)get_handles_32.buffer;
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)arg, &get_handles,
			    sizeof (get_handles), mode) != 0)
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((void *)arg, &get_handles,
		    sizeof (get_handles), mode) != 0)
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		/*
		 * read in name
		 */
		if (get_handles.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
		(void) strncpy(namep, get_handles.name, get_handles.namesize);
		req_count = get_handles.count;
		bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
		endbuf = bufptr + req_count;
		/*
		 * display existing handles
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
			hhashp = &hhash_table[i];
			for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
				if (!driver_under_test(hp->dip))
					continue;
				if (ddi_name_to_major(ddi_get_name(hp->dip)) !=
				    ddi_name_to_major(namep))
					continue;
				if (hp->instance != get_handles.instance)
					continue;
				/*
				 * print information per handle - note that
				 * DMA* means an unbound DMA handle
				 */
				(void) snprintf(bufptr, (size_t)(endbuf-bufptr),
				    "  %s %d %s ", hp->name, hp->instance,
				    (hp->type == BOFI_INT_HDL) ? "INTR" :
				    (hp->type == BOFI_ACC_HDL) ? "PIO" :
				    (hp->type == BOFI_DMA_HDL) ? "DMA" :
				    (hp->hparrayp != NULL) ? "DVMA" : "DMA*");
				bufptr += strlen(bufptr);
				if (hp->type == BOFI_ACC_HDL) {
					if (hp->len == INT_MAX - hp->offset)
						(void) snprintf(bufptr,
						    (size_t)(endbuf-bufptr),
						    "reg set %d off 0x%llx\n",
						    hp->rnumber, hp->offset);
					else
						(void) snprintf(bufptr,
						    (size_t)(endbuf-bufptr),
						    "reg set %d off 0x%llx"
						    " len 0x%llx\n",
						    hp->rnumber, hp->offset,
						    hp->len);
				} else if (hp->type == BOFI_DMA_HDL)
					(void) snprintf(bufptr,
					    (size_t)(endbuf-bufptr),
					    "handle no %d len 0x%llx"
					    " addr 0x%p\n", hp->rnumber,
					    hp->len, (void *)hp->addr);
				else if (hp->type == BOFI_NULL &&
				    hp->hparrayp == NULL)
					(void) snprintf(bufptr,
					    (size_t)(endbuf-bufptr),
					    "handle no %d\n", hp->rnumber);
				else
					(void) snprintf(bufptr,
					    (size_t)(endbuf-bufptr), "\n");
				bufptr += strlen(bufptr);
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		err = ddi_copyout(buffer, get_handles.buffer, req_count, mode);
		kmem_free(namep, get_handles.namesize+1);
		kmem_free(buffer, req_count);
		if (err != 0)
			return (EFAULT);
		else
			return (0);
	case BOFI_GET_HANDLE_INFO:
		/*
		 * display existing handles
		 */
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_get_hdl_info32	hdl_info_32;

			if (ddi_copyin((void *)arg, &hdl_info_32,
			    sizeof (hdl_info_32), mode)) {
				return (EFAULT);
			}
			hdl_info.namesize = hdl_info_32.namesize;
			(void) strncpy(hdl_info.name, hdl_info_32.name,
			    NAMESIZE);
			hdl_info.count = hdl_info_32.count;
			hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli;
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)arg, &hdl_info,
			    sizeof (hdl_info), mode))
				return (EFAULT);
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((void *)arg, &hdl_info,
		    sizeof (hdl_info), mode))
			return (EFAULT);
#endif /* _MULTI_DATAMODEL */
		if (hdl_info.namesize > NAMESIZE)
			return (EINVAL);
		namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
		(void) strncpy(namep, hdl_info.name, hdl_info.namesize);
		req_count = hdl_info.count;
		count = hdl_info.count = 0; /* the actual no of handles */
		if (req_count > 0) {
			hib = hdlip =
			    kmem_zalloc(req_count * sizeof (struct handle_info),
			    KM_SLEEP);
		} else {
			hib = hdlip = 0;
			req_count = hdl_info.count = 0;
		}

		/*
		 * display existing handles
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
			hhashp = &hhash_table[i];
			for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
				if (!driver_under_test(hp->dip) ||
				    ddi_name_to_major(ddi_get_name(hp->dip)) !=
				    ddi_name_to_major(namep) ||
				    ++(hdl_info.count) > req_count ||
				    count == req_count)
					continue;

				hdlip->instance = hp->instance;
				hdlip->rnumber = hp->rnumber;
				switch (hp->type) {
				case BOFI_ACC_HDL:
					hdlip->access_type = BOFI_PIO_RW;
					hdlip->offset = hp->offset;
					hdlip->len = hp->len;
					break;
				case BOFI_DMA_HDL:
					hdlip->access_type = 0;
					if (hp->flags & DDI_DMA_WRITE)
						hdlip->access_type |=
						    BOFI_DMA_W;
					if (hp->flags & DDI_DMA_READ)
						hdlip->access_type |=
						    BOFI_DMA_R;
					hdlip->len = hp->len;
					hdlip->addr_cookie =
					    (uint64_t)(uintptr_t)hp->addr;
					break;
				case BOFI_INT_HDL:
					hdlip->access_type = BOFI_INTR;
					break;
				default:
					hdlip->access_type = 0;
					break;
				}
				hdlip++;
				count++;
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		err = 0;
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			/*
			 * For use when a 32 bit app makes a call into a
			 * 64 bit ioctl
			 */
			struct bofi_get_hdl_info32	hdl_info_32;

			hdl_info_32.namesize = hdl_info.namesize;
			(void) strncpy(hdl_info_32.name, hdl_info.name,
			    NAMESIZE);
			hdl_info_32.count = hdl_info.count;
			hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
			if (ddi_copyout(&hdl_info_32, (void *)arg,
			    sizeof (hdl_info_32), mode) != 0) {
				kmem_free(namep, hdl_info.namesize+1);
				if (req_count > 0)
					kmem_free(hib,
					    req_count * sizeof (*hib));
				return (EFAULT);
			}
			break;
		}
		case DDI_MODEL_NONE:
			if (ddi_copyout(&hdl_info, (void *)arg,
			    sizeof (hdl_info), mode) != 0) {
				kmem_free(namep, hdl_info.namesize+1);
				if (req_count > 0)
					kmem_free(hib,
					    req_count * sizeof (*hib));
				return (EFAULT);
			}
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyout(&hdl_info, (void *)arg,
		    sizeof (hdl_info), mode) != 0) {
			kmem_free(namep, hdl_info.namesize+1);
			if (req_count > 0)
				kmem_free(hib, req_count * sizeof (*hib));
			return (EFAULT);
		}
#endif /* ! _MULTI_DATAMODEL */
		if (count > 0) {
			if (ddi_copyout(hib, hdl_info.hdli,
			    count * sizeof (*hib), mode) != 0) {
				kmem_free(namep, hdl_info.namesize+1);
				if (req_count > 0)
					kmem_free(hib,
					    req_count * sizeof (*hib));
				return (EFAULT);
			}
		}
		kmem_free(namep, hdl_info.namesize+1);
		if (req_count > 0)
			kmem_free(hib, req_count * sizeof (*hib));
		return (err);
	default:
		return (ENOTTY);
	}
}
1722 
1723 
1724 /*
1725  * add a new error definition
1726  */
1727 static int
1728 bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
1729 	struct bofi_errent *softc)
1730 {
1731 	struct bofi_errent *ep;
1732 	struct bofi_shadow *hp;
1733 	struct bofi_link   *lp;
1734 
1735 	/*
1736 	 * allocate errdef structure and put on in-use list
1737 	 */
1738 	ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
1739 	ep->errdef = *errdefp;
1740 	ep->name = namep;
1741 	ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
1742 	ep->errstate.severity = DDI_SERVICE_RESTORED;
1743 	ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
1744 	cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
1745 	/*
1746 	 * allocate space for logging
1747 	 */
1748 	ep->errdef.log.entries = 0;
1749 	ep->errdef.log.wrapcnt = 0;
1750 	if (ep->errdef.access_type & BOFI_LOG)
1751 		ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
1752 		    ep->errdef.log.logsize, KM_SLEEP);
1753 	else
1754 		ep->logbase = NULL;
1755 	/*
1756 	 * put on in-use list
1757 	 */
1758 	mutex_enter(&bofi_low_mutex);
1759 	mutex_enter(&bofi_mutex);
1760 	ep->next = errent_listp;
1761 	errent_listp = ep;
1762 	/*
1763 	 * and add it to the per-clone list
1764 	 */
1765 	ep->cnext = softc->cnext;
1766 	softc->cnext->cprev = ep;
1767 	ep->cprev = softc;
1768 	softc->cnext = ep;
1769 
1770 	/*
1771 	 * look for corresponding shadow handle structures and if we find any
1772 	 * tag this errdef structure on to their link lists.
1773 	 */
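	/*
	 * (the LLSZMASK arithmetic in the BOFI_DMA_RW case rounds the
	 * errdef window [offset, offset+len) inward to 64-bit alignment
	 * and requires that at least one whole uint64_t survives, since
	 * do_dma_corrupt() only corrupts naturally aligned 64-bit words)
	 */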
1774 	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1775 		if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
1776 		    hp->instance == errdefp->instance &&
1777 		    (((errdefp->access_type & BOFI_DMA_RW) &&
1778 		    (ep->errdef.rnumber == -1 ||
1779 		    hp->rnumber == ep->errdef.rnumber) &&
1780 		    hp->type == BOFI_DMA_HDL &&
1781 		    (((uintptr_t)(hp->addr + ep->errdef.offset +
1782 		    ep->errdef.len) & ~LLSZMASK) >
1783 		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
1784 		    LLSZMASK) & ~LLSZMASK))) ||
1785 		    ((errdefp->access_type & BOFI_INTR) &&
1786 		    hp->type == BOFI_INT_HDL) ||
1787 		    ((errdefp->access_type & BOFI_PIO_RW) &&
1788 		    hp->type == BOFI_ACC_HDL &&
1789 		    (errdefp->rnumber == -1 ||
1790 		    hp->rnumber == errdefp->rnumber) &&
1791 		    (errdefp->len == 0 ||
1792 		    hp->offset < errdefp->offset + errdefp->len) &&
1793 		    hp->offset + hp->len > errdefp->offset))) {
1794 			lp = bofi_link_freelist;
1795 			if (lp != NULL) {
1796 				bofi_link_freelist = lp->link;
1797 				lp->errentp = ep;
1798 				lp->link = hp->link;
1799 				hp->link = lp;
1800 			}
1801 		}
1802 	}
1803 	errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
1804 	mutex_exit(&bofi_mutex);
1805 	mutex_exit(&bofi_low_mutex);
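	/*
	 * register bofi_signal() as a soft interrupt handler - log-full
	 * events and intercepted ddi_report_fault() calls trigger it so
	 * that threads sleeping in bofi_errdef_check_w() can be woken
	 * from interrupt context
	 */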
1806 	ep->softintr_id = NULL;
1807 	return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
1808 	    NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
1809 }
1810 
1811 
1812 /*
1813  * delete existing errdef
1814  */
1815 static int
1816 bofi_errdef_free(struct bofi_errent *ep)
1817 {
1818 	struct bofi_errent *hep, *prev_hep;
1819 	struct bofi_link *lp, *prev_lp, *next_lp;
1820 	struct bofi_shadow *hp;
1821 
1822 	mutex_enter(&bofi_low_mutex);
1823 	mutex_enter(&bofi_mutex);
1824 	/*
1825 	 * don't just assume it's a valid ep - check that it's on the
1826 	 * in-use list
1827 	 */
1828 	prev_hep = NULL;
1829 	for (hep = errent_listp; hep != NULL; ) {
1830 		if (hep == ep)
1831 			break;
1832 		prev_hep = hep;
1833 		hep = hep->next;
1834 	}
1835 	if (hep == NULL) {
1836 		mutex_exit(&bofi_mutex);
1837 		mutex_exit(&bofi_low_mutex);
1838 		return (EINVAL);
1839 	}
1840 	/*
1841 	 * found it - delete from in-use list
1842 	 */
1843 
1844 	if (prev_hep)
1845 		prev_hep->next = hep->next;
1846 	else
1847 		errent_listp = hep->next;
1848 	/*
1849 	 * and take it off the per-clone list
1850 	 */
1851 	hep->cnext->cprev = hep->cprev;
1852 	hep->cprev->cnext = hep->cnext;
1853 	/*
1854 	 * see if we are on any shadow handle link lists - and if we
1855 	 * are then take us off
1856 	 */
1857 	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1858 		prev_lp = NULL;
1859 		for (lp = hp->link; lp != NULL; ) {
1860 			if (lp->errentp == ep) {
1861 				if (prev_lp)
1862 					prev_lp->link = lp->link;
1863 				else
1864 					hp->link = lp->link;
1865 				next_lp = lp->link;
1866 				lp->link = bofi_link_freelist;
1867 				bofi_link_freelist = lp;
1868 				lp = next_lp;
1869 			} else {
1870 				prev_lp = lp;
1871 				lp = lp->link;
1872 			}
1873 		}
1874 	}
1875 	mutex_exit(&bofi_mutex);
1876 	mutex_exit(&bofi_low_mutex);
1877 
1878 	cv_destroy(&ep->cv);
1879 	kmem_free(ep->name, ep->errdef.namesize+1);
1880 	if ((ep->errdef.access_type & BOFI_LOG) &&
1881 	    ep->errdef.log.logsize && ep->logbase) /* double check */
1882 		kmem_free(ep->logbase,
1883 		    sizeof (struct acc_log_elem) * ep->errdef.log.logsize);
1884 
1885 	if (ep->softintr_id)
1886 		ddi_remove_softintr(ep->softintr_id);
1887 	kmem_free(ep, sizeof (struct bofi_errent));
1888 	return (0);
1889 }
1890 
1891 
1892 /*
1893  * start all errdefs corresponding to this name and instance
1894  */
1895 static void
1896 bofi_start(struct bofi_errctl *errctlp, char *namep)
1897 {
1898 	struct bofi_errent *ep;
1899 
1900 	/*
1901 	 * look for any errdefs with matching name and instance
1902 	 */
1903 	mutex_enter(&bofi_low_mutex);
1904 	for (ep = errent_listp; ep != NULL; ep = ep->next)
1905 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1906 		    errctlp->instance == ep->errdef.instance) {
1907 			ep->state |= BOFI_DEV_ACTIVE;
1908 			(void) drv_getparm(TIME, &(ep->errdef.log.start_time));
1909 			ep->errdef.log.stop_time = 0ul;
1910 		}
1911 	mutex_exit(&bofi_low_mutex);
1912 }
1913 
1914 
1915 /*
1916  * stop all errdefs corresponding to this name and instance
1917  */
1918 static void
1919 bofi_stop(struct bofi_errctl *errctlp, char *namep)
1920 {
1921 	struct bofi_errent *ep;
1922 
1923 	/*
1924 	 * look for any errdefs with matching name and instance
1925 	 */
1926 	mutex_enter(&bofi_low_mutex);
1927 	for (ep = errent_listp; ep != NULL; ep = ep->next)
1928 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1929 		    errctlp->instance == ep->errdef.instance) {
1930 			ep->state &= ~BOFI_DEV_ACTIVE;
1931 			if (ep->errdef.log.stop_time == 0ul)
1932 				(void) drv_getparm(TIME,
1933 				    &(ep->errdef.log.stop_time));
1934 		}
1935 	mutex_exit(&bofi_low_mutex);
1936 }
1937 
1938 
1939 /*
1940  * wake up any threads waiting on this errdef
1941  */
1942 static uint_t
1943 bofi_signal(caddr_t arg)
1944 {
1945 	struct bofi_errdef *edp = (struct bofi_errdef *)arg;
1946 	struct bofi_errent *hep;
1947 	struct bofi_errent *ep =
1948 	    (struct bofi_errent *)(uintptr_t)edp->errdef_handle;
1949 
1950 	mutex_enter(&bofi_low_mutex);
1951 	for (hep = errent_listp; hep != NULL; ) {
1952 		if (hep == ep)
1953 			break;
1954 		hep = hep->next;
1955 	}
1956 	if (hep == NULL) {
1957 		mutex_exit(&bofi_low_mutex);
1958 		return (DDI_INTR_UNCLAIMED);
1959 	}
1960 	if ((ep->errdef.access_type & BOFI_LOG) &&
1961 	    (edp->log.flags & BOFI_LOG_FULL)) {
1962 		edp->log.stop_time = bofi_gettime();
1963 		ep->state |= BOFI_NEW_MESSAGE;
1964 		if (ep->state & BOFI_MESSAGE_WAIT)
1965 			cv_broadcast(&ep->cv);
1966 		ep->state &= ~BOFI_MESSAGE_WAIT;
1967 	}
1968 	if (ep->errstate.msg_time != 0) {
1969 		ep->state |= BOFI_NEW_MESSAGE;
1970 		if (ep->state & BOFI_MESSAGE_WAIT)
1971 			cv_broadcast(&ep->cv);
1972 		ep->state &= ~BOFI_MESSAGE_WAIT;
1973 	}
1974 	mutex_exit(&bofi_low_mutex);
1975 	return (DDI_INTR_CLAIMED);
1976 }
1977 
1978 
1979 /*
1980  * wake up all errdefs corresponding to this name and instance
1981  */
1982 static void
1983 bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
1984 {
1985 	struct bofi_errent *ep;
1986 
1987 	/*
1988 	 * look for any errdefs with matching name and instance
1989 	 */
1990 	mutex_enter(&bofi_low_mutex);
1991 	for (ep = errent_listp; ep != NULL; ep = ep->next)
1992 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1993 		    errctlp->instance == ep->errdef.instance) {
1994 			/*
1995 			 * wake up sleepers
1996 			 */
1997 			ep->state |= BOFI_NEW_MESSAGE;
1998 			if (ep->state & BOFI_MESSAGE_WAIT)
1999 				cv_broadcast(&ep->cv);
2000 			ep->state &= ~BOFI_MESSAGE_WAIT;
2001 		}
2002 	mutex_exit(&bofi_low_mutex);
2003 }
2004 
2005 
2006 /*
2007  * clear "acc_chk" for all errdefs corresponding to this name and instance
2008  * and wake them up.
2009  */
2010 static void
2011 bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep)
2012 {
2013 	struct bofi_errent *ep;
2014 
2015 	/*
2016 	 * look for any errdefs with matching name and instance
2017 	 */
2018 	mutex_enter(&bofi_low_mutex);
2019 	for (ep = errent_listp; ep != NULL; ep = ep->next)
2020 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2021 		    errctlp->instance == ep->errdef.instance) {
2022 			mutex_enter(&bofi_mutex);
2023 			if (ep->errdef.access_count == 0 &&
2024 			    ep->errdef.fail_count == 0)
2025 				ep->errdef.acc_chk = 0;
2026 			mutex_exit(&bofi_mutex);
2027 			/*
2028 			 * wake up sleepers
2029 			 */
2030 			ep->state |= BOFI_NEW_MESSAGE;
2031 			if (ep->state & BOFI_MESSAGE_WAIT)
2032 				cv_broadcast(&ep->cv);
2033 			ep->state &= ~BOFI_MESSAGE_WAIT;
2034 		}
2035 	mutex_exit(&bofi_low_mutex);
2036 }
2037 
2038 
2039 /*
2040  * set "fail_count" to 0 for all errdefs corresponding to this name and instance
2041  * whose "access_count" has expired, set "acc_chk" to 0 and wake them up.
2042  */
2043 static void
2044 bofi_clear_errors(struct bofi_errctl *errctlp, char *namep)
2045 {
2046 	struct bofi_errent *ep;
2047 
2048 	/*
2049 	 * look for any errdefs with matching name and instance
2050 	 */
2051 	mutex_enter(&bofi_low_mutex);
2052 	for (ep = errent_listp; ep != NULL; ep = ep->next)
2053 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2054 		    errctlp->instance == ep->errdef.instance) {
2055 			mutex_enter(&bofi_mutex);
2056 			if (ep->errdef.access_count == 0) {
2057 				ep->errdef.acc_chk = 0;
2058 				ep->errdef.fail_count = 0;
2059 				mutex_exit(&bofi_mutex);
2060 				if (ep->errdef.log.stop_time == 0ul)
2061 					(void) drv_getparm(TIME,
2062 					    &(ep->errdef.log.stop_time));
2063 			} else
2064 				mutex_exit(&bofi_mutex);
2065 			/*
2066 			 * wake up sleepers
2067 			 */
2068 			ep->state |= BOFI_NEW_MESSAGE;
2069 			if (ep->state & BOFI_MESSAGE_WAIT)
2070 				cv_broadcast(&ep->cv);
2071 			ep->state &= ~BOFI_MESSAGE_WAIT;
2072 		}
2073 	mutex_exit(&bofi_low_mutex);
2074 }
2075 
2076 
2077 /*
2078  * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2079  * this name and instance, set "acc_chk" to 0, and wake them up.
2080  */
2081 static void
2082 bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
2083 {
2084 	struct bofi_errent *ep;
2085 
2086 	/*
2087 	 * look for any errdefs with matching name and instance
2088 	 */
2089 	mutex_enter(&bofi_low_mutex);
2090 	for (ep = errent_listp; ep != NULL; ep = ep->next)
2091 		if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2092 		    errctlp->instance == ep->errdef.instance) {
2093 			mutex_enter(&bofi_mutex);
2094 			ep->errdef.acc_chk = 0;
2095 			ep->errdef.access_count = 0;
2096 			ep->errdef.fail_count = 0;
2097 			mutex_exit(&bofi_mutex);
2098 			if (ep->errdef.log.stop_time == 0ul)
2099 				(void) drv_getparm(TIME,
2100 				    &(ep->errdef.log.stop_time));
2101 			/*
2102 			 * wake up sleepers
2103 			 */
2104 			ep->state |= BOFI_NEW_MESSAGE;
2105 			if (ep->state & BOFI_MESSAGE_WAIT)
2106 				cv_broadcast(&ep->cv);
2107 			ep->state &= ~BOFI_MESSAGE_WAIT;
2108 		}
2109 	mutex_exit(&bofi_low_mutex);
2110 }
2111 
2112 
2113 /*
2114  * get state for this errdef
2115  */
2116 static int
2117 bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
2118 {
2119 	struct bofi_errent *hep;
2120 	struct bofi_errent *ep;
2121 
2122 	ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2123 	mutex_enter(&bofi_low_mutex);
2124 	/*
2125 	 * don't just assume it's a valid ep - check that it's on the
2126 	 * in-use list
2127 	 */
2128 	for (hep = errent_listp; hep != NULL; hep = hep->next)
2129 		if (hep == ep)
2130 			break;
2131 	if (hep == NULL) {
2132 		mutex_exit(&bofi_low_mutex);
2133 		return (EINVAL);
2134 	}
2135 	mutex_enter(&bofi_mutex);
2136 	ep->errstate.access_count = ep->errdef.access_count;
2137 	ep->errstate.fail_count = ep->errdef.fail_count;
2138 	ep->errstate.acc_chk = ep->errdef.acc_chk;
2139 	ep->errstate.log = ep->errdef.log;
2140 	*logpp = ep->logbase;
2141 	*errstatep = ep->errstate;
2142 	mutex_exit(&bofi_mutex);
2143 	mutex_exit(&bofi_low_mutex);
2144 	return (0);
2145 }
2146 
2147 
2148 /*
2149  * Wait for a ddi_report_fault message to come back for this errdef
2150  * Then return state for this errdef.
2151  * fault report is intercepted by bofi_post_event, which triggers
2152  * bofi_signal via a softint, which will wake up this routine if
2153  * we are waiting
2154  */
2155 static int
2156 bofi_errdef_check_w(struct bofi_errstate *errstatep,
2157     struct acc_log_elem **logpp)
2158 {
2159 	struct bofi_errent *hep;
2160 	struct bofi_errent *ep;
2161 	int rval = 0;
2162 
2163 	ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2164 	mutex_enter(&bofi_low_mutex);
2165 retry:
2166 	/*
2167 	 * don't just assume it's a valid ep - check that it's on the
2168 	 * in-use list
2169 	 */
2170 	for (hep = errent_listp; hep != NULL; hep = hep->next)
2171 		if (hep == ep)
2172 			break;
2173 	if (hep == NULL) {
2174 		mutex_exit(&bofi_low_mutex);
2175 		return (EINVAL);
2176 	}
2177 	/*
2178 	 * wait for ddi_report_fault for the devinfo corresponding
2179 	 * to this errdef
2180 	 */
2181 	if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
2182 		ep->state |= BOFI_MESSAGE_WAIT;
2183 		if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0) {
2184 			if (!(ep->state & BOFI_NEW_MESSAGE))
2185 				rval = EINTR;
2186 		}
2187 		goto retry;
2188 	}
2189 	ep->state &= ~BOFI_NEW_MESSAGE;
2190 	/*
2191 	 * we either didn't need to sleep, we've been woken up, or we've been
2192 	 * signaled - either way, return the state now
2193 	 */
2194 	mutex_enter(&bofi_mutex);
2195 	ep->errstate.access_count = ep->errdef.access_count;
2196 	ep->errstate.fail_count = ep->errdef.fail_count;
2197 	ep->errstate.acc_chk = ep->errdef.acc_chk;
2198 	ep->errstate.log = ep->errdef.log;
2199 	*logpp = ep->logbase;
2200 	*errstatep = ep->errstate;
2201 	mutex_exit(&bofi_mutex);
2202 	mutex_exit(&bofi_low_mutex);
2203 	return (rval);
2204 }
2205 
2206 
2207 /*
2208  * support routine - check if requested driver is defined as under test in the
2209  * conf file.
2210  */
2211 static int
2212 driver_under_test(dev_info_t *rdip)
2213 {
2214 	int i;
2215 	char	*rname;
2216 	major_t rmaj;
2217 
2218 	rname = ddi_get_name(rdip);
2219 	rmaj = ddi_name_to_major(rname);
2220 
2221 	/*
2222 	 * Require the user to specifically request the drivers under test.
2223 	 */
2224 	for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
2225 		if (driver_list_neg == 0) {
2226 			if (rmaj == ddi_name_to_major(&driver_list[i]))
2227 				return (1);
2228 		} else {
2229 			if (rmaj == ddi_name_to_major(&driver_list[i+1]))
2230 				return (0);
2231 		}
2232 	}
2233 	if (driver_list_neg == 0)
2234 		return (0);
2235 	else
2236 		return (1);
2237 
2238 }
2239 
2240 
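/*
 * log one access (or one burst of repeated accesses) against this
 * errdef's access log - for ddi_rep_*() bursts the callers pass the full
 * repcount on the first access and 0 on the rest, so a burst is logged
 * as a single entry unless BOFI_LOG_REPIO asks for one entry per access
 *
 * bofi_mutex always held when this is called.
 */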
2241 static void
2242 log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
2243     size_t repcount, uint64_t *valuep)
2244 {
2245 	struct bofi_errdef *edp = &(ep->errdef);
2246 	struct acc_log *log = &edp->log;
2247 
2248 	ASSERT(log != NULL);
2249 	ASSERT(MUTEX_HELD(&bofi_mutex));
2250 
2251 	if (log->flags & BOFI_LOG_REPIO)
2252 		repcount = 1;
2253 	else if (repcount == 0 && edp->access_count > 0 &&
2254 	    (log->flags & BOFI_LOG_FULL) == 0)
2255 		edp->access_count += 1;
2256 
2257 	if (repcount && log->entries < log->logsize) {
2258 		struct acc_log_elem *elem = ep->logbase + log->entries;
2259 
2260 		if (log->flags & BOFI_LOG_TIMESTAMP)
2261 			elem->access_time = bofi_gettime();
2262 		elem->access_type = at;
2263 		elem->offset = offset;
2264 		elem->value = valuep ? *valuep : 0ll;
2265 		elem->size = len;
2266 		elem->repcount = repcount;
2267 		++log->entries;
2268 		if (log->entries == log->logsize) {
2269 			log->flags |= BOFI_LOG_FULL;
2270 			ddi_trigger_softintr(((struct bofi_errent *)
2271 			    (uintptr_t)edp->errdef_handle)->softintr_id);
2272 		}
2273 	}
2274 	if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
2275 		log->wrapcnt++;
2276 		edp->access_count = log->logsize;
2277 		log->entries = 0;	/* wrap back to the start */
2278 	}
2279 }
2280 
2281 
2282 /*
2283  * got a condition match on dma read/write - check counts and corrupt
2284  * data if necessary
2285  *
2286  * bofi_mutex always held when this is called.
2287  */
2288 static void
2289 do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
2290 	uint_t synctype, off_t off, off_t length)
2291 {
2292 	uint64_t operand;
2293 	int i;
2294 	off_t len;
2295 	caddr_t logaddr;
2296 	uint64_t *addr;
2297 	uint64_t *endaddr;
2298 	ddi_dma_impl_t *hdlp;
2299 	ndi_err_t *errp;
2300 
2301 	ASSERT(MUTEX_HELD(&bofi_mutex));
2302 	if ((ep->errdef.access_count ||
2303 	    ep->errdef.fail_count) &&
2304 	    (ep->errdef.access_type & BOFI_LOG)) {
2305 		uint_t atype;
2306 
2307 		if (synctype == DDI_DMA_SYNC_FORDEV)
2308 			atype = BOFI_DMA_W;
2309 		else if (synctype == DDI_DMA_SYNC_FORCPU ||
2310 		    synctype == DDI_DMA_SYNC_FORKERNEL)
2311 			atype = BOFI_DMA_R;
2312 		else
2313 			atype = 0;
2314 		if ((off <= ep->errdef.offset &&
2315 		    off + length > ep->errdef.offset) ||
2316 		    (off > ep->errdef.offset &&
2317 		    off < ep->errdef.offset + ep->errdef.len)) {
2318 			logaddr = (caddr_t)((uintptr_t)(hp->addr +
2319 			    off + LLSZMASK) & ~LLSZMASK);
2320 
2321 			log_acc_event(ep, atype, logaddr - hp->addr,
2322 			    length, 1, NULL);
2323 		}
2324 	}
2325 	if (ep->errdef.access_count > 1) {
2326 		ep->errdef.access_count--;
2327 	} else if (ep->errdef.fail_count > 0) {
2328 		ep->errdef.fail_count--;
2329 		ep->errdef.access_count = 0;
2330 		/*
2331 		 * OK do the corruption
2332 		 */
2333 		if (ep->errstate.fail_time == 0)
2334 			ep->errstate.fail_time = bofi_gettime();
2335 		/*
2336 		 * work out how much to corrupt
2337 		 *
2338 		 * Make sure endaddr isn't greater than hp->addr + hp->len.
2339 		 * If endaddr becomes less than addr, len becomes negative
2340 		 * and the following loop isn't entered.
2341 		 */
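		/*
		 * illustrative example (assuming hp->addr is 8-byte
		 * aligned and hp->len >= 19): offset = 3, len = 16 gives
		 * addr = hp->addr + 8 and endaddr = hp->addr + 16, so
		 * exactly one uint64_t gets corrupted
		 */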
2342 		addr = (uint64_t *)((uintptr_t)((hp->addr +
2343 		    ep->errdef.offset) + LLSZMASK) & ~LLSZMASK);
2344 		endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len,
2345 		    ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK);
2346 		len = endaddr - addr;
2347 		operand = ep->errdef.operand;
2348 		hdlp = (ddi_dma_impl_t *)(hp->hdl.dma_handle);
2349 		errp = &hdlp->dmai_error;
2350 		if (ep->errdef.acc_chk & 2) {
2351 			uint64_t ena;
2352 			char buf[FM_MAX_CLASS];
2353 
2354 			errp->err_status = DDI_FM_NONFATAL;
2355 			(void) snprintf(buf, FM_MAX_CLASS, FM_SIMULATED_DMA);
2356 			ena = fm_ena_generate(0, FM_ENA_FMT1);
2357 			ddi_fm_ereport_post(hp->dip, buf, ena,
2358 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2359 			    FM_EREPORT_VERS0, NULL);
2360 		}
2361 		switch (ep->errdef.optype) {
2362 		case BOFI_EQUAL :
2363 			for (i = 0; i < len; i++)
2364 				*(addr + i) = operand;
2365 			break;
2366 		case BOFI_AND :
2367 			for (i = 0; i < len; i++)
2368 				*(addr + i) &= operand;
2369 			break;
2370 		case BOFI_OR :
2371 			for (i = 0; i < len; i++)
2372 				*(addr + i) |= operand;
2373 			break;
2374 		case BOFI_XOR :
2375 			for (i = 0; i < len; i++)
2376 				*(addr + i) ^= operand;
2377 			break;
2378 		default:
2379 			/* do nothing */
2380 			break;
2381 		}
2382 	}
2383 }
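
/*
 * note that the corruption routines treat acc_chk as a bitmask:
 * (acc_chk & 2) arms the simulated DMA ereport posted above, while
 * (acc_chk & 1) arms the simulated PIO ereport posted from
 * do_pior_corrupt() and do_piow_corrupt() below
 */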
2384 
2385 
2386 static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t);
2387 static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t);
2388 static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t);
2389 static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t);
2390 
2391 
2392 /*
2393  * check all errdefs linked to this shadow handle. If we've got a condition
2394  * match check counts and corrupt data if necessary
2395  *
2396  * bofi_mutex always held when this is called.
2397  *
2398  * because of the possibility of BOFI_NO_TRANSFER, we cannot fetch data
2399  * from io-space before calling this, so we pass in the func to do the
2400  * transfer - it gets called at most once (the done_get latch below).
2401  */
2402 static uint64_t
2403 do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr,
2404 	uint64_t (*func)(), size_t repcount, size_t accsize)
2405 {
2406 	struct bofi_errent *ep;
2407 	struct bofi_link   *lp;
2408 	uint64_t operand;
2409 	uintptr_t minlen;
2410 	intptr_t base;
2411 	int done_get = 0;
2412 	uint64_t get_val, gv;
2413 	ddi_acc_impl_t *hdlp;
2414 	ndi_err_t *errp;
2415 
2416 	ASSERT(MUTEX_HELD(&bofi_mutex));
2417 	/*
2418 	 * check through all errdefs associated with this shadow handle
2419 	 */
2420 	for (lp = hp->link; lp != NULL; lp = lp->link) {
2421 		ep = lp->errentp;
2422 		if (ep->errdef.len == 0)
2423 			minlen = hp->len;
2424 		else
2425 			minlen = min(hp->len, ep->errdef.len);
2426 		base = addr - hp->addr - ep->errdef.offset + hp->offset;
2427 		if ((ep->errdef.access_type & BOFI_PIO_R) &&
2428 		    (ep->state & BOFI_DEV_ACTIVE) &&
2429 		    base >= 0 && base < minlen) {
2430 			/*
2431 			 * condition match for pio read
2432 			 */
2433 			if (ep->errdef.access_count > 1) {
2434 				ep->errdef.access_count--;
2435 				if (done_get == 0) {
2436 					done_get = 1;
2437 					gv = get_val = func(hp, addr);
2438 				}
2439 				if (ep->errdef.access_type & BOFI_LOG) {
2440 					log_acc_event(ep, BOFI_PIO_R,
2441 					    addr - hp->addr,
2442 					    accsize, repcount, &gv);
2443 				}
2444 			} else if (ep->errdef.fail_count > 0) {
2445 				ep->errdef.fail_count--;
2446 				ep->errdef.access_count = 0;
2447 				/*
2448 				 * OK do corruption
2449 				 */
2450 				if (ep->errstate.fail_time == 0)
2451 					ep->errstate.fail_time = bofi_gettime();
2452 				operand = ep->errdef.operand;
2453 				if (done_get == 0) {
2454 					if (ep->errdef.optype ==
2455 					    BOFI_NO_TRANSFER)
2456 						/*
2457 						 * no transfer - bomb out
2458 						 */
2459 						return (operand);
2460 					done_get = 1;
2461 					gv = get_val = func(hp, addr);
2462 
2463 				}
2464 				if (ep->errdef.access_type & BOFI_LOG) {
2465 					log_acc_event(ep, BOFI_PIO_R,
2466 					    addr - hp->addr,
2467 					    accsize, repcount, &gv);
2468 				}
2469 				hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2470 				errp = hdlp->ahi_err;
2471 				if (ep->errdef.acc_chk & 1) {
2472 					uint64_t ena;
2473 					char buf[FM_MAX_CLASS];
2474 
2475 					errp->err_status = DDI_FM_NONFATAL;
2476 					(void) snprintf(buf, FM_MAX_CLASS,
2477 					    FM_SIMULATED_PIO);
2478 					ena = fm_ena_generate(0, FM_ENA_FMT1);
2479 					ddi_fm_ereport_post(hp->dip, buf, ena,
2480 					    DDI_NOSLEEP, FM_VERSION,
2481 					    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2482 					    NULL);
2483 				}
2484 				switch (ep->errdef.optype) {
2485 				case BOFI_EQUAL :
2486 					get_val = operand;
2487 					break;
2488 				case BOFI_AND :
2489 					get_val &= operand;
2490 					break;
2491 				case BOFI_OR :
2492 					get_val |= operand;
2493 					break;
2494 				case BOFI_XOR :
2495 					get_val ^= operand;
2496 					break;
2497 				default:
2498 					/* do nothing */
2499 					break;
2500 				}
2501 			}
2502 		}
2503 	}
2504 	if (done_get == 0)
2505 		return (func(hp, addr));
2506 	else
2507 		return (get_val);
2508 }
2509 
2510 
2511 /*
2512  * check all errdefs linked to this shadow handle. If we've got a condition
2513  * match check counts and corrupt data if necessary
2514  *
2515  * bofi_mutex always held when this is called.
2516  *
2517  * because of possibility of BOFI_NO_TRANSFER, we return 0 if no data
2518  * is to be written out to io-space, 1 otherwise
2519  */
2520 static int
2521 do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
2522 				size_t size, size_t repcount)
2523 {
2524 	struct bofi_errent *ep;
2525 	struct bofi_link   *lp;
2526 	uintptr_t minlen;
2527 	intptr_t base;
2528 	uint64_t v = *valuep;
2529 	ddi_acc_impl_t *hdlp;
2530 	ndi_err_t *errp;
2531 
2532 	ASSERT(MUTEX_HELD(&bofi_mutex));
2533 	/*
2534 	 * check through all errdefs associated with this shadow handle
2535 	 */
2536 	for (lp = hp->link; lp != NULL; lp = lp->link) {
2537 		ep = lp->errentp;
2538 		if (ep->errdef.len == 0)
2539 			minlen = hp->len;
2540 		else
2541 			minlen = min(hp->len, ep->errdef.len);
2542 		base = addr - hp->addr - ep->errdef.offset + hp->offset;
2543 		if ((ep->errdef.access_type & BOFI_PIO_W) &&
2544 		    (ep->state & BOFI_DEV_ACTIVE) &&
2545 		    base >= 0 && base < minlen) {
2546 			/*
2547 			 * condition match for pio write
2548 			 */
2549 
2550 			if (ep->errdef.access_count > 1) {
2551 				ep->errdef.access_count--;
2552 				if (ep->errdef.access_type & BOFI_LOG)
2553 					log_acc_event(ep, BOFI_PIO_W,
2554 					    addr - hp->addr, size,
2555 					    repcount, &v);
2556 			} else if (ep->errdef.fail_count > 0) {
2557 				ep->errdef.fail_count--;
2558 				ep->errdef.access_count = 0;
2559 				if (ep->errdef.access_type & BOFI_LOG)
2560 					log_acc_event(ep, BOFI_PIO_W,
2561 					    addr - hp->addr, size,
2562 					    repcount, &v);
2563 				/*
2564 				 * OK do corruption
2565 				 */
2566 				if (ep->errstate.fail_time == 0)
2567 					ep->errstate.fail_time = bofi_gettime();
2568 				hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2569 				errp = hdlp->ahi_err;
2570 				if (ep->errdef.acc_chk & 1) {
2571 					uint64_t ena;
2572 					char buf[FM_MAX_CLASS];
2573 
2574 					errp->err_status = DDI_FM_NONFATAL;
2575 					(void) snprintf(buf, FM_MAX_CLASS,
2576 					    FM_SIMULATED_PIO);
2577 					ena = fm_ena_generate(0, FM_ENA_FMT1);
2578 					ddi_fm_ereport_post(hp->dip, buf, ena,
2579 					    DDI_NOSLEEP, FM_VERSION,
2580 					    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2581 					    NULL);
2582 				}
2583 				switch (ep->errdef.optype) {
2584 				case BOFI_EQUAL :
2585 					*valuep = ep->errdef.operand;
2586 					break;
2587 				case BOFI_AND :
2588 					*valuep &= ep->errdef.operand;
2589 					break;
2590 				case BOFI_OR :
2591 					*valuep |= ep->errdef.operand;
2592 					break;
2593 				case BOFI_XOR :
2594 					*valuep ^= ep->errdef.operand;
2595 					break;
2596 				case BOFI_NO_TRANSFER :
2597 					/*
2598 					 * no transfer - bomb out
2599 					 */
2600 					return (0);
2601 				default:
2602 					/* do nothing */
2603 					break;
2604 				}
2605 			}
2606 		}
2607 	}
2608 	return (1);
2609 }
2610 
2611 
2612 static uint64_t
2613 do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
2614 {
2615 	return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr));
2616 }
2617 
2618 #define	BOFI_READ_CHECKS(type) \
2619 	if (bofi_ddi_check) \
2620 		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2621 	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2622 	    (caddr_t)addr - hp->addr >= hp->len)) { \
2623 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2624 		    "ddi_get() out of range addr %p not in %p/%llx\n", \
2625 		    (void *)addr, (void *)hp->addr, hp->len); \
2626 		return (0); \
2627 	}
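
/*
 * BOFI_READ_CHECKS: when bofi_ddi_check is set, bofi_map() below hands
 * the driver the spurious address 64 instead of the real mapping, and
 * this macro converts handle-relative accesses back to the real address,
 * so only direct dereferences of the bogus pointer fault. When
 * bofi_range_check is set, an out-of-range access is reported - value 2
 * panics, any other non-zero value just warns - and the access returns 0.
 *
 * The tryenter in the access routines below is presumably deliberate:
 * they can be called in any context, including at interrupt level, so if
 * bofi_mutex cannot be taken without blocking, the access is simply
 * performed uninstrumented rather than risk blocking or deadlock.
 */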
2628 
2629 /*
2630  * our getb() routine - use tryenter
2631  */
2632 static uint8_t
2633 bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
2634 {
2635 	struct bofi_shadow *hp;
2636 	uint8_t retval;
2637 
2638 	hp = handle->ahi_common.ah_bus_private;
2639 	BOFI_READ_CHECKS(uint8_t)
2640 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2641 		return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
2642 	retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1,
2643 	    1);
2644 	mutex_exit(&bofi_mutex);
2645 	return (retval);
2646 }
2647 
2648 
2649 static uint64_t
2650 do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
2651 {
2652 	return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr));
2653 }
2654 
2655 
2656 /*
2657  * our getw() routine - use tryenter
2658  */
2659 static uint16_t
2660 bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
2661 {
2662 	struct bofi_shadow *hp;
2663 	uint16_t retval;
2664 
2665 	hp = handle->ahi_common.ah_bus_private;
2666 	BOFI_READ_CHECKS(uint16_t)
2667 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2668 		return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
2669 	retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1,
2670 	    2);
2671 	mutex_exit(&bofi_mutex);
2672 	return (retval);
2673 }
2674 
2675 
2676 static uint64_t
2677 do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr)
2678 {
2679 	return (hp->save.acc.ahi_get32(&hp->save.acc, (uint32_t *)addr));
2680 }
2681 
2682 
2683 /*
2684  * our getl() routine - use tryenter
2685  */
2686 static uint32_t
2687 bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
2688 {
2689 	struct bofi_shadow *hp;
2690 	uint32_t retval;
2691 
2692 	hp = handle->ahi_common.ah_bus_private;
2693 	BOFI_READ_CHECKS(uint32_t)
2694 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2695 		return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
2696 	retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1,
2697 	    4);
2698 	mutex_exit(&bofi_mutex);
2699 	return (retval);
2700 }
2701 
2702 
2703 static uint64_t
2704 do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
2705 {
2706 	return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr));
2707 }
2708 
2709 
2710 /*
2711  * our getll() routine - use tryenter
2712  */
2713 static uint64_t
2714 bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
2715 {
2716 	struct bofi_shadow *hp;
2717 	uint64_t retval;
2718 
2719 	hp = handle->ahi_common.ah_bus_private;
2720 	BOFI_READ_CHECKS(uint64_t)
2721 	if (!hp->link || !mutex_tryenter(&bofi_mutex))
2722 		return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
2723 	retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1,
2724 	    8);
2725 	mutex_exit(&bofi_mutex);
2726 	return (retval);
2727 }
2728 
2729 #define	BOFI_WRITE_TESTS(type) \
2730 	if (bofi_ddi_check) \
2731 		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2732 	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2733 	    (caddr_t)addr - hp->addr >= hp->len)) { \
2734 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2735 		    "ddi_put() out of range addr %p not in %p/%llx\n", \
2736 		    (void *)addr, (void *)hp->addr, hp->len); \
2737 		return; \
2738 	}
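
/*
 * BOFI_WRITE_TESTS mirrors BOFI_READ_CHECKS above for the put side:
 * same bofi_ddi_check address redirection and the same bofi_range_check
 * reporting, except that an out-of-range put is dropped (plain return)
 * rather than returning 0
 */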
2739 
2740 /*
2741  * our putb() routine - use tryenter
2742  */
2743 static void
2744 bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
2745 {
2746 	struct bofi_shadow *hp;
2747 	uint64_t llvalue = value;
2748 
2749 	hp = handle->ahi_common.ah_bus_private;
2750 	BOFI_WRITE_TESTS(uint8_t)
2751 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2752 		hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2753 		return;
2754 	}
2755 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1))
2756 		hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2757 	mutex_exit(&bofi_mutex);
2758 }
2759 
2760 
2761 /*
2762  * our putw() routine - use tryenter
2763  */
2764 static void
2765 bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
2766 {
2767 	struct bofi_shadow *hp;
2768 	uint64_t llvalue = value;
2769 
2770 	hp = handle->ahi_common.ah_bus_private;
2771 	BOFI_WRITE_TESTS(uint16_t)
2772 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2773 		hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2774 		return;
2775 	}
2776 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1))
2777 		hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2778 	mutex_exit(&bofi_mutex);
2779 }
2780 
2781 
2782 /*
2783  * our putl() routine - use tryenter
2784  */
2785 static void
2786 bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
2787 {
2788 	struct bofi_shadow *hp;
2789 	uint64_t llvalue = value;
2790 
2791 	hp = handle->ahi_common.ah_bus_private;
2792 	BOFI_WRITE_TESTS(uint32_t)
2793 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2794 		hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2795 		return;
2796 	}
2797 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1))
2798 		hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2799 	mutex_exit(&bofi_mutex);
2800 }
2801 
2802 
2803 /*
2804  * our putll() routine - use tryenter
2805  */
2806 static void
2807 bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
2808 {
2809 	struct bofi_shadow *hp;
2810 	uint64_t llvalue = value;
2811 
2812 	hp = handle->ahi_common.ah_bus_private;
2813 	BOFI_WRITE_TESTS(uint64_t)
2814 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2815 		hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2816 		return;
2817 	}
2818 	if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1))
2819 		hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2820 	mutex_exit(&bofi_mutex);
2821 }
2822 
2823 #define	BOFI_REP_READ_TESTS(type) \
2824 	if (bofi_ddi_check) \
2825 		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2826 	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2827 	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2828 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2829 		    "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
2830 		    (void *)dev_addr, (void *)hp->addr, hp->len); \
2831 		if ((caddr_t)dev_addr < hp->addr || \
2832 		    (caddr_t)dev_addr - hp->addr >= hp->len) \
2833 			return; \
2834 		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2835 	}
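
/*
 * unlike the single-access checks, BOFI_REP_READ_TESTS clamps repcount
 * so that a burst which starts inside the mapped window but would run
 * off the end is truncated at hp->addr + hp->len instead of being
 * dropped entirely (BOFI_REP_WRITE_TESTS below does the same)
 */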
2836 
2837 /*
2838  * our rep_getb() routine - use tryenter
2839  */
2840 static void
2841 bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2842 	size_t repcount, uint_t flags)
2843 {
2844 	struct bofi_shadow *hp;
2845 	int i;
2846 	uint8_t *addr;
2847 
2848 	hp = handle->ahi_common.ah_bus_private;
2849 	BOFI_REP_READ_TESTS(uint8_t)
2850 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2851 		hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
2852 		    repcount, flags);
2853 		return;
2854 	}
2855 	for (i = 0; i < repcount; i++) {
2856 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2857 		*(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
2858 		    do_bofi_rd8, i ? 0 : repcount, 1);
2859 	}
2860 	mutex_exit(&bofi_mutex);
2861 }
2862 
2863 
2864 /*
2865  * our rep_getw() routine - use tryenter
2866  */
2867 static void
2868 bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2869 	uint16_t *dev_addr, size_t repcount, uint_t flags)
2870 {
2871 	struct bofi_shadow *hp;
2872 	int i;
2873 	uint16_t *addr;
2874 
2875 	hp = handle->ahi_common.ah_bus_private;
2876 	BOFI_REP_READ_TESTS(uint16_t)
2877 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2878 		hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
2879 		    repcount, flags);
2880 		return;
2881 	}
2882 	for (i = 0; i < repcount; i++) {
2883 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2884 		*(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
2885 		    do_bofi_rd16, i ? 0 : repcount, 2);
2886 	}
2887 	mutex_exit(&bofi_mutex);
2888 }
2889 
2890 
2891 /*
2892  * our rep_getl() routine - use tryenter
2893  */
2894 static void
2895 bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
2896 	uint32_t *dev_addr, size_t repcount, uint_t flags)
2897 {
2898 	struct bofi_shadow *hp;
2899 	int i;
2900 	uint32_t *addr;
2901 
2902 	hp = handle->ahi_common.ah_bus_private;
2903 	BOFI_REP_READ_TESTS(uint32_t)
2904 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2905 		hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
2906 		    repcount, flags);
2907 		return;
2908 	}
2909 	for (i = 0; i < repcount; i++) {
2910 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2911 		*(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
2912 		    do_bofi_rd32, i ? 0 : repcount, 4);
2913 	}
2914 	mutex_exit(&bofi_mutex);
2915 }
2916 
2917 
2918 /*
2919  * our rep_getll() routine - use tryenter
2920  */
2921 static void
2922 bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
2923 	uint64_t *dev_addr, size_t repcount, uint_t flags)
2924 {
2925 	struct bofi_shadow *hp;
2926 	int i;
2927 	uint64_t *addr;
2928 
2929 	hp = handle->ahi_common.ah_bus_private;
2930 	BOFI_REP_READ_TESTS(uint64_t)
2931 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2932 		hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
2933 		    repcount, flags);
2934 		return;
2935 	}
2936 	for (i = 0; i < repcount; i++) {
2937 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2938 		*(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
2939 		    do_bofi_rd64, i ? 0 : repcount, 8);
2940 	}
2941 	mutex_exit(&bofi_mutex);
2942 }
2943 
2944 #define	BOFI_REP_WRITE_TESTS(type) \
2945 	if (bofi_ddi_check) \
2946 		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2947 	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2948 	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2949 		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2950 		    "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
2951 		    (void *)dev_addr, (void *)hp->addr, hp->len); \
2952 		if ((caddr_t)dev_addr < hp->addr || \
2953 		    (caddr_t)dev_addr - hp->addr >= hp->len) \
2954 			return; \
2955 		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2956 	}
2957 
2958 /*
2959  * our rep_putb() routine - use tryenter
2960  */
2961 static void
2962 bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2963 	size_t repcount, uint_t flags)
2964 {
2965 	struct bofi_shadow *hp;
2966 	int i;
2967 	uint64_t llvalue;
2968 	uint8_t *addr;
2969 
2970 	hp = handle->ahi_common.ah_bus_private;
2971 	BOFI_REP_WRITE_TESTS(uint8_t)
2972 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2973 		hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
2974 		    repcount, flags);
2975 		return;
2976 	}
2977 	for (i = 0; i < repcount; i++) {
2978 		llvalue = *(host_addr + i);
2979 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2980 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 :
2981 		    repcount))
2982 			hp->save.acc.ahi_put8(&hp->save.acc, addr,
2983 			    (uint8_t)llvalue);
2984 	}
2985 	mutex_exit(&bofi_mutex);
2986 }
2987 
2988 
2989 /*
2990  * our rep_putw() routine - use tryenter
2991  */
2992 static void
2993 bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2994 	uint16_t *dev_addr, size_t repcount, uint_t flags)
2995 {
2996 	struct bofi_shadow *hp;
2997 	int i;
2998 	uint64_t llvalue;
2999 	uint16_t *addr;
3000 
3001 	hp = handle->ahi_common.ah_bus_private;
3002 	BOFI_REP_WRITE_TESTS(uint16_t)
3003 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3004 		hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
3005 		    repcount, flags);
3006 		return;
3007 	}
3008 	for (i = 0; i < repcount; i++) {
3009 		llvalue = *(host_addr + i);
3010 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3011 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 :
3012 		    repcount))
3013 			hp->save.acc.ahi_put16(&hp->save.acc, addr,
3014 			    (uint16_t)llvalue);
3015 	}
3016 	mutex_exit(&bofi_mutex);
3017 }
3018 
3019 
3020 /*
3021  * our rep_putl() routine - use tryenter
3022  */
3023 static void
3024 bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
3025 	uint32_t *dev_addr, size_t repcount, uint_t flags)
3026 {
3027 	struct bofi_shadow *hp;
3028 	int i;
3029 	uint64_t llvalue;
3030 	uint32_t *addr;
3031 
3032 	hp = handle->ahi_common.ah_bus_private;
3033 	BOFI_REP_WRITE_TESTS(uint32_t)
3034 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3035 		hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
3036 		    repcount, flags);
3037 		return;
3038 	}
3039 	for (i = 0; i < repcount; i++) {
3040 		llvalue = *(host_addr + i);
3041 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3042 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 0 :
3043 		    repcount))
3044 			hp->save.acc.ahi_put32(&hp->save.acc, addr,
3045 			    (uint32_t)llvalue);
3046 	}
3047 	mutex_exit(&bofi_mutex);
3048 }
3049 
3050 
3051 /*
3052  * our rep_putll() routine - use tryenter
3053  */
3054 static void
3055 bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
3056 	uint64_t *dev_addr, size_t repcount, uint_t flags)
3057 {
3058 	struct bofi_shadow *hp;
3059 	int i;
3060 	uint64_t llvalue;
3061 	uint64_t *addr;
3062 
3063 	hp = handle->ahi_common.ah_bus_private;
3064 	BOFI_REP_WRITE_TESTS(uint64_t)
3065 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3066 		hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
3067 		    repcount, flags);
3068 		return;
3069 	}
3070 	for (i = 0; i < repcount; i++) {
3071 		llvalue = *(host_addr + i);
3072 		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3073 		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 :
3074 		    repcount))
3075 			hp->save.acc.ahi_put64(&hp->save.acc, addr,
3076 			    (uint64_t)llvalue);
3077 	}
3078 	mutex_exit(&bofi_mutex);
3079 }
3080 
3081 
3082 /*
3083  * our ddi_map routine
3084  */
3085 static int
3086 bofi_map(dev_info_t *dip, dev_info_t *rdip,
3087 	ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
3088 {
3089 	ddi_acc_impl_t *ap;
3090 	struct bofi_shadow *hp;
3091 	struct bofi_errent *ep;
3092 	struct bofi_link   *lp, *next_lp;
3093 	int retval;
3094 	struct bofi_shadow *dhashp;
3095 	struct bofi_shadow *hhashp;
3096 
3097 	switch (reqp->map_op) {
3098 	case DDI_MO_MAP_LOCKED:
3099 		/*
3100 		 * for this case, get the nexus to do the real work first
3101 		 */
3102 		retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
3103 		    vaddrp);
3104 		if (retval != DDI_SUCCESS)
3105 			return (retval);
3106 
3107 		ap = (ddi_acc_impl_t *)reqp->map_handlep;
3108 		if (ap == NULL)
3109 			return (DDI_SUCCESS);
3110 		/*
3111 		 * if driver_list is set, only intercept those drivers
3112 		 */
3113 		if (!driver_under_test(ap->ahi_common.ah_dip))
3114 			return (DDI_SUCCESS);
3115 
3116 		/*
3117 		 * support for ddi_regs_map_setup()
3118 		 * - allocate shadow handle structure and fill it in
3119 		 */
3120 		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
3121 		(void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
3122 		    NAMESIZE);
3123 		hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
3124 		hp->dip = ap->ahi_common.ah_dip;
3125 		hp->addr = *vaddrp;
3126 		/*
3127 		 * return spurious value to catch direct access to registers
3128 		 */
3129 		if (bofi_ddi_check)
3130 			*vaddrp = (caddr_t)64;
3131 		hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
3132 		hp->offset = offset;
3133 		if (len == 0)
3134 			hp->len = INT_MAX - offset;
3135 		else
3136 			hp->len = min(len, INT_MAX - offset);
3137 		hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
3138 		hp->link = NULL;
3139 		hp->type = BOFI_ACC_HDL;
3140 		/*
3141 		 * save existing function pointers and plug in our own
3142 		 */
3143 		hp->save.acc = *ap;
3144 		ap->ahi_get8 = bofi_rd8;
3145 		ap->ahi_get16 = bofi_rd16;
3146 		ap->ahi_get32 = bofi_rd32;
3147 		ap->ahi_get64 = bofi_rd64;
3148 		ap->ahi_put8 = bofi_wr8;
3149 		ap->ahi_put16 = bofi_wr16;
3150 		ap->ahi_put32 = bofi_wr32;
3151 		ap->ahi_put64 = bofi_wr64;
3152 		ap->ahi_rep_get8 = bofi_rep_rd8;
3153 		ap->ahi_rep_get16 = bofi_rep_rd16;
3154 		ap->ahi_rep_get32 = bofi_rep_rd32;
3155 		ap->ahi_rep_get64 = bofi_rep_rd64;
3156 		ap->ahi_rep_put8 = bofi_rep_wr8;
3157 		ap->ahi_rep_put16 = bofi_rep_wr16;
3158 		ap->ahi_rep_put32 = bofi_rep_wr32;
3159 		ap->ahi_rep_put64 = bofi_rep_wr64;
3160 		ap->ahi_fault_check = bofi_check_acc_hdl;
3161 #if defined(__sparc)
3162 #else
3163 		ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
3164 #endif
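		/*
		 * (clearing DDI_ACCATTR_DIRECT should force the common
		 * ddi_get/ddi_put code on these platforms through the ahi_*
		 * vectors we just replaced, rather than letting it
		 * dereference the mapping directly - our reading of the
		 * access attribute; the DDI framework is the authority here)
		 */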
3165 		/*
3166 		 * stick in a pointer to our shadow handle
3167 		 */
3168 		ap->ahi_common.ah_bus_private = hp;
3169 		/*
3170 		 * add to dhash, hhash and inuse lists
3171 		 */
3172 		mutex_enter(&bofi_low_mutex);
3173 		mutex_enter(&bofi_mutex);
3174 		hp->next = shadow_list.next;
3175 		shadow_list.next->prev = hp;
3176 		hp->prev = &shadow_list;
3177 		shadow_list.next = hp;
3178 		hhashp = HDL_HHASH(ap);
3179 		hp->hnext = hhashp->hnext;
3180 		hhashp->hnext->hprev = hp;
3181 		hp->hprev = hhashp;
3182 		hhashp->hnext = hp;
3183 		dhashp = HDL_DHASH(hp->dip);
3184 		hp->dnext = dhashp->dnext;
3185 		dhashp->dnext->dprev = hp;
3186 		hp->dprev = dhashp;
3187 		dhashp->dnext = hp;
3188 		/*
3189 		 * chain on any pre-existing errdefs that apply to this
3190 		 * acc_handle
3191 		 */
3192 		for (ep = errent_listp; ep != NULL; ep = ep->next) {
3193 			if (ddi_name_to_major(hp->name) ==
3194 			    ddi_name_to_major(ep->name) &&
3195 			    hp->instance == ep->errdef.instance &&
3196 			    (ep->errdef.access_type & BOFI_PIO_RW) &&
3197 			    (ep->errdef.rnumber == -1 ||
3198 			    hp->rnumber == ep->errdef.rnumber) &&
3199 			    (ep->errdef.len == 0 ||
3200 			    offset < ep->errdef.offset + ep->errdef.len) &&
3201 			    offset + hp->len > ep->errdef.offset) {
3202 				lp = bofi_link_freelist;
3203 				if (lp != NULL) {
3204 					bofi_link_freelist = lp->link;
3205 					lp->errentp = ep;
3206 					lp->link = hp->link;
3207 					hp->link = lp;
3208 				}
3209 			}
3210 		}
3211 		mutex_exit(&bofi_mutex);
3212 		mutex_exit(&bofi_low_mutex);
3213 		return (DDI_SUCCESS);
3214 	case DDI_MO_UNMAP:
3215 
3216 		ap = (ddi_acc_impl_t *)reqp->map_handlep;
3217 		if (ap == NULL)
3218 			break;
3219 		/*
3220 		 * support for ddi_regs_map_free()
3221 		 * - check we really have a shadow handle for this one
3222 		 */
3223 		mutex_enter(&bofi_low_mutex);
3224 		mutex_enter(&bofi_mutex);
3225 		hhashp = HDL_HHASH(ap);
3226 		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3227 			if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
3228 				break;
3229 		if (hp == hhashp) {
3230 			mutex_exit(&bofi_mutex);
3231 			mutex_exit(&bofi_low_mutex);
3232 			break;
3233 		}
3234 		/*
3235 		 * got a shadow handle - restore original pointers
3236 		 */
3237 		*ap = hp->save.acc;
3238 		*vaddrp = hp->addr;
3239 		/*
3240 		 * remove from dhash, hhash and inuse lists
3241 		 */
3242 		hp->hnext->hprev = hp->hprev;
3243 		hp->hprev->hnext = hp->hnext;
3244 		hp->dnext->dprev = hp->dprev;
3245 		hp->dprev->dnext = hp->dnext;
3246 		hp->next->prev = hp->prev;
3247 		hp->prev->next = hp->next;
3248 		/*
3249 		 * free any errdef link structures tagged onto the shadow handle
3250 		 */
3251 		for (lp = hp->link; lp != NULL; ) {
3252 			next_lp = lp->link;
3253 			lp->link = bofi_link_freelist;
3254 			bofi_link_freelist = lp;
3255 			lp = next_lp;
3256 		}
3257 		hp->link = NULL;
3258 		mutex_exit(&bofi_mutex);
3259 		mutex_exit(&bofi_low_mutex);
3260 		/*
3261 		 * finally delete shadow handle
3262 		 */
3263 		kmem_free(hp, sizeof (struct bofi_shadow));
3264 		break;
3265 	default:
3266 		break;
3267 	}
3268 	return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
3269 }
3270 
3271 
3272 /*
3273  * chain any pre-existing errdefs on to newly created dma handle
3274  * if required call do_dma_corrupt() to corrupt data
3275  */
3276 static void
3277 chain_on_errdefs(struct bofi_shadow *hp)
3278 {
3279 	struct bofi_errent *ep;
3280 	struct bofi_link   *lp;
3281 
3282 	ASSERT(MUTEX_HELD(&bofi_mutex));
3283 	/*
3284 	 * chain on any pre-existing errdefs that apply to this dma_handle
3285 	 */
3286 	for (ep = errent_listp; ep != NULL; ep = ep->next) {
3287 		if (ddi_name_to_major(hp->name) ==
3288 		    ddi_name_to_major(ep->name) &&
3289 		    hp->instance == ep->errdef.instance &&
3290 		    (ep->errdef.rnumber == -1 ||
3291 		    hp->rnumber == ep->errdef.rnumber) &&
3292 		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
3293 		    (((uintptr_t)(hp->addr + ep->errdef.offset +
3294 		    ep->errdef.len) & ~LLSZMASK) >
3295 		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
3296 		    LLSZMASK) & ~LLSZMASK)))) {
3297 			/*
3298 			 * got a match - link it on
3299 			 */
3300 			lp = bofi_link_freelist;
3301 			if (lp != NULL) {
3302 				bofi_link_freelist = lp->link;
3303 				lp->errentp = ep;
3304 				lp->link = hp->link;
3305 				hp->link = lp;
3306 				if ((ep->errdef.access_type & BOFI_DMA_W) &&
3307 				    (hp->flags & DDI_DMA_WRITE) &&
3308 				    (ep->state & BOFI_DEV_ACTIVE)) {
3309 					do_dma_corrupt(hp, ep,
3310 					    DDI_DMA_SYNC_FORDEV,
3311 					    0, hp->len);
3312 				}
3313 			}
3314 		}
3315 	}
3316 }
3317 
3318 
3319 /*
3320  * need to do the copy byte-by-byte in case one of the pages is little-endian
3321  */
3322 static void
3323 xbcopy(void *from, void *to, u_longlong_t len)
3324 {
3325 	uchar_t *f = from;
3326 	uchar_t *t = to;
3327 
3328 	while (len--)
3329 		*t++ = *f++;
3330 }
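
/*
 * (xbcopy() is the workhorse of the bofi_sync_check machinery: when
 * bofi_sync_check is set we hand the nexus a shadow copy of the driver's
 * DMA buffer and use xbcopy() to move data between original and shadow
 * on explicit and implicit ddi_dma_sync(), the idea being that a driver
 * which omits a needed sync then reads stale data instead of going
 * undetected)
 */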
3331 
3332 
3333 /*
3334  * our ddi_dma_map routine
3335  */
3336 static int
3337 bofi_dma_map(dev_info_t *dip, dev_info_t *rdip,
3338 		struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
3339 {
3340 	struct bofi_shadow *hp, *xhp;
3341 	int maxrnumber = 0;
3342 	int retval = DDI_DMA_NORESOURCES;
3343 	auto struct ddi_dma_req dmareq;
3344 	int sleep;
3345 	struct bofi_shadow *dhashp;
3346 	struct bofi_shadow *hhashp;
3347 	ddi_dma_impl_t *mp;
3348 	unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3349 
3350 	/*
3351 	 * if driver_list is set, only intercept those drivers
3352 	 */
3353 	if (handlep == NULL || !driver_under_test(rdip))
3354 		return (save_bus_ops.bus_dma_map(dip, rdip, dmareqp, handlep));
3355 
3356 	sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
3357 	/*
3358 	 * allocate shadow handle structure and fill it in
3359 	 */
3360 	hp = kmem_zalloc(sizeof (struct bofi_shadow), sleep);
3361 	if (hp == NULL)
3362 		goto error;
3363 	(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3364 	hp->instance = ddi_get_instance(rdip);
3365 	hp->dip = rdip;
3366 	hp->flags = dmareqp->dmar_flags;
3367 	hp->link = NULL;
3368 	hp->type = BOFI_DMA_HDL;
3369 	/*
3370 	 * get a kernel virtual mapping
3371 	 */
3372 	hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3373 	if (hp->addr == NULL)
3374 		goto error;
3375 	if (bofi_sync_check) {
3376 		/*
3377 		 * Take a copy and pass pointers to this up to nexus instead.
3378 		 * Data will be copied from the original on explicit
3379 		 * and implicit ddi_dma_sync()
3380 		 *
3381 		 * - maintain page alignment because some devices assume it.
3382 		 */
3383 		hp->origaddr = hp->addr;
3384 		hp->allocaddr = ddi_umem_alloc(
3385 		    ((uintptr_t)hp->addr & pagemask) + hp->len, sleep,
3386 		    &hp->umem_cookie);
3387 		if (hp->allocaddr == NULL)
3388 			goto error;
3389 		hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3390 		if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3391 			xbcopy(hp->origaddr, hp->addr, hp->len);
3392 		dmareq = *dmareqp;
3393 		dmareq.dmar_object.dmao_size = hp->len;
3394 		dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3395 		dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3396 		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3397 		dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3398 		dmareqp = &dmareq;
3399 	}
3400 	/*
3401 	 * call nexus to do the real work
3402 	 */
3403 	retval = save_bus_ops.bus_dma_map(dip, rdip, dmareqp, handlep);
3404 	if (retval != DDI_SUCCESS)
3405 		goto error2;
3406 	/*
3407 	 * now set dma_handle to point to real handle
3408 	 */
3409 	hp->hdl.dma_handle = *handlep;
3410 	/*
3411 	 * unset DMP_NOSYNC
3412 	 */
3413 	mp = (ddi_dma_impl_t *)*handlep;
3414 	mp->dmai_rflags &= ~DMP_NOSYNC;
3415 	mp->dmai_fault_check = bofi_check_dma_hdl;
3416 	/*
3417 	 * bind and unbind are cached in devinfo - must overwrite them
3418 	 * - note that our bind and unbind are quite happy dealing with
3419 	 * any handles for this devinfo that were previously allocated
3420 	 */
3421 	if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3422 		DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3423 	if (save_bus_ops.bus_dma_unbindhdl ==
3424 	    DEVI(rdip)->devi_bus_dma_unbindfunc)
3425 		DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3426 	mutex_enter(&bofi_low_mutex);
3427 	mutex_enter(&bofi_mutex);
3428 	/*
3429 	 * get an "rnumber" for this handle - really just seeking to
3430 	 * get a unique number - generally only care for early allocated
3431 	 * handles - so if we get as far as INT_MAX, just stay there
3432 	 */
3433 	dhashp = HDL_DHASH(hp->dip);
3434 	for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3435 		if (ddi_name_to_major(xhp->name) ==
3436 		    ddi_name_to_major(hp->name) &&
3437 		    xhp->instance == hp->instance &&
3438 		    xhp->type == BOFI_DMA_HDL)
3439 			if (xhp->rnumber >= maxrnumber) {
3440 				if (xhp->rnumber == INT_MAX)
3441 					maxrnumber = INT_MAX;
3442 				else
3443 					maxrnumber = xhp->rnumber + 1;
3444 			}
3445 	hp->rnumber = maxrnumber;
3446 	/*
3447 	 * add to dhash, hhash and inuse lists
3448 	 */
3449 	hp->next = shadow_list.next;
3450 	shadow_list.next->prev = hp;
3451 	hp->prev = &shadow_list;
3452 	shadow_list.next = hp;
3453 	hhashp = HDL_HHASH(*handlep);
3454 	hp->hnext = hhashp->hnext;
3455 	hhashp->hnext->hprev = hp;
3456 	hp->hprev = hhashp;
3457 	hhashp->hnext = hp;
3458 	dhashp = HDL_DHASH(hp->dip);
3459 	hp->dnext = dhashp->dnext;
3460 	dhashp->dnext->dprev = hp;
3461 	hp->dprev = dhashp;
3462 	dhashp->dnext = hp;
3463 	/*
3464 	 * chain on any pre-existing errdefs that apply to this
3465 	 * dma_handle and corrupt if required (as there is an implicit
3466 	 * ddi_dma_sync() in this call)
3467 	 */
3468 	chain_on_errdefs(hp);
3469 	mutex_exit(&bofi_mutex);
3470 	mutex_exit(&bofi_low_mutex);
3471 	return (retval);
3472 error:
3473 	if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3474 		/*
3475 		 * what to do here? Wait a bit and try again
3476 		 */
3477 		(void) timeout((void (*)())dmareqp->dmar_fp,
3478 		    dmareqp->dmar_arg, 10);
3479 	}
3480 error2:
3481 	if (hp) {
3482 		ddi_dmareq_mapout(hp->mapaddr, hp->len);
3483 		if (bofi_sync_check && hp->allocaddr)
3484 			ddi_umem_free(hp->umem_cookie);
3485 		kmem_free(hp, sizeof (struct bofi_shadow));
3486 	}
3487 	return (retval);
3488 }
3489 
3490 
3491 /*
3492  * our ddi_dma_allochdl routine
3493  */
3494 static int
3495 bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
3496 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
3497 {
3498 	int retval = DDI_DMA_NORESOURCES;
3499 	struct bofi_shadow *hp, *xhp;
3500 	int maxrnumber = 0;
3501 	struct bofi_shadow *dhashp;
3502 	struct bofi_shadow *hhashp;
3503 	ddi_dma_impl_t *mp;
3504 
3505 	/*
3506 	 * if driver_list is set, only intercept those drivers
3507 	 */
3508 	if (!driver_under_test(rdip))
3509 		return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
3510 		    waitfp, arg, handlep));
3511 
3512 	/*
3513 	 * allocate shadow handle structure and fill it in
3514 	 */
3515 	hp = kmem_zalloc(sizeof (struct bofi_shadow),
3516 	    ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
3517 	if (hp == NULL) {
3518 		/*
3519 		 * what to do here? Wait a bit and try again
3520 		 */
3521 		if (waitfp != DDI_DMA_DONTWAIT)
3522 			(void) timeout((void (*)())waitfp, arg, 10);
3523 		return (retval);
3524 	}
3525 	(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3526 	hp->instance = ddi_get_instance(rdip);
3527 	hp->dip = rdip;
3528 	hp->link = NULL;
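	/* BOFI_NULL marks the shadow as allocated but not yet bound */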
3529 	hp->type = BOFI_NULL;
3530 	/*
3531 	 * call nexus to do the real work
3532 	 */
3533 	retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
3534 	    handlep);
3535 	if (retval != DDI_SUCCESS) {
3536 		kmem_free(hp, sizeof (struct bofi_shadow));
3537 		return (retval);
3538 	}
3539 	/*
3540 	 * now set dma_handle to point to the real handle
3541 	 */
3542 	hp->hdl.dma_handle = *handlep;
3543 	mp = (ddi_dma_impl_t *)*handlep;
3544 	mp->dmai_fault_check = bofi_check_dma_hdl;
3545 	/*
3546 	 * bind and unbind are cached in devinfo - must overwrite them
3547 	 * - note that our bind and unbind are quite happy dealing with
3548 	 * any handles for this devinfo that were previously allocated
3549 	 */
3550 	if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3551 		DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3552 	if (save_bus_ops.bus_dma_unbindhdl ==
3553 	    DEVI(rdip)->devi_bus_dma_unbindfunc)
3554 		DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3555 	mutex_enter(&bofi_low_mutex);
3556 	mutex_enter(&bofi_mutex);
3557 	/*
3558 	 * get an "rnumber" for this handle - really just seeking to
3559 	 * get a unique number - generally only care for early allocated
3560 	 * handles - so if we get as far as INT_MAX, just stay there
3561 	 */
3562 	dhashp = HDL_DHASH(hp->dip);
3563 	for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3564 		if (ddi_name_to_major(xhp->name) ==
3565 		    ddi_name_to_major(hp->name) &&
3566 		    xhp->instance == hp->instance &&
3567 		    (xhp->type == BOFI_DMA_HDL ||
3568 		    xhp->type == BOFI_NULL))
3569 			if (xhp->rnumber >= maxrnumber) {
3570 				if (xhp->rnumber == INT_MAX)
3571 					maxrnumber = INT_MAX;
3572 				else
3573 					maxrnumber = xhp->rnumber + 1;
3574 			}
3575 	hp->rnumber = maxrnumber;
3576 	/*
3577 	 * add to dhash, hhash and inuse lists
3578 	 */
3579 	hp->next = shadow_list.next;
3580 	shadow_list.next->prev = hp;
3581 	hp->prev = &shadow_list;
3582 	shadow_list.next = hp;
3583 	hhashp = HDL_HHASH(*handlep);
3584 	hp->hnext = hhashp->hnext;
3585 	hhashp->hnext->hprev = hp;
3586 	hp->hprev = hhashp;
3587 	hhashp->hnext = hp;
3588 	dhashp = HDL_DHASH(hp->dip);
3589 	hp->dnext = dhashp->dnext;
3590 	dhashp->dnext->dprev = hp;
3591 	hp->dprev = dhashp;
3592 	dhashp->dnext = hp;
3593 	mutex_exit(&bofi_mutex);
3594 	mutex_exit(&bofi_low_mutex);
3595 	return (retval);
3596 }
3597 
3598 
3599 /*
3600  * our ddi_dma_freehdl routine
3601  */
3602 static int
3603 bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3604 {
3605 	int retval;
3606 	struct bofi_shadow *hp;
3607 	struct bofi_shadow *hhashp;
3608 
3609 	/*
3610 	 * find shadow for this handle before the nexus frees it
3611 	 */
3612 	mutex_enter(&bofi_low_mutex);
3613 	mutex_enter(&bofi_mutex);
3614 	hhashp = HDL_HHASH(handle);
3615 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3616 		if (hp->hdl.dma_handle == handle)
3617 			break;
3618 	mutex_exit(&bofi_mutex);
3619 	mutex_exit(&bofi_low_mutex);
3620 	/*
3621 	 * call nexus to do the real work
3622 	 */
3623 	retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
3624 	if (retval != DDI_SUCCESS) {
3625 		return (retval);
3626 	}
3627 	/*
3628 	 * did we really have a shadow for this handle
3629 	 */
3630 	if (hp == hhashp)
3631 		return (retval);
3632 	/*
3633 	 * yes we have - see if it's still bound
3634 	 */
3635 	mutex_enter(&bofi_low_mutex);
3636 	mutex_enter(&bofi_mutex);
3637 	if (hp->type != BOFI_NULL)
3638 		panic("driver freeing bound dma_handle");
3639 	/*
3640 	 * remove from dhash, hhash and inuse lists
3641 	 */
3642 	hp->hnext->hprev = hp->hprev;
3643 	hp->hprev->hnext = hp->hnext;
3644 	hp->dnext->dprev = hp->dprev;
3645 	hp->dprev->dnext = hp->dnext;
3646 	hp->next->prev = hp->prev;
3647 	hp->prev->next = hp->next;
3648 	mutex_exit(&bofi_mutex);
3649 	mutex_exit(&bofi_low_mutex);
3650 
3651 	kmem_free(hp, sizeof (struct bofi_shadow));
3652 	return (retval);
3653 }
3654 
3655 
3656 /*
3657  * our ddi_dma_bindhdl routine
3658  */
3659 static int
3660 bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
3661 	ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
3662 	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3663 {
3664 	int retval = DDI_DMA_NORESOURCES;
3665 	struct ddi_dma_req dmareq;
3666 	struct bofi_shadow *hp;
3667 	struct bofi_shadow *hhashp;
3668 	ddi_dma_impl_t *mp;
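	/*
	 * ddi_ptob(rdip, 1) is the page size in bytes for this bus, so
	 * pagemask extracts the within-page offset of an address
	 */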
3669 	unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3670 
3671 	/*
3672 	 * check we really have a shadow for this handle
3673 	 */
3674 	mutex_enter(&bofi_low_mutex);
3675 	mutex_enter(&bofi_mutex);
3676 	hhashp = HDL_HHASH(handle);
3677 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3678 		if (hp->hdl.dma_handle == handle)
3679 			break;
3680 	mutex_exit(&bofi_mutex);
3681 	mutex_exit(&bofi_low_mutex);
3682 	if (hp == hhashp) {
3683 		/*
3684 		 * no we don't - just call nexus to do the real work
3685 		 */
3686 		return (save_bus_ops.bus_dma_bindhdl(dip, rdip, handle,
3687 		    dmareqp, cookiep, ccountp));
3688 	}
3689 	/*
3690 	 * yes we have - see if it's already bound
3691 	 */
3692 	if (hp->type != BOFI_NULL)
3693 		return (DDI_DMA_INUSE);
3694 
3695 	hp->flags = dmareqp->dmar_flags;
3696 	/*
3697 	 * get a kernel virtual mapping
3698 	 */
3699 	hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3700 	if (hp->addr == NULL)
3701 		goto error;
3702 	if (bofi_sync_check) {
3703 		/*
3704 		 * Take a copy and pass pointers to this up to nexus instead.
3705 		 * Data will be copied from the original on explicit
3706 		 * and implicit ddi_dma_sync()
3707 		 *
3708 		 * - maintain page alignment because some devices assume it.
3709 		 */
3710 		hp->origaddr = hp->addr;
3711 		hp->allocaddr = ddi_umem_alloc(
3712 		    ((uintptr_t)hp->addr & pagemask) + hp->len,
3713 		    (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
3714 		    &hp->umem_cookie);
3715 		if (hp->allocaddr == NULL)
3716 			goto error;
3717 		hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3718 		if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3719 			xbcopy(hp->origaddr, hp->addr, hp->len);
3720 		dmareq = *dmareqp;
3721 		dmareq.dmar_object.dmao_size = hp->len;
3722 		dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3723 		dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3724 		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3725 		dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3726 		dmareqp = &dmareq;
3727 	}
3728 	/*
3729 	 * call nexus to do the real work
3730 	 */
3731 	retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3732 	    cookiep, ccountp);
3733 	if (retval != DDI_SUCCESS)
3734 		goto error2;
3735 	/*
3736 	 * unset DMP_NOSYNC
3737 	 */
3738 	mp = (ddi_dma_impl_t *)handle;
3739 	mp->dmai_rflags &= ~DMP_NOSYNC;
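	/*
	 * (if DMP_NOSYNC were left set, implicit ddi_dma_sync() calls
	 * could be optimized away, bypassing our corruption hooks)
	 */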
3740 	/*
3741 	 * chain on any pre-existing errdefs that apply to this
3742 	 * dma_handle and corrupt if required (as there is an implicit
3743 	 * ddi_dma_sync() in this call)
3744 	 */
3745 	mutex_enter(&bofi_low_mutex);
3746 	mutex_enter(&bofi_mutex);
3747 	hp->type = BOFI_DMA_HDL;
3748 	chain_on_errdefs(hp);
3749 	mutex_exit(&bofi_mutex);
3750 	mutex_exit(&bofi_low_mutex);
3751 	return (retval);
3752 
3753 error:
3754 	if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3755 		/*
3756 		 * no resources - wait a bit and let the driver's callback retry
3757 		 */
3758 		(void) timeout((void (*)())dmareqp->dmar_fp,
3759 		    dmareqp->dmar_arg, 10);
3760 	}
3761 error2:
3762 	if (hp) {
3763 		ddi_dmareq_mapout(hp->mapaddr, hp->len);
3764 		if (bofi_sync_check && hp->allocaddr)
3765 			ddi_umem_free(hp->umem_cookie);
3766 		hp->mapaddr = NULL;
3767 		hp->allocaddr = NULL;
3768 		hp->origaddr = NULL;
3769 	}
3770 	return (retval);
3771 }
3772 
3773 
3774 /*
3775  * our ddi_dma_unbindhdl routine
3776  */
3777 static int
3778 bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3779 {
3780 	struct bofi_link *lp, *next_lp;
3781 	struct bofi_errent *ep;
3782 	int retval;
3783 	struct bofi_shadow *hp;
3784 	struct bofi_shadow *hhashp;
3785 
3786 	/*
3787 	 * call nexus to do the real work
3788 	 */
3789 	retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
3790 	if (retval != DDI_SUCCESS)
3791 		return (retval);
3792 	/*
3793 	 * check we really have a shadow for this handle
3794 	 */
3795 	mutex_enter(&bofi_low_mutex);
3796 	mutex_enter(&bofi_mutex);
3797 	hhashp = HDL_HHASH(handle);
3798 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3799 		if (hp->hdl.dma_handle == handle)
3800 			break;
3801 	if (hp == hhashp) {
3802 		mutex_exit(&bofi_mutex);
3803 		mutex_exit(&bofi_low_mutex);
3804 		return (retval);
3805 	}
3806 	/*
3807 	 * yes we have - see if it's already unbound
3808 	 */
3809 	if (hp->type == BOFI_NULL)
3810 		panic("driver unbinding unbound dma_handle");
3811 	/*
3812 	 * free any errdef link structures tagged on to this
3813 	 * shadow handle
3814 	 */
3815 	for (lp = hp->link; lp != NULL; ) {
3816 		next_lp = lp->link;
3817 		/*
3818 		 * there is an implicit sync_for_cpu on free -
3819 		 * may need to corrupt
3820 		 */
3821 		ep = lp->errentp;
3822 		if ((ep->errdef.access_type & BOFI_DMA_R) &&
3823 		    (hp->flags & DDI_DMA_READ) &&
3824 		    (ep->state & BOFI_DEV_ACTIVE)) {
3825 			do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
3826 		}
3827 		lp->link = bofi_link_freelist;
3828 		bofi_link_freelist = lp;
3829 		lp = next_lp;
3830 	}
3831 	hp->link = NULL;
3832 	hp->type = BOFI_NULL;
3833 	mutex_exit(&bofi_mutex);
3834 	mutex_exit(&bofi_low_mutex);
3835 
3836 	if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
3837 		/*
3838 		 * implicit sync_for_cpu - copy data back
3839 		 */
3840 		if (hp->allocaddr)
3841 			xbcopy(hp->addr, hp->origaddr, hp->len);
3842 	ddi_dmareq_mapout(hp->mapaddr, hp->len);
3843 	if (bofi_sync_check && hp->allocaddr)
3844 		ddi_umem_free(hp->umem_cookie);
3845 	hp->mapaddr = NULL;
3846 	hp->allocaddr = NULL;
3847 	hp->origaddr = NULL;
3848 	return (retval);
3849 }
3850 
3851 
3852 /*
3853  * our ddi_dma_sync routine
3854  */
3855 static int
3856 bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
3857 		ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
3858 {
3859 	struct bofi_link *lp;
3860 	struct bofi_errent *ep;
3861 	struct bofi_shadow *hp;
3862 	struct bofi_shadow *hhashp;
3863 	int retval;
3864 
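	/*
	 * ordering matters - for sync-for-cpu/kernel the real sync must
	 * complete before we corrupt and copy back the data the cpu
	 * will read; for sync-for-dev it must happen afterwards
	 */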
3865 	if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
3866 		/*
3867 		 * in this case get nexus driver to do sync first
3868 		 */
3869 		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3870 		    len, flags);
3871 		if (retval != DDI_SUCCESS)
3872 			return (retval);
3873 	}
3874 	/*
3875 	 * check we really have a shadow for this handle
3876 	 */
3877 	mutex_enter(&bofi_low_mutex);
3878 	mutex_enter(&bofi_mutex);
3879 	hhashp = HDL_HHASH(handle);
3880 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3881 		if (hp->hdl.dma_handle == handle &&
3882 		    hp->type == BOFI_DMA_HDL)
3883 			break;
3884 	mutex_exit(&bofi_mutex);
3885 	mutex_exit(&bofi_low_mutex);
3886 	if (hp != hhashp) {
3887 		/*
3888 		 * yes - do we need to copy data from original
3889 		 */
3890 		if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
3891 			if (hp->allocaddr)
3892 				xbcopy(hp->origaddr+off, hp->addr+off,
3893 				    len ? len : (hp->len - off));
3894 		/*
3895 		 * yes - check if we need to corrupt the data
3896 		 */
3897 		mutex_enter(&bofi_low_mutex);
3898 		mutex_enter(&bofi_mutex);
3899 		for (lp = hp->link; lp != NULL; lp = lp->link) {
3900 			ep = lp->errentp;
3901 			if ((((ep->errdef.access_type & BOFI_DMA_R) &&
3902 			    (flags == DDI_DMA_SYNC_FORCPU ||
3903 			    flags == DDI_DMA_SYNC_FORKERNEL)) ||
3904 			    ((ep->errdef.access_type & BOFI_DMA_W) &&
3905 			    (flags == DDI_DMA_SYNC_FORDEV))) &&
3906 			    (ep->state & BOFI_DEV_ACTIVE)) {
3907 				do_dma_corrupt(hp, ep, flags, off,
3908 				    len ? len : (hp->len - off));
3909 			}
3910 		}
3911 		mutex_exit(&bofi_mutex);
3912 		mutex_exit(&bofi_low_mutex);
3913 		/*
3914 	 * do we need to copy data to original
3915 		 */
3916 		if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
3917 		    flags == DDI_DMA_SYNC_FORKERNEL))
3918 			if (hp->allocaddr)
3919 				xbcopy(hp->addr+off, hp->origaddr+off,
3920 				    len ? len : (hp->len - off));
3921 	}
3922 	if (flags == DDI_DMA_SYNC_FORDEV)
3923 		/*
3924 		 * in this case get nexus driver to do sync last
3925 		 */
3926 		retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3927 		    len, flags);
3928 	return (retval);
3929 }
3930 
3931 
3932 /*
3933  * our dma_win routine
3934  */
3935 static int
3936 bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
3937 	ddi_dma_handle_t handle, uint_t win, off_t *offp,
3938 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3939 {
3940 	struct bofi_shadow *hp;
3941 	struct bofi_shadow *hhashp;
3942 	int retval;
3943 	ddi_dma_impl_t *mp;
3944 
3945 	/*
3946 	 * call nexus to do the real work
3947 	 */
3948 	retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
3949 	    cookiep, ccountp);
3950 	if (retval != DDI_SUCCESS)
3951 		return (retval);
3952 	/*
3953 	 * check we really have a shadow for this handle
3954 	 */
3955 	mutex_enter(&bofi_low_mutex);
3956 	mutex_enter(&bofi_mutex);
3957 	hhashp = HDL_HHASH(handle);
3958 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3959 		if (hp->hdl.dma_handle == handle)
3960 			break;
3961 	if (hp != hhashp) {
3962 		/*
3963 		 * yes - make sure DMP_NOSYNC is unset
3964 		 */
3965 		mp = (ddi_dma_impl_t *)handle;
3966 		mp->dmai_rflags &= ~DMP_NOSYNC;
3967 	}
3968 	mutex_exit(&bofi_mutex);
3969 	mutex_exit(&bofi_low_mutex);
3970 	return (retval);
3971 }
3972 
3973 
3974 /*
3975  * our dma_ctl routine
3976  */
3977 static int
3978 bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
3979 		ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
3980 		off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
3981 {
3982 	struct bofi_link *lp, *next_lp;
3983 	struct bofi_errent *ep;
3984 	struct bofi_shadow *hp;
3985 	struct bofi_shadow *hhashp;
3986 	int retval;
3987 	int i;
3988 	struct bofi_shadow *dummyhp;
3989 	ddi_dma_impl_t *mp;
3990 
3991 	/*
3992 	 * get nexus to do real work
3993 	 */
3994 	retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
3995 	    lenp, objp, flags);
3996 	if (retval != DDI_SUCCESS)
3997 		return (retval);
3998 	/*
3999 	 * if driver_list is set, only intercept those drivers
4000 	 */
4001 	if (!driver_under_test(rdip))
4002 		return (DDI_SUCCESS);
4003 
4004 #if defined(__sparc)
4005 	/*
4006 	 * check if this is a dvma_reserve - that one's like a
4007 	 * dma_allochdl and needs to be handled separately
4008 	 */
4009 	if (request == DDI_DMA_RESERVE) {
4010 		bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
4011 		return (DDI_SUCCESS);
4012 	}
4013 #endif
4014 	/*
4015 	 * check we really have a shadow for this handle
4016 	 */
4017 	mutex_enter(&bofi_low_mutex);
4018 	mutex_enter(&bofi_mutex);
4019 	hhashp = HDL_HHASH(handle);
4020 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
4021 		if (hp->hdl.dma_handle == handle)
4022 			break;
4023 	if (hp == hhashp) {
4024 		mutex_exit(&bofi_mutex);
4025 		mutex_exit(&bofi_low_mutex);
4026 		return (retval);
4027 	}
4028 	/*
4029 	 * yes we have - see what kind of command this is
4030 	 */
4031 	switch (request) {
4032 	case DDI_DMA_RELEASE:
4033 		/*
4034 		 * dvma release - release dummy handle and all the index handles
4035 		 */
4036 		dummyhp = hp;
4037 		dummyhp->hnext->hprev = dummyhp->hprev;
4038 		dummyhp->hprev->hnext = dummyhp->hnext;
4039 		mutex_exit(&bofi_mutex);
4040 		mutex_exit(&bofi_low_mutex);
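		/*
		 * for the dummy handle, len is the number of index
		 * handles in hparrayp (set up by bofi_dvma_reserve())
		 */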
4041 		for (i = 0; i < dummyhp->len; i++) {
4042 			hp = dummyhp->hparrayp[i];
4043 			/*
4044 			 * check that none of the index handles is still loaded
4045 			 */
4046 			if (hp->type != BOFI_NULL)
4047 				panic("driver releasing loaded dvma");
4048 			/*
4049 			 * remove from dhash and inuse lists
4050 			 */
4051 			mutex_enter(&bofi_low_mutex);
4052 			mutex_enter(&bofi_mutex);
4053 			hp->dnext->dprev = hp->dprev;
4054 			hp->dprev->dnext = hp->dnext;
4055 			hp->next->prev = hp->prev;
4056 			hp->prev->next = hp->next;
4057 			mutex_exit(&bofi_mutex);
4058 			mutex_exit(&bofi_low_mutex);
4059 
4060 			if (bofi_sync_check && hp->allocaddr)
4061 				ddi_umem_free(hp->umem_cookie);
4062 			kmem_free(hp, sizeof (struct bofi_shadow));
4063 		}
4064 		kmem_free(dummyhp->hparrayp, dummyhp->len *
4065 		    sizeof (struct bofi_shadow *));
4066 		kmem_free(dummyhp, sizeof (struct bofi_shadow));
4067 		return (retval);
4068 	case DDI_DMA_FREE:
4069 		/*
4070 		 * ddi_dma_free case - remove from dhash, hhash and inuse lists
4071 		 */
4072 		hp->hnext->hprev = hp->hprev;
4073 		hp->hprev->hnext = hp->hnext;
4074 		hp->dnext->dprev = hp->dprev;
4075 		hp->dprev->dnext = hp->dnext;
4076 		hp->next->prev = hp->prev;
4077 		hp->prev->next = hp->next;
4078 		/*
4079 		 * free any errdef link structures tagged on to this
4080 		 * shadow handle
4081 		 */
4082 		for (lp = hp->link; lp != NULL; ) {
4083 			next_lp = lp->link;
4084 			/*
4085 			 * there is an implicit sync_for_cpu on free -
4086 			 * may need to corrupt
4087 			 */
4088 			ep = lp->errentp;
4089 			if ((ep->errdef.access_type & BOFI_DMA_R) &&
4090 			    (hp->flags & DDI_DMA_READ) &&
4091 			    (ep->state & BOFI_DEV_ACTIVE)) {
4092 				do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU,
4093 				    0, hp->len);
4094 			}
4095 			lp->link = bofi_link_freelist;
4096 			bofi_link_freelist = lp;
4097 			lp = next_lp;
4098 		}
4099 		hp->link = NULL;
4100 		mutex_exit(&bofi_mutex);
4101 		mutex_exit(&bofi_low_mutex);
4102 
4103 		if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
4104 			if (hp->allocaddr)
4105 				xbcopy(hp->addr, hp->origaddr, hp->len);
4106 		ddi_dmareq_mapout(hp->mapaddr, hp->len);
4107 		if (bofi_sync_check && hp->allocaddr)
4108 			ddi_umem_free(hp->umem_cookie);
4109 		kmem_free(hp, sizeof (struct bofi_shadow));
4110 		return (retval);
4111 	case DDI_DMA_MOVWIN:
4112 		mp = (ddi_dma_impl_t *)handle;
4113 		mp->dmai_rflags &= ~DMP_NOSYNC;
4114 		break;
4115 	case DDI_DMA_NEXTWIN:
4116 		mp = (ddi_dma_impl_t *)handle;
4117 		mp->dmai_rflags &= ~DMP_NOSYNC;
4118 		break;
4119 	default:
4120 		break;
4121 	}
4122 	mutex_exit(&bofi_mutex);
4123 	mutex_exit(&bofi_low_mutex);
4124 	return (retval);
4125 }
4126 
4127 #if defined(__sparc)
4128 /*
4129  * dvma reserve case from bofi_dma_ctl()
4130  */
4131 static void
4132 bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle)
4133 {
4134 	struct bofi_shadow *hp;
4135 	struct bofi_shadow *dummyhp;
4136 	struct bofi_shadow *dhashp;
4137 	struct bofi_shadow *hhashp;
4138 	ddi_dma_impl_t *mp;
4139 	struct fast_dvma *nexus_private;
4140 	int i, count;
4141 
4142 	mp = (ddi_dma_impl_t *)handle;
4143 	count = mp->dmai_ndvmapages;
4144 	/*
4145 	 * allocate dummy shadow handle structure
4146 	 */
4147 	dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP);
4148 	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
4149 		/*
4150 		 * overlay our routines over the nexus's dvma routines
4151 		 */
4152 		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
4153 		dummyhp->save.dvma_ops = *(nexus_private->ops);
4154 		nexus_private->ops = &bofi_dvma_ops;
4155 	}
4156 	/*
4157 	 * now fill in the dummy handle. This just gets put on hhash queue
4158 	 * so our dvma routines can find it and index off to the handle they
4159 	 * really want.
4160 	 */
4161 	(void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE);
4162 	dummyhp->instance = ddi_get_instance(rdip);
4163 	dummyhp->rnumber = -1;
4164 	dummyhp->dip = rdip;
4165 	dummyhp->len = count;
4166 	dummyhp->hdl.dma_handle = handle;
4167 	dummyhp->link = NULL;
4168 	dummyhp->type = BOFI_NULL;
4169 	/*
4170 	 * allocate space for real handles
4171 	 */
4172 	dummyhp->hparrayp = kmem_alloc(count *
4173 	    sizeof (struct bofi_shadow *), KM_SLEEP);
4174 	for (i = 0; i < count; i++) {
4175 		/*
4176 		 * allocate shadow handle structures and fill them in
4177 		 */
4178 		hp = kmem_zalloc(sizeof (*hp), KM_SLEEP);
4179 		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
4180 		hp->instance = ddi_get_instance(rdip);
4181 		hp->rnumber = -1;
4182 		hp->dip = rdip;
4183 		hp->hdl.dma_handle = 0;
4184 		hp->link = NULL;
4185 		hp->type = BOFI_NULL;
4186 		if (bofi_sync_check) {
4187 			unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
4188 			/*
4189 			 * Take a copy and set this to be hp->addr
4190 			 * Data will be copied to and from the original on
4191 			 * explicit and implicit ddi_dma_sync()
4192 			 *
4193 			 * - maintain page alignment because some devices
4194 			 * assume it.
4195 			 */
4196 			hp->allocaddr = ddi_umem_alloc(
4197 			    ((int)(uintptr_t)hp->addr & pagemask)
4198 				+ pagemask + 1,
4199 			    KM_SLEEP, &hp->umem_cookie);
4200 			hp->addr = hp->allocaddr +
4201 			    ((int)(uintptr_t)hp->addr & pagemask);
4202 		}
4203 		/*
4204 		 * add to dhash and inuse lists.
4205 		 * these don't go on hhash queue.
4206 		 */
4207 		mutex_enter(&bofi_low_mutex);
4208 		mutex_enter(&bofi_mutex);
4209 		hp->next = shadow_list.next;
4210 		shadow_list.next->prev = hp;
4211 		hp->prev = &shadow_list;
4212 		shadow_list.next = hp;
4213 		dhashp = HDL_DHASH(hp->dip);
4214 		hp->dnext = dhashp->dnext;
4215 		dhashp->dnext->dprev = hp;
4216 		hp->dprev = dhashp;
4217 		dhashp->dnext = hp;
4218 		dummyhp->hparrayp[i] = hp;
4219 		mutex_exit(&bofi_mutex);
4220 		mutex_exit(&bofi_low_mutex);
4221 	}
4222 	/*
4223 	 * add dummy handle to hhash list only
4224 	 */
4225 	mutex_enter(&bofi_low_mutex);
4226 	mutex_enter(&bofi_mutex);
4227 	hhashp = HDL_HHASH(handle);
4228 	dummyhp->hnext = hhashp->hnext;
4229 	hhashp->hnext->hprev = dummyhp;
4230 	dummyhp->hprev = hhashp;
4231 	hhashp->hnext = dummyhp;
4232 	mutex_exit(&bofi_mutex);
4233 	mutex_exit(&bofi_low_mutex);
4234 }
4235 
4236 /*
4237  * our dvma_kaddr_load()
4238  */
4239 static void
4240 bofi_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
4241 	ddi_dma_cookie_t *cp)
4242 {
4243 	struct bofi_shadow *dummyhp;
4244 	struct bofi_shadow *hp;
4245 	struct bofi_shadow *hhashp;
4246 	struct bofi_errent *ep;
4247 	struct bofi_link   *lp;
4248 
4249 	/*
4250 	 * check we really have a dummy shadow for this handle
4251 	 */
4252 	mutex_enter(&bofi_low_mutex);
4253 	mutex_enter(&bofi_mutex);
4254 	hhashp = HDL_HHASH(h);
4255 	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4256 	    dummyhp = dummyhp->hnext)
4257 		if (dummyhp->hdl.dma_handle == h)
4258 			break;
4259 	mutex_exit(&bofi_mutex);
4260 	mutex_exit(&bofi_low_mutex);
4261 	if (dummyhp == hhashp) {
4262 		/*
4263 		 * no dummy shadow - panic
4264 		 */
4265 		panic("driver dvma_kaddr_load with no reserve");
4266 	}
4267 
4268 	/*
4269 	 * find real hp
4270 	 */
4271 	hp = dummyhp->hparrayp[index];
4272 	/*
4273 	 * check it's not already loaded
4274 	 */
4275 	if (hp->type != BOFI_NULL)
4276 		panic("driver loading loaded dvma");
4277 	/*
4278 	 * if we're doing copying, we just need to change origaddr and get
4279 	 * nexus to map hp->addr again
4280 	 * if not, set hp->addr to new address.
4281 	 * - note these are always kernel virtual addresses - no need to map
4282 	 */
4283 	if (bofi_sync_check && hp->allocaddr) {
4284 		hp->origaddr = a;
4285 		a = hp->addr;
4286 	} else
4287 		hp->addr = a;
4288 	hp->len = len;
4289 	/*
4290 	 * get nexus to do the real work
4291 	 */
4292 	dummyhp->save.dvma_ops.dvma_kaddr_load(h, a, len, index, cp);
4293 	/*
4294 	 * chain on any pre-existing errdefs that apply to this dma_handle
4295 	 * no need to corrupt - there's no implicit dma_sync on this one
4296 	 */
4297 	mutex_enter(&bofi_low_mutex);
4298 	mutex_enter(&bofi_mutex);
4299 	hp->type = BOFI_DMA_HDL;
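	/*
	 * the LLSZMASK arithmetic checks that the errdef's range covers
	 * at least one whole aligned longlong within the loaded range
	 * (corruption is presumably done in longlong-sized units)
	 */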
4300 	for (ep = errent_listp; ep != NULL; ep = ep->next) {
4301 		if (ddi_name_to_major(hp->name) ==
4302 		    ddi_name_to_major(ep->name) &&
4303 		    hp->instance == ep->errdef.instance &&
4304 		    (ep->errdef.rnumber == -1 ||
4305 		    hp->rnumber == ep->errdef.rnumber) &&
4306 		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
4307 		    (((uintptr_t)(hp->addr + ep->errdef.offset +
4308 		    ep->errdef.len) & ~LLSZMASK) >
4309 		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
4310 		    LLSZMASK) & ~LLSZMASK)))) {
4311 			lp = bofi_link_freelist;
4312 			if (lp != NULL) {
4313 				bofi_link_freelist = lp->link;
4314 				lp->errentp = ep;
4315 				lp->link = hp->link;
4316 				hp->link = lp;
4317 			}
4318 		}
4319 	}
4320 	mutex_exit(&bofi_mutex);
4321 	mutex_exit(&bofi_low_mutex);
4322 }
4323 
4324 /*
4325  * our dvma_unload()
4326  */
4327 static void
4328 bofi_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
4329 {
4330 	struct bofi_link *lp, *next_lp;
4331 	struct bofi_errent *ep;
4332 	struct bofi_shadow *dummyhp;
4333 	struct bofi_shadow *hp;
4334 	struct bofi_shadow *hhashp;
4335 
4336 	/*
4337 	 * check we really have a dummy shadow for this handle
4338 	 */
4339 	mutex_enter(&bofi_low_mutex);
4340 	mutex_enter(&bofi_mutex);
4341 	hhashp = HDL_HHASH(h);
4342 	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4343 	    dummyhp = dummyhp->hnext)
4344 		if (dummyhp->hdl.dma_handle == h)
4345 			break;
4346 	mutex_exit(&bofi_mutex);
4347 	mutex_exit(&bofi_low_mutex);
4348 	if (dummyhp == hhashp) {
4349 		/*
4350 		 * no dummy shadow - panic
4351 		 */
4352 		panic("driver dvma_unload with no reserve");
4353 	}
4354 	dummyhp->save.dvma_ops.dvma_unload(h, index, view);
4355 	/*
4356 	 * find real hp
4357 	 */
4358 	hp = dummyhp->hparrayp[index];
4359 	/*
4360 	 * check its not already unloaded
4361 	 * check it's not already unloaded
4362 	if (hp->type == BOFI_NULL)
4363 		panic("driver unloading unloaded dvma");
4364 	/*
4365 	 * free any errdef link structures tagged on to this
4366 	 * shadow handle - do corruption if necessary
4367 	 */
4368 	mutex_enter(&bofi_low_mutex);
4369 	mutex_enter(&bofi_mutex);
4370 	for (lp = hp->link; lp != NULL; ) {
4371 		next_lp = lp->link;
4372 		ep = lp->errentp;
4373 		if ((ep->errdef.access_type & BOFI_DMA_R) &&
4374 		    (view == DDI_DMA_SYNC_FORCPU ||
4375 		    view == DDI_DMA_SYNC_FORKERNEL) &&
4376 		    (ep->state & BOFI_DEV_ACTIVE)) {
4377 			do_dma_corrupt(hp, ep, view, 0, hp->len);
4378 		}
4379 		lp->link = bofi_link_freelist;
4380 		bofi_link_freelist = lp;
4381 		lp = next_lp;
4382 	}
4383 	hp->link = NULL;
4384 	hp->type = BOFI_NULL;
4385 	mutex_exit(&bofi_mutex);
4386 	mutex_exit(&bofi_low_mutex);
4387 	/*
4388 	 * if there is an explicit sync_for_cpu, then do copy to original
4389 	 */
4390 	if (bofi_sync_check &&
4391 	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL))
4392 		if (hp->allocaddr)
4393 			xbcopy(hp->addr, hp->origaddr, hp->len);
4394 }
4395 
4396 /*
4397  * our dvma_sync()
4398  */
4399 static void
4400 bofi_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
4401 {
4402 	struct bofi_link *lp;
4403 	struct bofi_errent *ep;
4404 	struct bofi_shadow *hp;
4405 	struct bofi_shadow *dummyhp;
4406 	struct bofi_shadow *hhashp;
4407 
4408 	/*
4409 	 * check we really have a dummy shadow for this handle
4410 	 */
4411 	mutex_enter(&bofi_low_mutex);
4412 	mutex_enter(&bofi_mutex);
4413 	hhashp = HDL_HHASH(h);
4414 	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4415 	    dummyhp = dummyhp->hnext)
4416 		if (dummyhp->hdl.dma_handle == h)
4417 			break;
4418 	mutex_exit(&bofi_mutex);
4419 	mutex_exit(&bofi_low_mutex);
4420 	if (dummyhp == hhashp) {
4421 		/*
4422 		 * no dummy shadow - panic
4423 		 */
4424 		panic("driver dvma_sync with no reserve");
4425 	}
4426 	/*
4427 	 * find real hp
4428 	 */
4429 	hp = dummyhp->hparrayp[index];
4430 	/*
4431 	 * check it's still loaded
4432 	 */
4433 	if (hp->type == BOFI_NULL)
4434 		panic("driver syncing unloaded dvma");
4435 	if (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)
4436 		/*
4437 		 * in this case do sync first
4438 		 */
4439 		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
4440 	/*
4441 	 * if there is an explicit sync_for_dev, then do copy from original
4442 	 */
4443 	if (bofi_sync_check && view == DDI_DMA_SYNC_FORDEV) {
4444 		if (hp->allocaddr)
4445 			xbcopy(hp->origaddr, hp->addr, hp->len);
4446 	}
4447 	/*
4448 	 * do corruption if necessary
4449 	 */
4450 	mutex_enter(&bofi_low_mutex);
4451 	mutex_enter(&bofi_mutex);
4452 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4453 		ep = lp->errentp;
4454 		if ((((ep->errdef.access_type & BOFI_DMA_R) &&
4455 		    (view == DDI_DMA_SYNC_FORCPU ||
4456 		    view == DDI_DMA_SYNC_FORKERNEL)) ||
4457 		    ((ep->errdef.access_type & BOFI_DMA_W) &&
4458 		    (view == DDI_DMA_SYNC_FORDEV))) &&
4459 		    (ep->state & BOFI_DEV_ACTIVE)) {
4460 			do_dma_corrupt(hp, ep, view, 0, hp->len);
4461 		}
4462 	}
4463 	mutex_exit(&bofi_mutex);
4464 	mutex_exit(&bofi_low_mutex);
4465 	/*
4466 	 * if there is an explicit sync_for_cpu, then do copy to original
4467 	 */
4468 	if (bofi_sync_check &&
4469 	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)) {
4470 		if (hp->allocaddr)
4471 			xbcopy(hp->addr, hp->origaddr, hp->len);
4472 	}
4473 	if (view == DDI_DMA_SYNC_FORDEV)
4474 		/*
4475 		 * in this case do sync last
4476 		 */
4477 		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
4478 }
4479 #endif
4480 
4481 /*
4482  * bofi intercept routine - gets called instead of the user's interrupt routine
4483  */
4484 static uint_t
4485 bofi_intercept_intr(caddr_t xp)
4486 {
4487 	struct bofi_errent *ep;
4488 	struct bofi_link   *lp;
4489 	struct bofi_shadow *hp;
4490 	int intr_count = 1;
4491 	int i;
4492 	uint_t retval = DDI_INTR_UNCLAIMED;
4493 	uint_t result;
4494 	int unclaimed_counter = 0;
4495 	int jabber_detected = 0;
4496 
4497 	hp = (struct bofi_shadow *)xp;
4498 	/*
4499 	 * check if nothing to do
4500 	 */
4501 	if (hp->link == NULL)
4502 		return (hp->save.intr.int_handler
4503 		    (hp->save.intr.int_handler_arg1, NULL));
4504 	mutex_enter(&bofi_mutex);
4505 	/*
4506 	 * look for any errdefs
4507 	 */
4508 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4509 		ep = lp->errentp;
4510 		if (ep->state & BOFI_DEV_ACTIVE) {
4511 			/*
4512 			 * got one
4513 			 */
4514 			if ((ep->errdef.access_count ||
4515 			    ep->errdef.fail_count) &&
4516 			    (ep->errdef.access_type & BOFI_LOG))
4517 				log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
4518 			if (ep->errdef.access_count > 1) {
4519 				ep->errdef.access_count--;
4520 			} else if (ep->errdef.fail_count > 0) {
4521 				ep->errdef.fail_count--;
4522 				ep->errdef.access_count = 0;
4523 				/*
4524 				 * OK do "corruption"
4525 				 */
4526 				if (ep->errstate.fail_time == 0)
4527 					ep->errstate.fail_time = bofi_gettime();
4528 				switch (ep->errdef.optype) {
4529 				case BOFI_DELAY_INTR:
4530 					if (!hp->hilevel) {
4531 						drv_usecwait
4532 						    (ep->errdef.operand);
4533 					}
4534 					break;
4535 				case BOFI_LOSE_INTR:
4536 					intr_count = 0;
4537 					break;
4538 				case BOFI_EXTRA_INTR:
4539 					intr_count += ep->errdef.operand;
4540 					break;
4541 				default:
4542 					break;
4543 				}
4544 			}
4545 		}
4546 	}
4547 	mutex_exit(&bofi_mutex);
4548 	/*
4549 	 * send extra or fewer interrupts as requested
4550 	 */
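	/*
	 * crude jabber detector - each claimed interrupt halves the
	 * running unclaimed count, each unclaimed one bumps it, and
	 * reaching 20 counts as jabber
	 */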
4551 	for (i = 0; i < intr_count; i++) {
4552 		result = hp->save.intr.int_handler
4553 		    (hp->save.intr.int_handler_arg1, NULL);
4554 		if (result == DDI_INTR_CLAIMED)
4555 			unclaimed_counter >>= 1;
4556 		else if (++unclaimed_counter >= 20)
4557 			jabber_detected = 1;
4558 		if (i == 0)
4559 			retval = result;
4560 	}
4561 	/*
4562 	 * if more than 1000 spurious interrupts requested and
4563 	 * jabber not detected - panic, since it should have been caught
4564 	 */
4565 	if (intr_count > 1000 && !jabber_detected)
4566 		panic("undetected interrupt jabber: %s%d",
4567 		    hp->name, hp->instance);
4568 	/*
4569 	 * return first response - or "unclaimed" if none
4570 	 */
4571 	return (retval);
4572 }
4573 
4574 
4575 /*
4576  * our ddi_check_acc_hdl
4577  */
4578 /* ARGSUSED */
4579 static int
4580 bofi_check_acc_hdl(ddi_acc_impl_t *handle)
4581 {
4582 	struct bofi_shadow *hp;
4583 	struct bofi_link   *lp;
4584 	uint_t result = 0;
4585 
4586 	hp = handle->ahi_common.ah_bus_private;
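	/*
	 * no errdefs chained on, or the mutex is contended - just
	 * report "no fault" rather than risk blocking here
	 */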
4587 	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
4588 		return (0);
4589 	}
4590 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4591 		/*
4592 		 * OR in error state from all associated
4593 		 * errdef structures
4594 		 */
4595 		if (lp->errentp->errdef.access_count == 0 &&
4596 		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4597 			result = (lp->errentp->errdef.acc_chk & 1);
4598 		}
4599 	}
4600 	mutex_exit(&bofi_mutex);
4601 	return (result);
4602 }
4603 
4604 /*
4605  * our ddi_check_dma_hdl
4606  */
4607 /* ARGSUSED */
4608 static int
4609 bofi_check_dma_hdl(ddi_dma_impl_t *handle)
4610 {
4611 	struct bofi_shadow *hp;
4612 	struct bofi_link   *lp;
4613 	struct bofi_shadow *hhashp;
4614 	uint_t result = 0;
4615 
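	/*
	 * as in bofi_check_acc_hdl() - don't block on a contended
	 * mutex, just report "no fault"
	 */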
4616 	if (!mutex_tryenter(&bofi_mutex)) {
4617 		return (0);
4618 	}
4619 	hhashp = HDL_HHASH(handle);
4620 	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
4621 		if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
4622 			break;
4623 	if (hp == hhashp) {
4624 		mutex_exit(&bofi_mutex);
4625 		return (0);
4626 	}
4627 	if (!hp->link) {
4628 		mutex_exit(&bofi_mutex);
4629 		return (0);
4630 	}
4631 	for (lp = hp->link; lp != NULL; lp = lp->link) {
4632 		/*
4633 		 * OR in error state from all associated
4634 		 * errdef structures
4635 		 */
4636 		if (lp->errentp->errdef.access_count == 0 &&
4637 		    (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4638 			result = ((lp->errentp->errdef.acc_chk & 2) ? 1 : 0);
4639 		}
4640 	}
4641 	mutex_exit(&bofi_mutex);
4642 	return (result);
4643 }
4644 
4645 
4646 /* ARGSUSED */
4647 static int
4648 bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
4649 		    ddi_eventcookie_t eventhdl, void *impl_data)
4650 {
4651 	ddi_eventcookie_t ec;
4652 	struct ddi_fault_event_data *arg;
4653 	struct bofi_errent *ep;
4654 	struct bofi_shadow *hp;
4655 	struct bofi_shadow *dhashp;
4656 	struct bofi_link   *lp;
4657 
4658 	ASSERT(eventhdl);
4659 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
4660 		return (DDI_FAILURE);
4661 
4662 	if (ec != eventhdl)
4663 		return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
4664 		    impl_data));
4665 
4666 	arg = (struct ddi_fault_event_data *)impl_data;
4667 	mutex_enter(&bofi_mutex);
4668 	/*
4669 	 * find shadow handles with appropriate dev_infos
4670 	 * and set error reported on all associated errdef structures
4671 	 */
4672 	dhashp = HDL_DHASH(arg->f_dip);
4673 	for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
4674 		if (hp->dip == arg->f_dip) {
4675 			for (lp = hp->link; lp != NULL; lp = lp->link) {
4676 				ep = lp->errentp;
4677 				ep->errstate.errmsg_count++;
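				/*
				 * record this message if it is the
				 * first, or more severe than the one
				 * already held (numerically lower
				 * impact values are more severe)
				 */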
4678 				if ((ep->errstate.msg_time == NULL ||
4679 				    ep->errstate.severity > arg->f_impact) &&
4680 				    (ep->state & BOFI_DEV_ACTIVE)) {
4681 					ep->errstate.msg_time = bofi_gettime();
4682 					ep->errstate.severity = arg->f_impact;
4683 					(void) strncpy(ep->errstate.buffer,
4684 					    arg->f_message, ERRMSGSIZE);
4685 					ddi_trigger_softintr(ep->softintr_id);
4686 				}
4687 			}
4688 		}
4689 	}
4690 	mutex_exit(&bofi_mutex);
4691 	return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
4692 }
4693 
4694 /*ARGSUSED*/
4695 static int
4696 bofi_fm_ereport_callback(sysevent_t *ev, void *cookie)
4697 {
4698 	char *class = "";
4699 	char *path = "";
4700 	char *ptr;
4701 	nvlist_t *nvlist;
4702 	nvlist_t *detector;
4703 	ddi_fault_impact_t impact;
4704 	struct bofi_errent *ep;
4705 	struct bofi_shadow *hp;
4706 	struct bofi_link   *lp;
4707 	char service_class[FM_MAX_CLASS];
4708 	char hppath[MAXPATHLEN];
4709 	int service_ereport = 0;
4710 
4711 	(void) sysevent_get_attr_list(ev, &nvlist);
4712 	(void) nvlist_lookup_string(nvlist, FM_CLASS, &class);
4713 	if (nvlist_lookup_nvlist(nvlist, FM_EREPORT_DETECTOR, &detector) == 0)
4714 		(void) nvlist_lookup_string(detector, FM_FMRI_DEV_PATH, &path);
4715 
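	/*
	 * build the class prefix for service-impact ereports - the
	 * comparison length excludes the trailing '.' so a class equal
	 * to the bare prefix also matches
	 */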
4716 	(void) snprintf(service_class, FM_MAX_CLASS, "%s.%s.%s.",
4717 	    FM_EREPORT_CLASS, DDI_IO_CLASS, DDI_FM_SERVICE_IMPACT);
4718 	if (strncmp(class, service_class, strlen(service_class) - 1) == 0)
4719 		service_ereport = 1;
4720 
4721 	mutex_enter(&bofi_mutex);
4722 	/*
4723 	 * find shadow handles with appropriate dev_infos
4724 	 * and set error reported on all associated errdef structures
4725 	 */
4726 	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
4727 		(void) ddi_pathname(hp->dip, hppath);
4728 		if (strcmp(path, hppath) != 0)
4729 			continue;
4730 		for (lp = hp->link; lp != NULL; lp = lp->link) {
4731 			ep = lp->errentp;
4732 			ep->errstate.errmsg_count++;
4733 			if (!(ep->state & BOFI_DEV_ACTIVE))
4734 				continue;
4735 			if (ep->errstate.msg_time != NULL)
4736 				continue;
4737 			if (service_ereport) {
4738 				ptr = class + strlen(service_class);
4739 				if (strcmp(ptr, DDI_FM_SERVICE_LOST) == 0)
4740 					impact = DDI_SERVICE_LOST;
4741 				else if (strcmp(ptr,
4742 				    DDI_FM_SERVICE_DEGRADED) == 0)
4743 					impact = DDI_SERVICE_DEGRADED;
4744 				else if (strcmp(ptr,
4745 				    DDI_FM_SERVICE_RESTORED) == 0)
4746 					impact = DDI_SERVICE_RESTORED;
4747 				else
4748 					impact = DDI_SERVICE_UNAFFECTED;
4749 				if (ep->errstate.severity > impact)
4750 					ep->errstate.severity = impact;
4751 			} else if (ep->errstate.buffer[0] == '\0') {
4752 				(void) strncpy(ep->errstate.buffer, class,
4753 				    ERRMSGSIZE);
4754 			}
4755 			if (ep->errstate.buffer[0] != '\0' &&
4756 			    ep->errstate.severity < DDI_SERVICE_RESTORED) {
4757 				ep->errstate.msg_time = bofi_gettime();
4758 				ddi_trigger_softintr(ep->softintr_id);
4759 			}
4760 		}
4761 	}
4762 	nvlist_free(nvlist);
4763 	mutex_exit(&bofi_mutex);
4764 	return (0);
4765 }
4766 
4767 /*
4768  * our intr_ops routine
4769  */
4770 static int
4771 bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
4772     ddi_intr_handle_impl_t *hdlp, void *result)
4773 {
4774 	int retval;
4775 	struct bofi_shadow *hp;
4776 	struct bofi_shadow *dhashp;
4777 	struct bofi_shadow *hhashp;
4778 	struct bofi_errent *ep;
4779 	struct bofi_link   *lp, *next_lp;
4780 
4781 	switch (intr_op) {
4782 	case DDI_INTROP_ADDISR:
4783 		/*
4784 		 * if driver_list is set, only intercept those drivers
4785 		 */
4786 		if (!driver_under_test(rdip))
4787 			return (save_bus_ops.bus_intr_op(dip, rdip,
4788 			    intr_op, hdlp, result));
4789 		/*
4790 		 * allocate shadow handle structure and fill in
4791 		 */
4792 		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
4793 		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
4794 		hp->instance = ddi_get_instance(rdip);
4795 		hp->save.intr.int_handler = hdlp->ih_cb_func;
4796 		hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
4797 		hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
4798 		hdlp->ih_cb_arg1 = (caddr_t)hp;
4799 		hp->bofi_inum = hdlp->ih_inum;
4800 		hp->dip = rdip;
4801 		hp->link = NULL;
4802 		hp->type = BOFI_INT_HDL;
4803 		/*
4804 		 * save whether hilevel or not
4805 		 */
4806 
4807 		if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
4808 			hp->hilevel = 1;
4809 		else
4810 			hp->hilevel = 0;
4811 
4812 		/*
4813 		 * call nexus to do real work, but specifying our handler, and
4814 		 * our shadow handle as argument
4815 		 */
4816 		retval = save_bus_ops.bus_intr_op(dip, rdip,
4817 		    intr_op, hdlp, result);
4818 		if (retval != DDI_SUCCESS) {
4819 			kmem_free(hp, sizeof (struct bofi_shadow));
4820 			return (retval);
4821 		}
4822 		/*
4823 		 * add to dhash, hhash and inuse lists
4824 		 */
4825 		mutex_enter(&bofi_low_mutex);
4826 		mutex_enter(&bofi_mutex);
4827 		hp->next = shadow_list.next;
4828 		shadow_list.next->prev = hp;
4829 		hp->prev = &shadow_list;
4830 		shadow_list.next = hp;
4831 		hhashp = HDL_HHASH(hdlp->ih_inum);
4832 		hp->hnext = hhashp->hnext;
4833 		hhashp->hnext->hprev = hp;
4834 		hp->hprev = hhashp;
4835 		hhashp->hnext = hp;
4836 		dhashp = HDL_DHASH(hp->dip);
4837 		hp->dnext = dhashp->dnext;
4838 		dhashp->dnext->dprev = hp;
4839 		hp->dprev = dhashp;
4840 		dhashp->dnext = hp;
4841 		/*
4842 		 * chain on any pre-existing errdefs that apply to this
4843 		 * interrupt handler
4844 		 */
4845 		for (ep = errent_listp; ep != NULL; ep = ep->next) {
4846 			if (ddi_name_to_major(hp->name) ==
4847 			    ddi_name_to_major(ep->name) &&
4848 			    hp->instance == ep->errdef.instance &&
4849 			    (ep->errdef.access_type & BOFI_INTR)) {
4850 				lp = bofi_link_freelist;
4851 				if (lp != NULL) {
4852 					bofi_link_freelist = lp->link;
4853 					lp->errentp = ep;
4854 					lp->link = hp->link;
4855 					hp->link = lp;
4856 				}
4857 			}
4858 		}
4859 		mutex_exit(&bofi_mutex);
4860 		mutex_exit(&bofi_low_mutex);
4861 		return (retval);
4862 	case DDI_INTROP_REMISR:
4863 		/*
4864 		 * call nexus routine first
4865 		 */
4866 		retval = save_bus_ops.bus_intr_op(dip, rdip,
4867 		    intr_op, hdlp, result);
4868 		/*
4869 		 * find shadow handle
4870 		 */
4871 		mutex_enter(&bofi_low_mutex);
4872 		mutex_enter(&bofi_mutex);
4873 		hhashp = HDL_HHASH(hdlp->ih_inum);
4874 		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
4875 			if (hp->dip == rdip &&
4876 			    hp->type == BOFI_INT_HDL &&
4877 			    hp->bofi_inum == hdlp->ih_inum) {
4878 				break;
4879 			}
4880 		}
4881 		if (hp == hhashp) {
4882 			mutex_exit(&bofi_mutex);
4883 			mutex_exit(&bofi_low_mutex);
4884 			return (retval);
4885 		}
4886 		/*
4887 		 * found one - remove from dhash, hhash and inuse lists
4888 		 */
4889 		hp->hnext->hprev = hp->hprev;
4890 		hp->hprev->hnext = hp->hnext;
4891 		hp->dnext->dprev = hp->dprev;
4892 		hp->dprev->dnext = hp->dnext;
4893 		hp->next->prev = hp->prev;
4894 		hp->prev->next = hp->next;
4895 		/*
4896 		 * free any errdef link structures
4897 		 * tagged on to this shadow handle
4898 		 */
4899 		for (lp = hp->link; lp != NULL; ) {
4900 			next_lp = lp->link;
4901 			lp->link = bofi_link_freelist;
4902 			bofi_link_freelist = lp;
4903 			lp = next_lp;
4904 		}
4905 		hp->link = NULL;
4906 		mutex_exit(&bofi_mutex);
4907 		mutex_exit(&bofi_low_mutex);
4908 		kmem_free(hp, sizeof (struct bofi_shadow));
4909 		return (retval);
4910 	default:
4911 		return (save_bus_ops.bus_intr_op(dip, rdip,
4912 		    intr_op, hdlp, result));
4913 	}
4914 }
4915